diff --git a/config/arm64/default b/config/arm64/default index c7e2bf1..2388b19 100644 --- a/config/arm64/default +++ b/config/arm64/default @@ -3641,7 +3641,7 @@ CONFIG_PRINTER=m CONFIG_PPDEV=m CONFIG_HVC_DRIVER=y # CONFIG_HVC_DCC is not set -CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m CONFIG_IPMI_DMI_DECODE=y CONFIG_IPMI_PANIC_EVENT=y @@ -6634,7 +6634,7 @@ CONFIG_VFIO_MDEV=m CONFIG_VFIO_MDEV_DEVICE=m CONFIG_IRQ_BYPASS_MANAGER=y CONFIG_VIRT_DRIVERS=y -CONFIG_VIRTIO=m +CONFIG_VIRTIO=y # # Virtio drivers diff --git a/config/arm64/vanilla b/config/arm64/vanilla index e9b3af6..c17b54b 100644 --- a/config/arm64/vanilla +++ b/config/arm64/vanilla @@ -47,5 +47,7 @@ CONFIG_STREAM_PARSER=m CONFIG_TREE_RCU_TRACE=y CONFIG_TYPEC_TCPCI=m CONFIG_UCSI=m +CONFIG_VIRTIO=m +CONFIG_VIRTIO_CONSOLE=m CONFIG_MODULES=y CONFIG_EFI_STUB=y diff --git a/config/ppc64le/default b/config/ppc64le/default index 88a6e77..0d2ff6a 100644 --- a/config/ppc64le/default +++ b/config/ppc64le/default @@ -2237,10 +2237,10 @@ CONFIG_NET_VENDOR_EXAR=y CONFIG_S2IO=m CONFIG_VXGE=m # CONFIG_VXGE_DEBUG_TRACE_ALL is not set -CONFIG_NET_VENDOR_HP=y -# CONFIG_HP100 is not set CONFIG_NET_VENDOR_GOOGLE=y CONFIG_GVE=m +CONFIG_NET_VENDOR_HP=y +# CONFIG_HP100 is not set CONFIG_NET_VENDOR_IBM=y CONFIG_IBMVETH=m # CONFIG_IBM_EMAC_ZMII is not set @@ -2977,7 +2977,7 @@ CONFIG_HVC_OPAL=y CONFIG_HVC_RTAS=y # CONFIG_HVC_UDBG is not set CONFIG_HVCS=m -CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_CONSOLE=y CONFIG_IBM_BSR=m CONFIG_POWERNV_OP_PANEL=m CONFIG_IPMI_HANDLER=m @@ -4368,7 +4368,7 @@ CONFIG_VFIO_MDEV=m CONFIG_VFIO_MDEV_DEVICE=m CONFIG_IRQ_BYPASS_MANAGER=y CONFIG_VIRT_DRIVERS=y -CONFIG_VIRTIO=m +CONFIG_VIRTIO=y # # Virtio drivers diff --git a/config/ppc64le/vanilla b/config/ppc64le/vanilla index a4ae7e3..688c4e5 100644 --- a/config/ppc64le/vanilla +++ b/config/ppc64le/vanilla @@ -32,4 +32,6 @@ CONFIG_RTC_DRV_DS1307_HWMON=y CONFIG_STREAM_PARSER=m # CONFIG_SYSTEM_DATA_VERIFICATION is not set CONFIG_TREE_RCU_TRACE=y +CONFIG_VIRTIO=m +CONFIG_VIRTIO_CONSOLE=m CONFIG_MODULES=y diff --git a/config/x86_64/default b/config/x86_64/default index 0c4e417..ab39dfe 100644 --- a/config/x86_64/default +++ b/config/x86_64/default @@ -3735,7 +3735,7 @@ CONFIG_HVC_DRIVER=y CONFIG_HVC_IRQ=y CONFIG_HVC_XEN=y CONFIG_HVC_XEN_FRONTEND=y -CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m CONFIG_IPMI_DMI_DECODE=y CONFIG_IPMI_PANIC_EVENT=y @@ -6345,7 +6345,7 @@ CONFIG_VFIO_MDEV=m CONFIG_VFIO_MDEV_DEVICE=m CONFIG_IRQ_BYPASS_MANAGER=m CONFIG_VIRT_DRIVERS=y -CONFIG_VIRTIO=m +CONFIG_VIRTIO=y # # Virtio drivers diff --git a/config/x86_64/vanilla b/config/x86_64/vanilla index 7f57463..b1132b9 100644 --- a/config/x86_64/vanilla +++ b/config/x86_64/vanilla @@ -37,5 +37,7 @@ CONFIG_STREAM_PARSER=m CONFIG_TREE_RCU_TRACE=y CONFIG_TYPEC_TCPCI=m CONFIG_UCSI=m +CONFIG_VIRTIO=m +CONFIG_VIRTIO_CONSOLE=m CONFIG_MODULES=y CONFIG_EFI_STUB=y diff --git a/patches.kabi/kabi-hide-new-parameter-of-ip6_dst_lookup_flow.patch b/patches.kabi/kabi-hide-new-parameter-of-ip6_dst_lookup_flow.patch new file mode 100644 index 0000000..06e6efb --- /dev/null +++ b/patches.kabi/kabi-hide-new-parameter-of-ip6_dst_lookup_flow.patch @@ -0,0 +1,248 @@ +From: Michal Kubecek +Date: Mon, 31 Aug 2020 22:55:23 +0200 +Subject: kabi: hide new parameter of ip6_dst_lookup_flow() +Patch-mainline: Never, kabi workaround +References: bsc#1165629 + +Backport of mainline commit c4e85f73afb6 ("net: ipv6: add net argument to +ip6_dst_lookup_flow") added a new parameter net to exported 
function +ip6_dst_lookup_flow(). + +Rename modified function to ip6_dst_lookup_flow__net() and create +ip6_dst_lookup_flow() with original signature and behaviour (which is just +a wrapper around ip6_dst_lookup_flow__net()) in case some out of tree +module calls it. All in tree callers call ip6_dst_lookup_flow__net(). + +Signed-off-by: Michal Kubecek +--- + include/net/ipv6.h | 4 +++- + net/dccp/ipv6.c | 6 +++--- + net/ipv6/af_inet6.c | 5 +++-- + net/ipv6/datagram.c | 2 +- + net/ipv6/inet6_connection_sock.c | 4 ++-- + net/ipv6/ip6_output.c | 18 ++++++++++++++---- + net/ipv6/raw.c | 2 +- + net/ipv6/syncookies.c | 3 ++- + net/ipv6/tcp_ipv6.c | 4 ++-- + net/l2tp/l2tp_ip6.c | 2 +- + net/sctp/ipv6.c | 5 +++-- + 11 files changed, 35 insertions(+), 20 deletions(-) + +--- a/include/net/ipv6.h ++++ b/include/net/ipv6.h +@@ -868,8 +868,10 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) + + int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, + struct flowi6 *fl6); +-struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, ++struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, + const struct in6_addr *final_dst); ++struct dst_entry *ip6_dst_lookup_flow__net(struct net *net, const struct sock *sk, struct flowi6 *fl6, ++ const struct in6_addr *final_dst); + struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, + const struct in6_addr *final_dst); + struct dst_entry *ip6_blackhole_route(struct net *net, +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -209,7 +209,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req + final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); + rcu_read_unlock(); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + dst = NULL; +@@ -280,7 +280,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) + security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); + + /* sk = NULL, but it is safe for now. RST socket required. 
*/ +- dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); ++ dst = ip6_dst_lookup_flow__net(sock_net(ctl_sk), ctl_sk, &fl6, NULL); + if (!IS_ERR(dst)) { + skb_dst_set(skb, dst); + ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0); +@@ -888,7 +888,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, + opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); + final_p = fl6_update_dst(&fl6, opt, &final); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto failure; +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -740,7 +740,8 @@ int inet6_sk_rebuild_header(struct sock *sk) + &final); + rcu_read_unlock(); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, &fl6, ++ final_p); + if (IS_ERR(dst)) { + sk->sk_route_caps = 0; + sk->sk_err_soft = -PTR_ERR(dst); +@@ -898,7 +899,7 @@ static struct pernet_operations inet6_net_ops = { + static const struct ipv6_stub ipv6_stub_impl = { + .ipv6_sock_mc_join = ipv6_sock_mc_join, + .ipv6_sock_mc_drop = ipv6_sock_mc_drop, +- .ipv6_dst_lookup_flow = ip6_dst_lookup_flow, ++ .ipv6_dst_lookup_flow = ip6_dst_lookup_flow__net, + .udpv6_encap_enable = udpv6_encap_enable, + .ndisc_send_na = ndisc_send_na, + .nd_tbl = &nd_tbl, +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -88,7 +88,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr) + final_p = fl6_update_dst(&fl6, opt, &final); + rcu_read_unlock(); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out; +--- a/net/ipv6/inet6_connection_sock.c ++++ b/net/ipv6/inet6_connection_sock.c +@@ -52,7 +52,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, + fl6->flowi6_uid = sk->sk_uid; + security_req_classify_flow(req, flowi6_to_flowi(fl6)); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, fl6, final_p); + if (IS_ERR(dst)) + return NULL; + +@@ -107,7 +107,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, + + dst = __inet6_csk_dst_check(sk, np->dst_cookie); + if (!dst) { +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, fl6, final_p); + + if (!IS_ERR(dst)) + ip6_dst_store(sk, dst, NULL, NULL); +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1068,7 +1068,7 @@ int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, + EXPORT_SYMBOL_GPL(ip6_dst_lookup); + + /** +- * ip6_dst_lookup_flow - perform route lookup on flow with ipsec ++ * ip6_dst_lookup_flow__net - perform route lookup on flow with ipsec + * @sk: socket which provides route info + * @fl6: flow to lookup + * @final_dst: final destination address for ipsec lookup +@@ -1078,8 +1078,8 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); + * It returns a valid dst pointer on success, or a pointer encoded + * error code. 
+ */ +-struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, +- const struct in6_addr *final_dst) ++struct dst_entry *ip6_dst_lookup_flow__net(struct net *net, const struct sock *sk, struct flowi6 *fl6, ++ const struct in6_addr *final_dst) + { + struct dst_entry *dst = NULL; + int err; +@@ -1092,6 +1092,15 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st + + return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0); + } ++EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow__net); ++ ++ ++/* We need to preserve this one for kABI. */ ++struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, ++ const struct in6_addr *final_dst) ++{ ++ return ip6_dst_lookup_flow__net(sock_net(sk), sk, fl6, final_dst); ++} + EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); + + /** +@@ -1115,7 +1124,8 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, + + dst = ip6_sk_dst_check(sk, dst, fl6); + if (!dst) +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, fl6, ++ final_dst); + + return dst; + } +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -926,7 +926,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out; +--- a/net/ipv6/syncookies.c ++++ b/net/ipv6/syncookies.c +@@ -239,7 +239,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) + fl6.flowi6_uid = sk->sk_uid; + security_req_classify_flow(req, flowi6_to_flowi(&fl6)); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, &fl6, ++ final_p); + if (IS_ERR(dst)) + goto out_free; + } +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -267,7 +267,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, + + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto failure; +@@ -866,7 +866,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 + * Underlying function will use this to retrieve the network + * namespace + */ +- dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); ++ dst = ip6_dst_lookup_flow__net(sock_net(ctl_sk), ctl_sk, &fl6, NULL); + if (!IS_ERR(dst)) { + skb_dst_set(buff, dst); + ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass); +--- a/net/l2tp/l2tp_ip6.c ++++ b/net/l2tp/l2tp_ip6.c +@@ -639,7 +639,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out; +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -271,7 +271,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); + rcu_read_unlock(); + +- dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); ++ dst = ip6_dst_lookup_flow__net(sock_net(sk), 
sk, fl6, final_p);
+ if (!asoc || saddr) {
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+@@ -329,7 +329,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ fl6->saddr = laddr->a.v6.sin6_addr;
+ fl6->fl6_sport = laddr->a.v6.sin6_port;
+ final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+- bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
++ bdst = ip6_dst_lookup_flow__net(sock_net(sk), sk, fl6,
++ final_p);
+
+ if (IS_ERR(bdst))
+ continue;
diff --git a/patches.kabi/kabi-mask-changes-to-struct-ipv6_stub.patch b/patches.kabi/kabi-mask-changes-to-struct-ipv6_stub.patch
new file mode 100644
index 0000000..09c127f
--- /dev/null
+++ b/patches.kabi/kabi-mask-changes-to-struct-ipv6_stub.patch
@@ -0,0 +1,91 @@
+From: Michal Kubecek
+Date: Mon, 31 Aug 2020 22:05:44 +0200
+Subject: kabi: mask changes to struct ipv6_stub
+Patch-mainline: Never, kabi workaround
+References: bsc#1165629
+
+Backport of mainline commit 6c8991f41546 ("net: ipv6_stub: use
+ip6_dst_lookup_flow instead of ip6_dst_lookup") replaces the callback
+->ipv6_dst_lookup() in struct ipv6_stub with a new ->ipv6_dst_lookup_flow().
+
+As there are only two instances of the structure and both are static
+variables in kernel code (and only one is actually used as we don't build
+IPv6 support as a module any more), we can safely add new members at the
+end of the structure.
+
+Move the ->ipv6_dst_lookup_flow() callback to the end of the structure,
+hide it from the kABI checker and restore ->ipv6_dst_lookup() for potential
+callers in out-of-tree modules built against previous kernels.
+
+Signed-off-by: Michal Kubecek
+---
+ include/net/addrconf.h | 12 ++++++++----
+ net/ipv6/addrconf_core.c | 8 ++++++++
+ net/ipv6/af_inet6.c | 3 ++-
+ 3 files changed, 18 insertions(+), 5 deletions(-)
+
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -223,15 +223,19 @@ struct ipv6_stub {
+ const struct in6_addr *addr);
+ int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+- struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
+- const struct sock *sk,
+- struct flowi6 *fl6,
+- const struct in6_addr *final_dst);
++ int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
++ struct dst_entry **dst, struct flowi6 *fl6);
+ void (*udpv6_encap_enable)(void);
+ void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
+ struct neigh_table *nd_tbl;
++#ifndef __GENKSYMS__
++ struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
++ const struct sock *sk,
++ struct flowi6 *fl6,
++ const struct in6_addr *final_dst);
++#endif /* __GENKSYMS__ */
+ };
+ extern const struct ipv6_stub *ipv6_stub __read_mostly;
+
+--- a/net/ipv6/addrconf_core.c
++++ b/net/ipv6/addrconf_core.c
+@@ -127,6 +127,13 @@ int inet6addr_validator_notifier_call_chain(unsigned long val, void *v)
+ }
+ EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain);
+
++static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
++ struct dst_entry **u2,
++ struct flowi6 *u3)
++{
++ return -EAFNOSUPPORT;
++}
++
+ static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6,
+@@ -136,6 +143,7 @@ static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net,
+ }
+
+ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
++ .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+ 
.ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, + }; + EXPORT_SYMBOL_GPL(ipv6_stub); +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -899,10 +899,11 @@ static struct pernet_operations inet6_net_ops = { + static const struct ipv6_stub ipv6_stub_impl = { + .ipv6_sock_mc_join = ipv6_sock_mc_join, + .ipv6_sock_mc_drop = ipv6_sock_mc_drop, +- .ipv6_dst_lookup_flow = ip6_dst_lookup_flow__net, ++ .ipv6_dst_lookup = ip6_dst_lookup, + .udpv6_encap_enable = udpv6_encap_enable, + .ndisc_send_na = ndisc_send_na, + .nd_tbl = &nd_tbl, ++ .ipv6_dst_lookup_flow = ip6_dst_lookup_flow__net, + }; + + static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = { diff --git a/patches.kabi/scsi-fc-kABI-fixes-for-new-ELS_FPIN-definition.patch b/patches.kabi/scsi-fc-kABI-fixes-for-new-ELS_FPIN-definition.patch index 097d83e..dd93451 100644 --- a/patches.kabi/scsi-fc-kABI-fixes-for-new-ELS_FPIN-definition.patch +++ b/patches.kabi/scsi-fc-kABI-fixes-for-new-ELS_FPIN-definition.patch @@ -10,11 +10,12 @@ Signed-off-by: Daniel Wagner --- --- a/include/uapi/scsi/fc/fc_els.h +++ b/include/uapi/scsi/fc/fc_els.h -@@ -53,7 +53,9 @@ enum fc_els_cmd { +@@ -53,8 +53,10 @@ enum fc_els_cmd { ELS_REC = 0x13, /* read exchange concise */ ELS_SRR = 0x14, /* sequence retransmission request */ ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */ +#ifndef __GENKSYMS__ + ELS_RDP = 0x18, /* Read Diagnostic Parameters */ ELS_RDF = 0x19, /* Register Diagnostic Functions */ +#endif ELS_PRLI = 0x20, /* process login */ diff --git a/patches.suse/0001-Revert-xen-balloon-Fix-crash-when-ballooning-on-x86-.patch b/patches.suse/0001-Revert-xen-balloon-Fix-crash-when-ballooning-on-x86-.patch new file mode 100644 index 0000000..763c686 --- /dev/null +++ b/patches.suse/0001-Revert-xen-balloon-Fix-crash-when-ballooning-on-x86-.patch @@ -0,0 +1,59 @@ +Patch-mainline: v5.9-rc1 +Git-commit: f5ec6723269d9652ce42927cc03485061d663b23 +References: bsc#1065600 +From: Roger Pau Monne +Date: Mon, 27 Jul 2020 11:13:41 +0200 +Subject: [PATCH] Revert "xen/balloon: Fix crash when ballooning on x86 32 bit + PAE" +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This reverts commit dfd74a1edfaba5864276a2859190a8d242d18952. + +This has been fixed by commit dca4436d1cf9e0d237c which added the out +of bounds check to __add_memory, so that trying to add blocks past +MAX_PHYSMEM_BITS will fail. + +Note the check in the Xen balloon driver was bogus anyway, as it +checked the start address of the resource, but it should instead test +the end address to assert the whole resource falls below +MAX_PHYSMEM_BITS. 
+ +Signed-off-by: Roger Pau Monné +Reviewed-by: Juergen Gross +Link: https://lore.kernel.org/r/20200727091342.52325-4-roger.pau@citrix.com +Signed-off-by: Juergen Gross +--- + drivers/xen/balloon.c | 14 -------------- + 1 file changed, 14 deletions(-) + +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c +index 292413b27575..b1d8b028bf80 100644 +--- a/drivers/xen/balloon.c ++++ b/drivers/xen/balloon.c +@@ -322,21 +322,6 @@ static struct resource *additional_memory_resource(phys_addr_t size) + } + } + +-#ifdef CONFIG_SPARSEMEM +- { +- unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT); +- unsigned long pfn = res->start >> PAGE_SHIFT; +- +- if (pfn > limit) { +- pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", +- pfn, limit); +- release_memory_resource(res); +- release_memory_resource(res_hostmem); +- return NULL; +- } +- } +-#endif +- + return res; + } + +-- +2.26.2 + diff --git a/patches.suse/0001-bcache-allocate-meta-data-pages-as-compound-pages.patch b/patches.suse/0001-bcache-allocate-meta-data-pages-as-compound-pages.patch new file mode 100644 index 0000000..78bd523 --- /dev/null +++ b/patches.suse/0001-bcache-allocate-meta-data-pages-as-compound-pages.patch @@ -0,0 +1,88 @@ +From 5fe48867856367142d91a82f2cbf7a57a24cbb70 Mon Sep 17 00:00:00 2001 +From: Coly Li +Date: Sat, 25 Jul 2020 20:00:16 +0800 +Subject: [PATCH] bcache: allocate meta data pages as compound pages +Git-commit: 5fe48867856367142d91a82f2cbf7a57a24cbb70 +Patch-mainline: v5.9-rc1 +References: bsc#1172873 + +There are some meta data of bcache are allocated by multiple pages, +and they are used as bio bv_page for I/Os to the cache device. for +example cache_set->uuids, cache->disk_buckets, journal_write->data, +bset_tree->data. + +For such meta data memory, all the allocated pages should be treated +as a single memory block. Then the memory management and underlying I/O +code can treat them more clearly. + +This patch adds __GFP_COMP flag to all the location allocating >0 order +pages for the above mentioned meta data. Then their pages are treated +as compound pages now. 
+ +Signed-off-by: Coly Li +Cc: stable@vger.kernel.org +Signed-off-by: Jens Axboe +--- + drivers/md/bcache/bset.c | 2 +- + drivers/md/bcache/btree.c | 2 +- + drivers/md/bcache/journal.c | 4 ++-- + drivers/md/bcache/super.c | 2 +- + 4 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c +index 4995fcaefe29..67a2c47f4201 100644 +--- a/drivers/md/bcache/bset.c ++++ b/drivers/md/bcache/bset.c +@@ -322,7 +322,7 @@ int bch_btree_keys_alloc(struct btree_keys *b, + + b->page_order = page_order; + +- t->data = (void *) __get_free_pages(gfp, b->page_order); ++ t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order); + if (!t->data) + goto err; + +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c +index d5c51e332046..1e3bd5ea5486 100644 +--- a/drivers/md/bcache/btree.c ++++ b/drivers/md/bcache/btree.c +@@ -785,7 +785,7 @@ int bch_btree_cache_alloc(struct cache_set *c) + mutex_init(&c->verify_lock); + + c->verify_ondisk = (void *) +- __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); ++ __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c))); + + c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); + +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c +index 90aac4e2333f..d8586b6ccb76 100644 +--- a/drivers/md/bcache/journal.c ++++ b/drivers/md/bcache/journal.c +@@ -999,8 +999,8 @@ int bch_journal_alloc(struct cache_set *c) + j->w[1].c = c; + + if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || +- !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || +- !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) ++ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) || ++ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS))) + return -ENOMEM; + + return 0; +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c +index fb54a59ffe12..4a77bfd4009f 100644 +--- a/drivers/md/bcache/super.c ++++ b/drivers/md/bcache/super.c +@@ -1784,7 +1784,7 @@ void bch_cache_set_unregister(struct cache_set *c) + } + + #define alloc_bucket_pages(gfp, c) \ +- ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) ++ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c)))) + + struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) + { +-- +2.26.2 + diff --git a/patches.suse/0001-block-improve-discard-bio-alignment-in-__blkdev_issu.patch b/patches.suse/0001-block-improve-discard-bio-alignment-in-__blkdev_issu.patch new file mode 100644 index 0000000..f8c0a26 --- /dev/null +++ b/patches.suse/0001-block-improve-discard-bio-alignment-in-__blkdev_issu.patch @@ -0,0 +1,217 @@ +From 9b15d109a6b2c0c604c588d49a5a927dac6dd616 Mon Sep 17 00:00:00 2001 +From: Coly Li +Date: Fri, 17 Jul 2020 10:42:30 +0800 +Subject: [PATCH] block: improve discard bio alignment in + __blkdev_issue_discard() +Git-commit: 9b15d109a6b2c0c604c588d49a5a927dac6dd616 +Patch-mainline: v5.9-rc1 +References: bsc#1152148 + +This patch improves discard bio split for address and size alignment in +__blkdev_issue_discard(). The aligned discard bio may help underlying +device controller to perform better discard and internal garbage +collection, and avoid unnecessary internal fragment. + +Current discard bio split algorithm in __blkdev_issue_discard() may have +non-discarded fregment on device even the discard bio LBA and size are +both aligned to device's discard granularity size. 
+ +Here is the example steps on how to reproduce the above problem. +- On a VMWare ESXi 6.5 update3 installation, create a 51GB virtual disk + with thin mode and give it to a Linux virtual machine. +- Inside the Linux virtual machine, if the 50GB virtual disk shows up as + /dev/sdb, fill data into the first 50GB by, + # dd if=/dev/zero of=/dev/sdb bs=4096 count=13107200 +- Discard the 50GB range from offset 0 on /dev/sdb, + # blkdiscard /dev/sdb -o 0 -l 53687091200 +- Observe the underlying mapping status of the device + # sg_get_lba_status /dev/sdb -m 1048 --lba=0 + descriptor LBA: 0x0000000000000000 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000000000800 blocks: 16773120 deallocated + descriptor LBA: 0x0000000000fff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000001000000 blocks: 8386560 deallocated + descriptor LBA: 0x00000000017ff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000001800000 blocks: 8386560 deallocated + descriptor LBA: 0x0000000001fff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000002000000 blocks: 8386560 deallocated + descriptor LBA: 0x00000000027ff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000002800000 blocks: 8386560 deallocated + descriptor LBA: 0x0000000002fff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000003000000 blocks: 8386560 deallocated + descriptor LBA: 0x00000000037ff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000003800000 blocks: 8386560 deallocated + descriptor LBA: 0x0000000003fff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000004000000 blocks: 8386560 deallocated + descriptor LBA: 0x00000000047ff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000004800000 blocks: 8386560 deallocated + descriptor LBA: 0x0000000004fff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000005000000 blocks: 8386560 deallocated + descriptor LBA: 0x00000000057ff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000005800000 blocks: 8386560 deallocated + descriptor LBA: 0x0000000005fff800 blocks: 2048 mapped (or unknown) + descriptor LBA: 0x0000000006000000 blocks: 6291456 deallocated + descriptor LBA: 0x0000000006600000 blocks: 0 deallocated + +Although the discard bio starts at LBA 0 and has 50<<30 bytes size which +are perfect aligned to the discard granularity, from the above list +these are many 1MB (2048 sectors) internal fragments exist unexpectedly. + +The problem is in __blkdev_issue_discard(), an improper algorithm causes +an improper bio size which is not aligned. + + 25 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + 26 sector_t nr_sects, gfp_t gfp_mask, int flags, + 27 struct bio **biop) + 28 { + 29 struct request_queue *q = bdev_get_queue(bdev); + [snipped] + 56 + 57 while (nr_sects) { + 58 sector_t req_sects = min_t(sector_t, nr_sects, + 59 bio_allowed_max_sectors(q)); + 60 + 61 WARN_ON_ONCE((req_sects << 9) > UINT_MAX); + 62 + 63 bio = blk_next_bio(bio, 0, gfp_mask); + 64 bio->bi_iter.bi_sector = sector; + 65 bio_set_dev(bio, bdev); + 66 bio_set_op_attrs(bio, op, 0); + 67 + 68 bio->bi_iter.bi_size = req_sects << 9; + 69 sector += req_sects; + 70 nr_sects -= req_sects; + [snipped] + 79 } + 80 + 81 *biop = bio; + 82 return 0; + 83 } + 84 EXPORT_SYMBOL(__blkdev_issue_discard); + +At line 58-59, to discard a 50GB range, req_sects is set as return value +of bio_allowed_max_sectors(q), which is 8388607 sectors. 
In the above +case, the discard granularity is 2048 sectors, although the start LBA +and discard length are aligned to discard granularity, req_sects never +has chance to be aligned to discard granularity. This is why there are +some still-mapped 2048 sectors fragment in every 4 or 8 GB range. + +If req_sects at line 58 is set to a value aligned to discard_granularity +and close to UNIT_MAX, then all consequent split bios inside device +driver are (almostly) aligned to discard_granularity of the device +queue. The 2048 sectors still-mapped fragment will disappear. + +This patch introduces bio_aligned_discard_max_sectors() to return the +the value which is aligned to q->limits.discard_granularity and closest +to UINT_MAX. Then this patch replaces bio_allowed_max_sectors() with +this new routine to decide a more proper split bio length. + +But we still need to handle the situation when discard start LBA is not +aligned to q->limits.discard_granularity, otherwise even the length is +aligned, current code may still leave 2048 fragment around every 4GB +range. Therefore, to calculate req_sects, firstly the start LBA of +discard range is checked (including partition offset), if it is not +aligned to discard granularity, the first split location should make +sure following bio has bi_sector aligned to discard granularity. Then +there won't be still-mapped fragment in the middle of the discard range. + +The above is how this patch improves discard bio alignment in +__blkdev_issue_discard(). Now with this patch, after discard with same +command line mentiond previously, sg_get_lba_status returns, +descriptor LBA: 0x0000000000000000 blocks: 106954752 deallocated +descriptor LBA: 0x0000000006600000 blocks: 0 deallocated + +We an see there is no 2048 sectors segment anymore, everything is clean. + +Reported-and-tested-by: Acshai Manoj +Signed-off-by: Coly Li +Reviewed-by: Hannes Reinecke +Reviewed-by: Ming Lei +Reviewed-by: Xiao Ni +Cc: Bart Van Assche +Cc: Christoph Hellwig +Cc: Enzo Matsumiya +Cc: Jens Axboe +Signed-off-by: Jens Axboe +--- + block/blk-lib.c | 31 ++++++++++++++++++++++++++++--- + block/blk.h | 14 ++++++++++++++ + 2 files changed, 42 insertions(+), 3 deletions(-) + +diff --git a/block/blk-lib.c b/block/blk-lib.c +index 5f2c429d4378..019e09bb9c0e 100644 +--- a/block/blk-lib.c ++++ b/block/blk-lib.c +@@ -29,7 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + struct request_queue *q = bdev_get_queue(bdev); + struct bio *bio = *biop; + unsigned int op; +- sector_t bs_mask; ++ sector_t bs_mask, part_offset = 0; + + if (!q) + return -ENXIO; +@@ -54,9 +54,34 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + if (!nr_sects) + return -EINVAL; + ++ /* In case the discard request is in a partition */ ++ if (bdev->bd_partno) ++ part_offset = bdev->bd_part->start_sect; ++ + while (nr_sects) { +- sector_t req_sects = min_t(sector_t, nr_sects, +- bio_allowed_max_sectors(q)); ++ sector_t granularity_aligned_lba, req_sects; ++ sector_t sector_mapped = sector + part_offset; ++ ++ granularity_aligned_lba = round_up(sector_mapped, ++ q->limits.discard_granularity >> SECTOR_SHIFT); ++ ++ /* ++ * Check whether the discard bio starts at a discard_granularity ++ * aligned LBA, ++ * - If no: set (granularity_aligned_lba - sector_mapped) to ++ * bi_size of the first split bio, then the second bio will ++ * start at a discard_granularity aligned LBA on the device. 
++ * - If yes: use bio_aligned_discard_max_sectors() as the max ++ * possible bi_size of the first split bio. Then when this bio ++ * is split in device drive, the split ones are very probably ++ * to be aligned to discard_granularity of the device's queue. ++ */ ++ if (granularity_aligned_lba == sector_mapped) ++ req_sects = min_t(sector_t, nr_sects, ++ bio_aligned_discard_max_sectors(q)); ++ else ++ req_sects = min_t(sector_t, nr_sects, ++ granularity_aligned_lba - sector_mapped); + + WARN_ON_ONCE((req_sects << 9) > UINT_MAX); + +diff --git a/block/blk.h b/block/blk.h +index 9dcf51c94096..49e2928a1632 100644 +--- a/block/blk.h ++++ b/block/blk.h +@@ -264,6 +264,20 @@ static inline unsigned int bio_allowed_max_sectors(struct request_queue *q) + return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9; + } + ++/* ++ * The max bio size which is aligned to q->limits.discard_granularity. This ++ * is a hint to split large discard bio in generic block layer, then if device ++ * driver needs to split the discard bio into smaller ones, their bi_size can ++ * be very probably and easily aligned to discard_granularity of the device's ++ * queue. ++ */ ++static inline unsigned int bio_aligned_discard_max_sectors( ++ struct request_queue *q) ++{ ++ return round_down(UINT_MAX, q->limits.discard_granularity) >> ++ SECTOR_SHIFT; ++} ++ + /* + * Internal io_context interface + */ +-- +2.26.2 + diff --git a/patches.suse/0001-drm-xen-front-Fix-misused-IS_ERR_OR_NULL-checks.patch b/patches.suse/0001-drm-xen-front-Fix-misused-IS_ERR_OR_NULL-checks.patch new file mode 100644 index 0000000..53a80b5 --- /dev/null +++ b/patches.suse/0001-drm-xen-front-Fix-misused-IS_ERR_OR_NULL-checks.patch @@ -0,0 +1,110 @@ +Patch-mainline: v5.9-rc1 +Git-commit: 14dee058610446aa464254fc5c8e88c7535195e0 +References: bsc#1065600 +From: Oleksandr Andrushchenko +Date: Thu, 13 Aug 2020 09:21:10 +0300 +Subject: [PATCH] drm/xen-front: Fix misused IS_ERR_OR_NULL checks + +The patch c575b7eeb89f: "drm/xen-front: Add support for Xen PV +display frontend" from Apr 3, 2018, leads to the following static +checker warning: + + drivers/gpu/drm/xen/xen_drm_front_gem.c:140 xen_drm_front_gem_create() + warn: passing zero to 'ERR_CAST' + +drivers/gpu/drm/xen/xen_drm_front_gem.c + 133 struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, + 134 size_t size) + 135 { + 136 struct xen_gem_object *xen_obj; + 137 + 138 xen_obj = gem_create(dev, size); + 139 if (IS_ERR_OR_NULL(xen_obj)) + 140 return ERR_CAST(xen_obj); + +Fix this and the rest of misused places with IS_ERR_OR_NULL in the +driver. 
+ +Fixes: c575b7eeb89f: "drm/xen-front: Add support for Xen PV display frontend" + +Signed-off-by: Oleksandr Andrushchenko +Reported-by: Dan Carpenter +Reviewed-by: Dan Carpenter +Cc: +Link: https://lore.kernel.org/r/20200813062113.11030-3-andr2000@gmail.com +Signed-off-by: Juergen Gross +--- + drivers/gpu/drm/xen/xen_drm_front.c | 4 ++-- + drivers/gpu/drm/xen/xen_drm_front_gem.c | 8 ++++---- + drivers/gpu/drm/xen/xen_drm_front_kms.c | 2 +- + 3 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c +index 1fd458e877ca..51818e76facd 100644 +--- a/drivers/gpu/drm/xen/xen_drm_front.c ++++ b/drivers/gpu/drm/xen/xen_drm_front.c +@@ -400,7 +400,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp, + args->size = args->pitch * args->height; + + obj = xen_drm_front_gem_create(dev, args->size); +- if (IS_ERR_OR_NULL(obj)) { ++ if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + goto fail; + } +diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c +index f0b85e094111..4ec8a49241e1 100644 +--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c ++++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c +@@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) + + size = round_up(size, PAGE_SIZE); + xen_obj = gem_create_obj(dev, size); +- if (IS_ERR_OR_NULL(xen_obj)) ++ if (IS_ERR(xen_obj)) + return xen_obj; + + if (drm_info->front_info->cfg.be_alloc) { +@@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) + */ + xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); + xen_obj->pages = drm_gem_get_pages(&xen_obj->base); +- if (IS_ERR_OR_NULL(xen_obj->pages)) { ++ if (IS_ERR(xen_obj->pages)) { + ret = PTR_ERR(xen_obj->pages); + xen_obj->pages = NULL; + goto fail; +@@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, + struct xen_gem_object *xen_obj; + + xen_obj = gem_create(dev, size); +- if (IS_ERR_OR_NULL(xen_obj)) ++ if (IS_ERR(xen_obj)) + return ERR_CAST(xen_obj); + + return &xen_obj->base; +@@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, + + size = attach->dmabuf->size; + xen_obj = gem_create_obj(dev, size); +- if (IS_ERR_OR_NULL(xen_obj)) ++ if (IS_ERR(xen_obj)) + return ERR_CAST(xen_obj); + + ret = gem_alloc_pages_array(xen_obj, size); +diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c +index 78096bbcd226..ef11b1e4de39 100644 +--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c ++++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c +@@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp, + int ret; + + fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs); +- if (IS_ERR_OR_NULL(fb)) ++ if (IS_ERR(fb)) + return fb; + + gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); +-- +2.26.2 + diff --git a/patches.suse/0001-xen-balloon-fix-accounting-in-alloc_xenballooned_pag.patch b/patches.suse/0001-xen-balloon-fix-accounting-in-alloc_xenballooned_pag.patch new file mode 100644 index 0000000..094b869 --- /dev/null +++ b/patches.suse/0001-xen-balloon-fix-accounting-in-alloc_xenballooned_pag.patch @@ -0,0 +1,45 @@ +Patch-mainline: v5.9-rc1 +Git-commit: 1951fa33ec259abdf3497bfee7b63e7ddbb1a394 +References: bsc#1065600 +From: Roger Pau Monne +Date: Mon, 27 Jul 2020 11:13:39 +0200 +Subject: [PATCH] xen/balloon: fix accounting in alloc_xenballooned_pages error + path +MIME-Version: 1.0 
+Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +target_unpopulated is incremented with nr_pages at the start of the +function, but the call to free_xenballooned_pages will only subtract +pgno number of pages, and thus the rest need to be subtracted before +returning or else accounting will be skewed. + +Signed-off-by: Roger Pau Monné +Reviewed-by: Juergen Gross +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20200727091342.52325-2-roger.pau@citrix.com +Signed-off-by: Juergen Gross +--- + drivers/xen/balloon.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c +index 77c57568e5d7..3cb10ed32557 100644 +--- a/drivers/xen/balloon.c ++++ b/drivers/xen/balloon.c +@@ -630,6 +630,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages) + out_undo: + mutex_unlock(&balloon_mutex); + free_xenballooned_pages(pgno, pages); ++ /* ++ * NB: free_xenballooned_pages will only subtract pgno pages, but since ++ * target_unpopulated is incremented with nr_pages at the start we need ++ * to remove the remaining ones also, or accounting will be screwed. ++ */ ++ balloon_stats.target_unpopulated -= nr_pages - pgno; + return ret; + } + EXPORT_SYMBOL(alloc_xenballooned_pages); +-- +2.26.2 + diff --git a/patches.suse/0001-xen-balloon-make-the-balloon-wait-interruptible.patch b/patches.suse/0001-xen-balloon-make-the-balloon-wait-interruptible.patch new file mode 100644 index 0000000..2b6046e --- /dev/null +++ b/patches.suse/0001-xen-balloon-make-the-balloon-wait-interruptible.patch @@ -0,0 +1,45 @@ +Patch-mainline: v5.9-rc1 +Git-commit: 88a479ff6ef8af7f07e11593d58befc644244ff7 +References: bsc#1065600 +From: Roger Pau Monne +Date: Mon, 27 Jul 2020 11:13:40 +0200 +Subject: [PATCH] xen/balloon: make the balloon wait interruptible +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +So it can be killed, or else processes can get hung indefinitely +waiting for balloon pages. + +Signed-off-by: Roger Pau Monné +Reviewed-by: Juergen Gross +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20200727091342.52325-3-roger.pau@citrix.com +Signed-off-by: Juergen Gross +--- + drivers/xen/balloon.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c +index 3cb10ed32557..292413b27575 100644 +--- a/drivers/xen/balloon.c ++++ b/drivers/xen/balloon.c +@@ -568,11 +568,13 @@ static int add_ballooned_pages(int nr_pages) + if (xen_hotplug_unpopulated) { + st = reserve_additional_memory(); + if (st != BP_ECANCELED) { ++ int rc; ++ + mutex_unlock(&balloon_mutex); +- wait_event(balloon_wq, ++ rc = wait_event_interruptible(balloon_wq, + !list_empty(&ballooned_pages)); + mutex_lock(&balloon_mutex); +- return 0; ++ return rc ? 
-ENOMEM : 0; + } + } + +-- +2.26.2 + diff --git a/patches.suse/0002-block-check-queue-s-limits.discard_granularity-in-__.patch b/patches.suse/0002-block-check-queue-s-limits.discard_granularity-in-__.patch new file mode 100644 index 0000000..f98b4ae --- /dev/null +++ b/patches.suse/0002-block-check-queue-s-limits.discard_granularity-in-__.patch @@ -0,0 +1,102 @@ +From b35fd7422c2f8e04496f5a770bd4e1a205414b3f Mon Sep 17 00:00:00 2001 +From: Coly Li +Date: Thu, 6 Aug 2020 01:25:03 +0800 +Subject: [PATCH] block: check queue's limits.discard_granularity in + __blkdev_issue_discard() +Git-commit: b35fd7422c2f8e04496f5a770bd4e1a205414b3f +Patch-mainline: v5.9-rc1 +References: bsc#1152148 + +If create a loop device with a backing NVMe SSD, current loop device +driver doesn't correctly set its queue's limits.discard_granularity and +leaves it as 0. If a discard request at LBA 0 on this loop device, in +__blkdev_issue_discard() the calculated req_sects will be 0, and a zero +length discard request will trigger a BUG() panic in generic block layer +code at block/blk-mq.c:563. + +[ 955.565006][ C39] ------------[ cut here ]------------ +[ 955.559660][ C39] invalid opcode: 0000 [#1] SMP NOPTI +[ 955.622171][ C39] CPU: 39 PID: 248 Comm: ksoftirqd/39 Tainted: G E 5.8.0-default+ #40 +[ 955.622171][ C39] Hardware name: Lenovo ThinkSystem SR650 -[7X05CTO1WW]-/-[7X05CTO1WW]-, BIOS -[IVE160M-2.70]- 07/17/2020 +[ 955.622175][ C39] RIP: 0010:blk_mq_end_request+0x107/0x110 +[ 955.622177][ C39] Code: 48 8b 03 e9 59 ff ff ff 48 89 df 5b 5d 41 5c e9 9f ed ff ff 48 8b 35 98 3c f4 00 48 83 c7 10 48 83 c6 19 e8 cb 56 c9 ff eb cb <0f> 0b 0f 1f 80 00 00 00 00 0f 1f 44 00 00 55 48 89 e5 41 56 41 54 +[ 955.622179][ C39] RSP: 0018:ffffb1288701fe28 EFLAGS: 00010202 +[ 955.749277][ C39] RAX: 0000000000000001 RBX: ffff956fffba5080 RCX: 0000000000004003 +[ 955.749278][ C39] RDX: 0000000000000003 RSI: 0000000000000000 RDI: 0000000000000000 +[ 955.749279][ C39] RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 +[ 955.749279][ C39] R10: ffffb1288701fd28 R11: 0000000000000001 R12: ffffffffa8e05160 +[ 955.749280][ C39] R13: 0000000000000004 R14: 0000000000000004 R15: ffffffffa7ad3a1e +[ 955.749281][ C39] FS: 0000000000000000(0000) GS:ffff95bfbda00000(0000) knlGS:0000000000000000 +[ 955.749282][ C39] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 955.749282][ C39] CR2: 00007f6f0ef766a8 CR3: 0000005a37012002 CR4: 00000000007606e0 +[ 955.749283][ C39] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +[ 955.749284][ C39] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 +[ 955.749284][ C39] PKRU: 55555554 +[ 955.749285][ C39] Call Trace: +[ 955.749290][ C39] blk_done_softirq+0x99/0xc0 +[ 957.550669][ C39] __do_softirq+0xd3/0x45f +[ 957.550677][ C39] ? smpboot_thread_fn+0x2f/0x1e0 +[ 957.550679][ C39] ? smpboot_thread_fn+0x74/0x1e0 +[ 957.550680][ C39] ? smpboot_thread_fn+0x14e/0x1e0 +[ 957.550684][ C39] run_ksoftirqd+0x30/0x60 +[ 957.550687][ C39] smpboot_thread_fn+0x149/0x1e0 +[ 957.886225][ C39] ? sort_range+0x20/0x20 +[ 957.886226][ C39] kthread+0x137/0x160 +[ 957.886228][ C39] ? 
kthread_park+0x90/0x90 +[ 957.886231][ C39] ret_from_fork+0x22/0x30 +[ 959.117120][ C39] ---[ end trace 3dacdac97e2ed164 ]--- + +This is the procedure to reproduce the panic, + # modprobe scsi_debug delay=0 dev_size_mb=2048 max_queue=1 + # losetup -f /dev/nvme0n1 --direct-io=on + # blkdiscard /dev/loop0 -o 0 -l 0x200 + +This patch fixes the issue by checking q->limits.discard_granularity in +__blkdev_issue_discard() before composing the discard bio. If the value +is 0, then prints a warning oops information and returns -EOPNOTSUPP to +the caller to indicate that this buggy device driver doesn't support +discard request. + +Fixes: 9b15d109a6b2 ("block: improve discard bio alignment in __blkdev_issue_discard()") +Fixes: c52abf563049 ("loop: Better discard support for block devices") +Reported-and-suggested-by: Ming Lei +Signed-off-by: Coly Li +Reviewed-by: Ming Lei +Reviewed-by: Hannes Reinecke +Reviewed-by: Jack Wang +Cc: Bart Van Assche +Cc: Christoph Hellwig +Cc: Darrick J. Wong +Cc: Enzo Matsumiya +Cc: Evan Green +Cc: Jens Axboe +Cc: Martin K. Petersen +Cc: Xiao Ni +Signed-off-by: Jens Axboe +--- + block/blk-lib.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/block/blk-lib.c b/block/blk-lib.c +index 019e09bb9c0e..0d1811e57ac7 100644 +--- a/block/blk-lib.c ++++ b/block/blk-lib.c +@@ -47,6 +47,15 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + op = REQ_OP_DISCARD; + } + ++ /* In case the discard granularity isn't set by buggy device driver */ ++ if (WARN_ON_ONCE(!q->limits.discard_granularity)) { ++ char dev_name[BDEVNAME_SIZE]; ++ ++ bdevname(bdev, dev_name); ++ pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name); ++ return -EOPNOTSUPP; ++ } ++ + bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; + if ((sector | nr_sects) & bs_mask) + return -EINVAL; +-- +2.26.2 + diff --git a/patches.suse/0003-net-stmmac-Fix-RX-packet-size-8191.patch b/patches.suse/0003-net-stmmac-Fix-RX-packet-size-8191.patch new file mode 100644 index 0000000..3f4655d --- /dev/null +++ b/patches.suse/0003-net-stmmac-Fix-RX-packet-size-8191.patch @@ -0,0 +1,102 @@ +From dcc67c8db543fcd4eb7ddaa6c70be15fa4d0bbbd Mon Sep 17 00:00:00 2001 +From: Thor Thayer +Date: Thu, 8 Nov 2018 11:42:14 -0600 +Subject: [PATCH 03/14] net: stmmac: Fix RX packet size > 8191 +Git-commit: 8137b6ef0ce469154e5cf19f8e7fe04d9a72ac5e +Patch-mainline: v4.20-rc2 +References: git-fixes + +Ping problems with packets > 8191 as shown: + +PING 192.168.1.99 (192.168.1.99) 8150(8178) bytes of data. +8158 bytes from 192.168.1.99: icmp_seq=1 ttl=64 time=0.669 ms +wrong data byte 8144 should be 0xd0 but was 0x0 +16 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f + 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f +%< ---------------snip-------------------------------------- +8112 b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf + c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf +8144 0 0 0 0 d0 d1 + ^^^^^^^ +Notice the 4 bytes of 0 before the expected byte of d0. + +Databook notes that the RX buffer must be a multiple of 4/8/16 +bytes [1]. + +Update the DMA Buffer size define to 8188 instead of 8192. Remove +the -1 from the RX buffer size allocations and use the new +DMA Buffer size directly. + +[1] Synopsys DesignWare Cores Ethernet MAC Universal v3.70a + [section 8.4.2 - Table 8-24] + +Tested on SoCFPGA Stratix10 with ping sweep from 100 to 8300 byte packets. 
+ +Fixes: 286a83721720 ("stmmac: add CHAINED descriptor mode support (V4)") +Suggested-by: Jose Abreu +Signed-off-by: Thor Thayer +Signed-off-by: David S. Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/stmicro/stmmac/common.h | 3 ++- + drivers/net/ethernet/stmicro/stmmac/descs_com.h | 2 +- + drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 2 +- + drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 2 +- + 4 files changed, 5 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h +index b7ce3fbb5375..2e5db58f09d0 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/common.h ++++ b/drivers/net/ethernet/stmicro/stmmac/common.h +@@ -340,7 +340,8 @@ struct dma_features { + + /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ + #define BUF_SIZE_16KiB 16384 +-#define BUF_SIZE_8KiB 8192 ++/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */ ++#define BUF_SIZE_8KiB 8188 + #define BUF_SIZE_4KiB 4096 + #define BUF_SIZE_2KiB 2048 + +diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h +index ca9d7e48034c..40d6356a7e73 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h ++++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h +@@ -31,7 +31,7 @@ + /* Enhanced descriptors */ + static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) + { +- p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1) ++ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB + << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK); + +diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +index 7546b3664113..e9b03fffa377 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end) + { + p->des0 |= cpu_to_le32(RDES0_OWN); +- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); ++ p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); + + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_rx_set_on_chain(p); +diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +index 34253c2de156..d4c3bf78d928 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +@@ -144,7 +144,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) + static int stmmac_set_16kib_bfsize(int mtu) + { + int ret = 0; +- if (unlikely(mtu >= BUF_SIZE_8KiB)) ++ if (unlikely(mtu > BUF_SIZE_8KiB)) + ret = BUF_SIZE_16KiB; + return ret; + } +-- +2.16.4 + diff --git a/patches.suse/0004-net-qcom-emac-add-missed-clk_disable_unprepare-in-er.patch b/patches.suse/0004-net-qcom-emac-add-missed-clk_disable_unprepare-in-er.patch new file mode 100644 index 0000000..9fdbd47 --- /dev/null +++ b/patches.suse/0004-net-qcom-emac-add-missed-clk_disable_unprepare-in-er.patch @@ -0,0 +1,57 @@ +From cbb50d338a0fd0a146ffe3c05d6610c728f671b4 Mon Sep 17 00:00:00 2001 +From: Wang Hai +Date: Mon, 10 Aug 2020 10:57:05 +0800 +Subject: [PATCH 04/14] net: qcom/emac: add missed clk_disable_unprepare in + error path of emac_clks_phase1_init +Git-commit: 50caa777a3a24d7027748e96265728ce748b41ef +Patch-mainline: v5.9-rc1 +References: git-fixes + +Fix the missing clk_disable_unprepare() before return +from emac_clks_phase1_init() in the error handling case. 
+ +Fixes: b9b17debc69d ("net: emac: emac gigabit ethernet controller driver") +Reported-by: Hulk Robot +Signed-off-by: Wang Hai +Acked-by: Timur Tabi +Signed-off-by: David S. Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/qualcomm/emac/emac.c | 17 ++++++++++++++--- + 1 file changed, 14 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c +index 98a326faea29..3df1f8e960c0 100644 +--- a/drivers/net/ethernet/qualcomm/emac/emac.c ++++ b/drivers/net/ethernet/qualcomm/emac/emac.c +@@ -490,13 +490,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev, + + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]); + if (ret) +- return ret; ++ goto disable_clk_axi; + + ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); + if (ret) +- return ret; ++ goto disable_clk_cfg_ahb; ++ ++ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); ++ if (ret) ++ goto disable_clk_cfg_ahb; + +- return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); ++ return 0; ++ ++disable_clk_cfg_ahb: ++ clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]); ++disable_clk_axi: ++ clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]); ++ ++ return ret; + } + + /* Enable clocks; needs emac_clks_phase1_init to be called before */ +-- +2.16.4 + diff --git a/patches.suse/0005-net-mlx5-Delete-extra-dump-stack-that-gives-nothing.patch b/patches.suse/0005-net-mlx5-Delete-extra-dump-stack-that-gives-nothing.patch new file mode 100644 index 0000000..c0e74f0 --- /dev/null +++ b/patches.suse/0005-net-mlx5-Delete-extra-dump-stack-that-gives-nothing.patch @@ -0,0 +1,109 @@ +From 2c28f04dde06f42be3e948b35e79cad0ec4e5e7f Mon Sep 17 00:00:00 2001 +From: Leon Romanovsky +Date: Sun, 19 Jul 2020 11:04:30 +0300 +Subject: [PATCH 05/14] net/mlx5: Delete extra dump stack that gives nothing +Git-commit: 6c4e9bcfb48933d533ff975e152757991556294a +Patch-mainline: v5.9-rc1 +References: git-fixes + +The WARN_*() macros are intended to catch impossible situations +from the SW point of view. They gave a little in case HW<->SW interface +is out-of-sync. + +Such out-of-sync scenario can be due to SW errors that are not part +of this flow or because some HW errors, where dump stack won't help +either. + +This specific WARN_ON() is useless because mlx5_core code is prepared +to handle such situations and will unfold everything correctly while +providing enough information to the users to understand why FS is not +working. + +WARNING: CPU: 0 PID: 3222 at drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:825 connect_fts_in_prio.isra.20+0x1dd/0x260 linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:825 +Kernel panic - not syncing: panic_on_warn set ... 
+CPU: 0 PID: 3222 Comm: syz-executor861 Not tainted 5.5.0-rc6+ #2 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS +rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014 +Call Trace: + __dump_stack linux/lib/dump_stack.c:77 [inline] + dump_stack+0x94/0xce linux/lib/dump_stack.c:118 + panic+0x234/0x56f linux/kernel/panic.c:221 + __warn+0x1cc/0x1e1 linux/kernel/panic.c:582 + report_bug+0x200/0x310 linux/lib/bug.c:195 + fixup_bug.part.11+0x32/0x80 linux/arch/x86/kernel/traps.c:174 + fixup_bug linux/arch/x86/kernel/traps.c:273 [inline] + do_error_trap+0xd3/0x100 linux/arch/x86/kernel/traps.c:267 + do_invalid_op+0x31/0x40 linux/arch/x86/kernel/traps.c:286 + invalid_op+0x1e/0x30 linux/arch/x86/entry/entry_64.S:1027 +RIP: 0010:connect_fts_in_prio.isra.20+0x1dd/0x260 +linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:825 +Code: 00 00 48 c7 c2 60 8c 31 84 48 c7 c6 00 81 31 84 48 8b 38 e8 3c a8 +cb ff 41 83 fd 01 8b 04 24 0f 8e 29 ff ff ff e8 83 7b bc fe <0f> 0b 8b +04 24 e9 1a ff ff ff 89 04 24 e8 c1 20 e0 fe 8b 04 24 eb +RSP: 0018:ffffc90004bb7858 EFLAGS: 00010293 +RAX: ffff88805de98e80 RBX: 0000000000000c96 RCX: ffffffff827a853d +RDX: 0000000000000000 RSI: 0000000000000000 RDI: fffff52000976efa +RBP: 0000000000000007 R08: ffffed100da060e3 R09: ffffed100da060e3 +R10: 0000000000000001 R11: ffffed100da060e2 R12: dffffc0000000000 +R13: 0000000000000002 R14: ffff8880683a1a10 R15: ffffed100d07bc1c + connect_prev_fts linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:844 [inline] + connect_flow_table linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:975 [inline] + __mlx5_create_flow_table+0x8f8/0x1710 linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:1064 + mlx5_create_flow_table linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:1094 [inline] + mlx5_create_auto_grouped_flow_table+0xe1/0x210 linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:1136 + _get_prio linux/drivers/infiniband/hw/mlx5/main.c:3286 [inline] + get_flow_table+0x2ea/0x760 linux/drivers/infiniband/hw/mlx5/main.c:3376 + mlx5_ib_create_flow+0x331/0x11c0 linux/drivers/infiniband/hw/mlx5/main.c:3896 + ib_uverbs_ex_create_flow+0x13e8/0x1b40 linux/drivers/infiniband/core/uverbs_cmd.c:3311 + ib_uverbs_write+0xaa5/0xdf0 linux/drivers/infiniband/core/uverbs_main.c:769 + __vfs_write+0x7c/0x100 linux/fs/read_write.c:494 + vfs_write+0x168/0x4a0 linux/fs/read_write.c:558 + ksys_write+0xc8/0x200 linux/fs/read_write.c:611 + do_syscall_64+0x9c/0x390 linux/arch/x86/entry/common.c:294 + entry_SYSCALL_64_after_hwframe+0x44/0xa9 +RIP: 0033:0x45a059 +Code: 00 00 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 89 f8 48 89 +f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 +f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 +RSP: 002b:00007fcc17564c98 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 +RAX: ffffffffffffffda RBX: 00007fcc17564ca0 RCX: 000000000045a059 +RDX: 0000000000000030 RSI: 00000000200003c0 RDI: 0000000000000005 +RBP: 0000000000000007 R08: 0000000000000002 R09: 0000000000003131 +R10: 0000000000000000 R11: 0000000000000246 R12: 00000000006e636c +R13: 0000000000000000 R14: 00000000006e6360 R15: 00007ffdcbdaf6a0 +Dumping ftrace buffer: + (ftrace buffer empty) +Kernel Offset: disabled +Rebooting in 1 seconds.. 
+ +Fixes: f90edfd279f3 ("net/mlx5_core: Connect flow tables") +Reviewed-by: Maor Gottlieb +Reviewed-by: Mark Bloch +Signed-off-by: Leon Romanovsky +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 13080def90cf..69ecbce8773d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -776,11 +776,10 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, + i++; + err = root->cmds->modify_flow_table(dev, iter, ft); + if (err) { +- mlx5_core_warn(dev, "Failed to modify flow table %d\n", +- iter->id); ++ mlx5_core_err(dev, ++ "Failed to modify flow table id %d, type %d, err %d\n", ++ iter->id, iter->type, err); + /* The driver is out of sync with the FW */ +- if (i > 1) +- WARN_ON(true); + return err; + } + } +-- +2.16.4 + diff --git a/patches.suse/0006-fsl-fman-fix-eth-hash-table-allocation.patch b/patches.suse/0006-fsl-fman-fix-eth-hash-table-allocation.patch new file mode 100644 index 0000000..2570449 --- /dev/null +++ b/patches.suse/0006-fsl-fman-fix-eth-hash-table-allocation.patch @@ -0,0 +1,37 @@ +From aa18b985a9cde58f23151e96af7a0b65dcf5b727 Mon Sep 17 00:00:00 2001 +From: Florinel Iordache +Date: Mon, 3 Aug 2020 10:07:34 +0300 +Subject: [PATCH 06/14] fsl/fman: fix eth hash table allocation +Git-commit: 3207f715c34317d08e798e11a10ce816feb53c0f +Patch-mainline: v5.9-rc1 +References: git-fixes + +Fix memory allocation for ethernet address hash table. +The code was wrongly allocating an array for eth hash table which +is incorrect because this is the main structure for eth hash table +(struct eth_hash_t) that contains inside a number of elements. + +Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") +Signed-off-by: Florinel Iordache +Signed-off-by: David S. Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/freescale/fman/fman_mac.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h +index dd6d0526f6c1..19f327efdaff 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_mac.h ++++ b/drivers/net/ethernet/freescale/fman/fman_mac.h +@@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size) + struct eth_hash_t *hash; + + /* Allocate address hash table */ +- hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); ++ hash = kmalloc(sizeof(*hash), GFP_KERNEL); + if (!hash) + return NULL; + +-- +2.16.4 + diff --git a/patches.suse/0007-fsl-fman-check-dereferencing-null-pointer.patch b/patches.suse/0007-fsl-fman-check-dereferencing-null-pointer.patch new file mode 100644 index 0000000..f451481 --- /dev/null +++ b/patches.suse/0007-fsl-fman-check-dereferencing-null-pointer.patch @@ -0,0 +1,71 @@ +From c9c5d465c27ba8fc6f736e096abd1d689d30090c Mon Sep 17 00:00:00 2001 +From: Florinel Iordache +Date: Mon, 3 Aug 2020 10:07:33 +0300 +Subject: [PATCH 07/14] fsl/fman: check dereferencing null pointer +Git-commit: cc5d229a122106733a85c279d89d7703f21e4d4f +Patch-mainline: v5.9-rc1 +References: git-fixes + +Add a safe check to avoid dereferencing null pointer + +Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") +Signed-off-by: Florinel Iordache +Signed-off-by: David S. 
Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/freescale/fman/fman_dtsec.c | 4 ++-- + drivers/net/ethernet/freescale/fman/fman_memac.c | 2 +- + drivers/net/ethernet/freescale/fman/fman_tgec.c | 2 +- + 3 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c +index 1ca543ac8f2c..d2de9ea80c43 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c ++++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c +@@ -1205,7 +1205,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) + list_for_each(pos, + &dtsec->multicast_addr_hash->lsts[bucket]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); +- if (hash_entry->addr == addr) { ++ if (hash_entry && hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; +@@ -1218,7 +1218,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) + list_for_each(pos, + &dtsec->unicast_addr_hash->lsts[bucket]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); +- if (hash_entry->addr == addr) { ++ if (hash_entry && hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; +diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c +index 41c6fa200e74..998647a241d9 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_memac.c ++++ b/drivers/net/ethernet/freescale/fman/fman_memac.c +@@ -986,7 +986,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) + + list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); +- if (hash_entry->addr == addr) { ++ if (hash_entry && hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; +diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c +index f75b9c11b2d2..ac5a281e0ec3 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c ++++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c +@@ -630,7 +630,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) + + list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); +- if (hash_entry->addr == addr) { ++ if (hash_entry && hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; +-- +2.16.4 + diff --git a/patches.suse/0008-fsl-fman-fix-unreachable-code.patch b/patches.suse/0008-fsl-fman-fix-unreachable-code.patch new file mode 100644 index 0000000..7f92135 --- /dev/null +++ b/patches.suse/0008-fsl-fman-fix-unreachable-code.patch @@ -0,0 +1,34 @@ +From 8bae108fb8d47c477313b3e61144de87908e7d30 Mon Sep 17 00:00:00 2001 +From: Florinel Iordache +Date: Mon, 3 Aug 2020 10:07:32 +0300 +Subject: [PATCH 08/14] fsl/fman: fix unreachable code +Git-commit: cc79fd8f557767de90ff199d3b6fb911df43160a +Patch-mainline: v5.9-rc1 +References: git-fixes + +The parameter 'priority' is incorrectly forced to zero which ultimately +induces logically dead code in the subsequent lines. + +Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") +Signed-off-by: Florinel Iordache +Signed-off-by: David S. 
Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/freescale/fman/fman_memac.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c +index 998647a241d9..0aa2004c1392 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_memac.c ++++ b/drivers/net/ethernet/freescale/fman/fman_memac.c +@@ -856,7 +856,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, + + tmp = ioread32be(®s->command_config); + tmp &= ~CMD_CFG_PFC_MODE; +- priority = 0; + + iowrite32be(tmp, ®s->command_config); + +-- +2.16.4 + diff --git a/patches.suse/0009-fsl-fman-fix-dereference-null-return-value.patch b/patches.suse/0009-fsl-fman-fix-dereference-null-return-value.patch new file mode 100644 index 0000000..b095f8a --- /dev/null +++ b/patches.suse/0009-fsl-fman-fix-dereference-null-return-value.patch @@ -0,0 +1,49 @@ +From 5f5bd17ce1a189fc58d0d3ece5f89a36557bbc1d Mon Sep 17 00:00:00 2001 +From: Florinel Iordache +Date: Mon, 3 Aug 2020 10:07:31 +0300 +Subject: [PATCH 09/14] fsl/fman: fix dereference null return value +Git-commit: 0572054617f32670abab4b4e89a876954d54b704 +Patch-mainline: v5.9-rc1 +References: git-fixes + +Check before using returned value to avoid dereferencing null pointer. + +Fixes: 18a6c85fcc78 ("fsl/fman: Add FMan Port Support") +Signed-off-by: Florinel Iordache +Signed-off-by: David S. Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/freescale/fman/fman_port.c | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c +index d8d9c1462e57..a2b649fb5407 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_port.c ++++ b/drivers/net/ethernet/freescale/fman/fman_port.c +@@ -1756,6 +1756,7 @@ static int fman_port_probe(struct platform_device *of_dev) + struct fman_port *port; + struct fman *fman; + struct device_node *fm_node, *port_node; ++ struct platform_device *fm_pdev; + struct resource res; + struct resource *dev_res; + u32 val; +@@ -1780,8 +1781,14 @@ static int fman_port_probe(struct platform_device *of_dev) + goto return_err; + } + +- fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); ++ fm_pdev = of_find_device_by_node(fm_node); + of_node_put(fm_node); ++ if (!fm_pdev) { ++ err = -EINVAL; ++ goto return_err; ++ } ++ ++ fman = dev_get_drvdata(&fm_pdev->dev); + if (!fman) { + err = -EINVAL; + goto return_err; +-- +2.16.4 + diff --git a/patches.suse/0010-fsl-fman-use-32-bit-unsigned-integer.patch b/patches.suse/0010-fsl-fman-use-32-bit-unsigned-integer.patch new file mode 100644 index 0000000..053fa00 --- /dev/null +++ b/patches.suse/0010-fsl-fman-use-32-bit-unsigned-integer.patch @@ -0,0 +1,40 @@ +From 4212fc62224cd93125c8a6e0d6f9bbc18ec0b3b7 Mon Sep 17 00:00:00 2001 +From: Florinel Iordache +Date: Mon, 3 Aug 2020 10:07:30 +0300 +Subject: [PATCH 10/14] fsl/fman: use 32-bit unsigned integer +Git-commit: 99f47abd9f7bf6e365820d355dc98f6955a562df +Patch-mainline: v5.9-rc1 +References: git-fixes + +Potentially overflowing expression (ts_freq << 16 and intgr << 16) +declared as type u32 (32-bit unsigned) is evaluated using 32-bit +arithmetic and then used in a context that expects an expression of +type u64 (64-bit unsigned) which ultimately is used as 16-bit +unsigned by typecasting to u16. Fixed by using an unsigned 32-bit +integer since the value is truncated anyway in the end. 
+ +Fixes: 414fd46e7762 ("fsl/fman: Add FMan support") +Signed-off-by: Florinel Iordache +Signed-off-by: David S. Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/freescale/fman/fman.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c +index d83076f8dfe2..b1342365b63a 100644 +--- a/drivers/net/ethernet/freescale/fman/fman.c ++++ b/drivers/net/ethernet/freescale/fman/fman.c +@@ -1391,8 +1391,7 @@ static void enable_time_stamp(struct fman *fman) + { + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + u16 fm_clk_freq = fman->state->fm_clk_freq; +- u32 tmp, intgr, ts_freq; +- u64 frac; ++ u32 tmp, intgr, ts_freq, frac; + + ts_freq = (u32)(1 << fman->state->count1_micro_bit); + /* configure timestamp so that bit 8 will count 1 microsecond +-- +2.16.4 + diff --git a/patches.suse/0011-net-spider_net-Fix-the-size-used-in-a-dma_free_coher.patch b/patches.suse/0011-net-spider_net-Fix-the-size-used-in-a-dma_free_coher.patch new file mode 100644 index 0000000..65fdb59 --- /dev/null +++ b/patches.suse/0011-net-spider_net-Fix-the-size-used-in-a-dma_free_coher.patch @@ -0,0 +1,39 @@ +From 6dba01c73ab97532467197bb660e729d194258de Mon Sep 17 00:00:00 2001 +From: Christophe JAILLET +Date: Sun, 2 Aug 2020 15:53:33 +0200 +Subject: [PATCH 11/14] net: spider_net: Fix the size used in a + 'dma_free_coherent()' call +Git-commit: 36f28f7687a9ce665479cce5d64ce7afaa9e77ae +Patch-mainline: v5.9-rc1 +References: git-fixes + +Update the size used in 'dma_free_coherent()' in order to match the one +used in the corresponding 'dma_alloc_coherent()', in +'spider_net_init_chain()'. + +Fixes: d4ed8f8d1fb7 ("Spidernet DMA coalescing") +Signed-off-by: Christophe JAILLET +Signed-off-by: David S. Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/toshiba/spider_net.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c +index cec9e70ab995..ec8aff31623c 100644 +--- a/drivers/net/ethernet/toshiba/spider_net.c ++++ b/drivers/net/ethernet/toshiba/spider_net.c +@@ -296,8 +296,8 @@ spider_net_free_chain(struct spider_net_card *card, + descr = descr->next; + } while (descr != chain->ring); + +- dma_free_coherent(&card->pdev->dev, chain->num_desc, +- chain->hwring, chain->dma_addr); ++ dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), ++ chain->hwring, chain->dma_addr); + } + + /** +-- +2.16.4 + diff --git a/patches.suse/0012-net-ethernet-aquantia-Fix-wrong-return-value.patch b/patches.suse/0012-net-ethernet-aquantia-Fix-wrong-return-value.patch new file mode 100644 index 0000000..40c6db6 --- /dev/null +++ b/patches.suse/0012-net-ethernet-aquantia-Fix-wrong-return-value.patch @@ -0,0 +1,36 @@ +From e01fa685df7e7827a416d12ef3bb75a12c434b8f Mon Sep 17 00:00:00 2001 +From: Tianjia Zhang +Date: Sun, 2 Aug 2020 19:15:37 +0800 +Subject: [PATCH 12/14] net: ethernet: aquantia: Fix wrong return value +Git-commit: 0470a48880f8bc42ce26962b79c7b802c5a695ec +Patch-mainline: v5.9-rc1 +References: git-fixes + +In function hw_atl_a0_hw_multicast_list_set(), when an invalid +request is encountered, a negative error code should be returned. + +Fixes: bab6de8fd180b ("net: ethernet: aquantia: Atlantic A0 and B0 specific functions") +Cc: David VomLehn +Signed-off-by: Tianjia Zhang +Signed-off-by: David S. 
Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +index 47167aa3c588..372b81bcb0c6 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +@@ -729,7 +729,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, + int err = 0; + + if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { +- err = EBADRQC; ++ err = -EBADRQC; + goto err_exit; + } + for (self->aq_nic_cfg->mc_list_count = 0U; +-- +2.16.4 + diff --git a/patches.suse/0013-net-mvpp2-fix-memory-leak-in-mvpp2_rx.patch b/patches.suse/0013-net-mvpp2-fix-memory-leak-in-mvpp2_rx.patch new file mode 100644 index 0000000..812d199 --- /dev/null +++ b/patches.suse/0013-net-mvpp2-fix-memory-leak-in-mvpp2_rx.patch @@ -0,0 +1,34 @@ +From 8f20f10f31757d9e7fbbbbc1c8c688fb2f5ec7d0 Mon Sep 17 00:00:00 2001 +From: Lorenzo Bianconi +Date: Fri, 31 Jul 2020 10:38:32 +0200 +Subject: [PATCH 13/14] net: mvpp2: fix memory leak in mvpp2_rx +Git-commit: d6526926de7397a97308780911565e31a6b67b59 +Patch-mainline: v5.9-rc1 +References: git-fixes + +Release skb memory in mvpp2_rx() if mvpp2_rx_refill routine fails + +Fixes: b5015854674b ("net: mvpp2: fix refilling BM pools in RX path") +Signed-off-by: Lorenzo Bianconi +Acked-by: Matteo Croce +Signed-off-by: David S. Miller +Acked-by: Denis Kirjanov +--- + drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +index 8c3236a5d974..2f9ed98ad869 100644 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +@@ -2735,6 +2735,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, + err = mvpp2_rx_refill(port, bm_pool, pool); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); ++ dev_kfree_skb_any(skb); + goto err_drop_frame; + } + +-- +2.16.4 + diff --git a/patches.suse/0014-net-mlx5e-vxlan-Use-RCU-for-vxlan-table-lookup.patch b/patches.suse/0014-net-mlx5e-vxlan-Use-RCU-for-vxlan-table-lookup.patch new file mode 100644 index 0000000..74ed105 --- /dev/null +++ b/patches.suse/0014-net-mlx5e-vxlan-Use-RCU-for-vxlan-table-lookup.patch @@ -0,0 +1,161 @@ +From a7ac12760d59a9065826b31d6f5a2c5742ef85d5 Mon Sep 17 00:00:00 2001 +From: Saeed Mahameed +Date: Fri, 15 May 2020 17:09:05 -0700 +Subject: [PATCH 14/14] net/mlx5e: vxlan: Use RCU for vxlan table lookup +Git-commit: 7a64ca862ac96d5e78a59bd57549034134ee0949 +Patch-mainline: v5.9-rc1 +References: git-fixes + +Remove the spinlock protecting the vxlan table and use RCU instead. +This will improve performance as it will eliminate contention on data +path cores. 
+ +Fixes: b3f63c3d5e2c ("net/mlx5e: Add netdev support for VXLAN tunneling") +Signed-off-by: Saeed Mahameed +Reviewed-by: Maxim Mikityanskiy +Acked-by: Denis Kirjanov +--- + .../net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 65 +++++++++------------- + 1 file changed, 27 insertions(+), 38 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +index 9a8fd762167b..9d30adbe1657 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +@@ -38,7 +38,6 @@ + + struct mlx5_vxlan { + struct mlx5_core_dev *mdev; +- spinlock_t lock; /* protect vxlan table */ + /* max_num_ports is usuallly 4, 16 buckets is more than enough */ + DECLARE_HASHTABLE(htable, 4); + int num_ports; +@@ -78,45 +77,46 @@ static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + } + +-static struct mlx5_vxlan_port* +-mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port) ++struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) + { +- struct mlx5_vxlan_port *vxlanp; ++ struct mlx5_vxlan_port *retptr = NULL, *vxlanp; + +- hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) { +- if (vxlanp->udp_port == port) +- return vxlanp; +- } ++ if (!mlx5_vxlan_allowed(vxlan)) ++ return NULL; + +- return NULL; ++ rcu_read_lock(); ++ hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port) ++ if (vxlanp->udp_port == port) { ++ retptr = vxlanp; ++ break; ++ } ++ rcu_read_unlock(); ++ ++ return retptr; + } + +-struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) ++static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) + { + struct mlx5_vxlan_port *vxlanp; + +- if (!mlx5_vxlan_allowed(vxlan)) +- return NULL; +- +- spin_lock_bh(&vxlan->lock); +- vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); +- spin_unlock_bh(&vxlan->lock); +- +- return vxlanp; ++ hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) ++ if (vxlanp->udp_port == port) ++ return vxlanp; ++ return NULL; + } + + int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) + { + struct mlx5_vxlan_port *vxlanp; +- int ret = -ENOSPC; ++ int ret = 0; + +- vxlanp = mlx5_vxlan_lookup_port(vxlan, port); ++ mutex_lock(&vxlan->sync_lock); ++ vxlanp = vxlan_lookup_port(vxlan, port); + if (vxlanp) { + atomic_inc(&vxlanp->refcount); +- return 0; ++ goto unlock; + } + +- mutex_lock(&vxlan->sync_lock); + if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) { + mlx5_core_info(vxlan->mdev, + "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", +@@ -138,9 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) + vxlanp->udp_port = port; + atomic_set(&vxlanp->refcount, 1); + +- spin_lock_bh(&vxlan->lock); +- hash_add(vxlan->htable, &vxlanp->hlist, port); +- spin_unlock_bh(&vxlan->lock); ++ hash_add_rcu(vxlan->htable, &vxlanp->hlist, port); + + vxlan->num_ports++; + mutex_unlock(&vxlan->sync_lock); +@@ -157,34 +155,26 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) + int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) + { + struct mlx5_vxlan_port *vxlanp; +- bool remove = false; + int ret = 0; + + mutex_lock(&vxlan->sync_lock); + +- spin_lock_bh(&vxlan->lock); +- vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); ++ vxlanp = vxlan_lookup_port(vxlan, port); + if (!vxlanp) { + 
ret = -ENOENT; + goto out_unlock; + } + + if (atomic_dec_and_test(&vxlanp->refcount)) { +- hash_del(&vxlanp->hlist); +- remove = true; +- } +- +-out_unlock: +- spin_unlock_bh(&vxlan->lock); +- +- if (remove) { ++ hash_del_rcu(&vxlanp->hlist); ++ synchronize_rcu(); + mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); + kfree(vxlanp); + vxlan->num_ports--; + } + ++out_unlock: + mutex_unlock(&vxlan->sync_lock); +- + return ret; + } + +@@ -201,7 +191,6 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) + + vxlan->mdev = mdev; + mutex_init(&vxlan->sync_lock); +- spin_lock_init(&vxlan->lock); + hash_init(vxlan->htable); + + /* Hardware adds 4789 by default */ +-- +2.16.4 + diff --git a/patches.suse/ASoC-intel-Fix-memleak-in-sst_media_open.patch b/patches.suse/ASoC-intel-Fix-memleak-in-sst_media_open.patch new file mode 100644 index 0000000..cd3f1e1 --- /dev/null +++ b/patches.suse/ASoC-intel-Fix-memleak-in-sst_media_open.patch @@ -0,0 +1,50 @@ +From 062fa09f44f4fb3776a23184d5d296b0c8872eb9 Mon Sep 17 00:00:00 2001 +From: Dinghao Liu +Date: Thu, 13 Aug 2020 16:41:10 +0800 +Subject: [PATCH] ASoC: intel: Fix memleak in sst_media_open +Git-commit: 062fa09f44f4fb3776a23184d5d296b0c8872eb9 +Patch-mainline: v5.9-rc2 +References: git-fixes + +When power_up_sst() fails, stream needs to be freed +just like when try_module_get() fails. However, current +code is returning directly and ends up leaking memory. + +Fixes: 0121327c1a68b ("ASoC: Intel: mfld-pcm: add control for powering up/down dsp") +Signed-off-by: Dinghao Liu +Acked-by: Pierre-Louis Bossart +Link: https://lore.kernel.org/r/20200813084112.26205-1-dinghao.liu@zju.edu.cn +Signed-off-by: Mark Brown +Acked-by: Takashi Iwai + +--- + sound/soc/intel/atom/sst-mfld-platform-pcm.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c +index 49b9f18472bc..b1cac7abdc0a 100644 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c +@@ -331,7 +331,7 @@ static int sst_media_open(struct snd_pcm_substream *substream, + + ret_val = power_up_sst(stream); + if (ret_val < 0) +- return ret_val; ++ goto out_power_up; + + /* Make sure, that the period size is always even */ + snd_pcm_hw_constraint_step(substream->runtime, 0, +@@ -340,8 +340,9 @@ static int sst_media_open(struct snd_pcm_substream *substream, + return snd_pcm_hw_constraint_integer(runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + out_ops: +- kfree(stream); + mutex_unlock(&sst_lock); ++out_power_up: ++ kfree(stream); + return ret_val; + } + +-- +2.16.4 + diff --git a/patches.suse/Input-psmouse-add-a-newline-when-printing-proto-by-s.patch b/patches.suse/Input-psmouse-add-a-newline-when-printing-proto-by-s.patch new file mode 100644 index 0000000..77dab58 --- /dev/null +++ b/patches.suse/Input-psmouse-add-a-newline-when-printing-proto-by-s.patch @@ -0,0 +1,39 @@ +From 4aec14de3a15cf9789a0e19c847f164776f49473 Mon Sep 17 00:00:00 2001 +From: Xiongfeng Wang +Date: Tue, 21 Jul 2020 22:24:07 -0700 +Subject: [PATCH] Input: psmouse - add a newline when printing 'proto' by sysfs +Git-commit: 4aec14de3a15cf9789a0e19c847f164776f49473 +Patch-mainline: v5.9-rc1 +References: git-fixes + +When I cat parameter 'proto' by sysfs, it displays as follows. It's +better to add a newline for easy reading. 
+ +root@syzkaller:~# cat /sys/module/psmouse/parameters/proto +autoroot@syzkaller:~# + +Signed-off-by: Xiongfeng Wang +Link: https://lore.kernel.org/r/20200720073846.120724-1-wangxiongfeng2@huawei.com +Signed-off-by: Dmitry Torokhov +Acked-by: Takashi Iwai + +--- + drivers/input/mouse/psmouse-base.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c +index 527ae0b9a191..0b4a3039f312 100644 +--- a/drivers/input/mouse/psmouse-base.c ++++ b/drivers/input/mouse/psmouse-base.c +@@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) + { + int type = *((unsigned int *)kp->arg); + +- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); ++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); + } + + static int __init psmouse_init(void) +-- +2.16.4 + diff --git a/patches.suse/PM-sleep-core-Fix-the-handling-of-pending-runtime-re.patch b/patches.suse/PM-sleep-core-Fix-the-handling-of-pending-runtime-re.patch new file mode 100644 index 0000000..3415306 --- /dev/null +++ b/patches.suse/PM-sleep-core-Fix-the-handling-of-pending-runtime-re.patch @@ -0,0 +1,86 @@ +From e3eb6e8fba65094328b8dca635d00de74ba75b45 Mon Sep 17 00:00:00 2001 +From: "Rafael J. Wysocki" +Date: Mon, 24 Aug 2020 19:35:31 +0200 +Subject: [PATCH] PM: sleep: core: Fix the handling of pending runtime resume requests +Git-commit: e3eb6e8fba65094328b8dca635d00de74ba75b45 +Patch-mainline: v5.9-rc3 +References: git-fixes + +It has been reported that system-wide suspend may be aborted in the +absence of any wakeup events due to unforseen interactions of it with +the runtume PM framework. + +One failing scenario is when there are multiple devices sharing an +ACPI power resource and runtime-resume needs to be carried out for +one of them during system-wide suspend (for example, because it needs +to be reconfigured before the whole system goes to sleep). In that +case, the runtime-resume of that device involves turning the ACPI +power resource "on" which in turn causes runtime-resume requests +to be queued up for all of the other devices sharing it. Those +requests go to the runtime PM workqueue which is frozen during +system-wide suspend, so they are not actually taken care of until +the resume of the whole system, but the pm_runtime_barrier() +call in __device_suspend() sees them and triggers system wakeup +events for them which then cause the system-wide suspend to be +aborted if wakeup source objects are in active use. + +Of course, the logic that leads to triggering those wakeup events is +questionable in the first place, because clearly there are cases in +which a pending runtime resume request for a device is not connected +to any real wakeup events in any way (like the one above). Moreover, +it is racy, because the device may be resuming already by the time +the pm_runtime_barrier() runs and so if the driver doesn't take care +of signaling the wakeup event as appropriate, it will be lost. +However, if the driver does take care of that, the extra +pm_wakeup_event() call in the core is redundant. + +Accordingly, drop the conditional pm_wakeup_event() call fron +__device_suspend() and make the latter call pm_runtime_barrier() +alone. Also modify the comment next to that call to reflect the new +code and extend it to mention the need to avoid unwanted interactions +between runtime PM and system-wide device suspend callbacks. 
+ +Fixes: 1e2ef05bb8cf8 ("PM: Limit race conditions between runtime PM and system sleep (v2)") +Signed-off-by: Rafael J. Wysocki +Acked-by: Alan Stern +Reported-by: Utkarsh H Patel +Tested-by: Utkarsh H Patel +Tested-by: Pengfei Xu +Cc: All applicable +Acked-by: Takashi Iwai + +--- + drivers/base/power/main.c | 16 ++++++++++------ + 1 file changed, 10 insertions(+), 6 deletions(-) + +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c +index 9dd85bea4026..205a06752ca9 100644 +--- a/drivers/base/power/main.c ++++ b/drivers/base/power/main.c +@@ -1606,13 +1606,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) + } + + /* +- * If a device configured to wake up the system from sleep states +- * has been suspended at run time and there's a resume request pending +- * for it, this is equivalent to the device signaling wakeup, so the +- * system suspend operation should be aborted. ++ * Wait for possible runtime PM transitions of the device in progress ++ * to complete and if there's a runtime resume request pending for it, ++ * resume it before proceeding with invoking the system-wide suspend ++ * callbacks for it. ++ * ++ * If the system-wide suspend callbacks below change the configuration ++ * of the device, they must disable runtime PM for it or otherwise ++ * ensure that its runtime-resume callbacks will not be confused by that ++ * change in case they are invoked going forward. + */ +- if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) +- pm_wakeup_event(dev, 0); ++ pm_runtime_barrier(dev); + + if (pm_wakeup_pending()) { + dev->power.direct_complete = false; +-- +2.16.4 + diff --git a/patches.suse/Revert-scsi-qla2xxx-Disable-T10-DIF-feature-with-FC-.patch b/patches.suse/Revert-scsi-qla2xxx-Disable-T10-DIF-feature-with-FC-.patch new file mode 100644 index 0000000..1d7a6ce --- /dev/null +++ b/patches.suse/Revert-scsi-qla2xxx-Disable-T10-DIF-feature-with-FC-.patch @@ -0,0 +1,37 @@ +From: Quinn Tran +Date: Thu, 6 Aug 2020 04:10:14 -0700 +Subject: Revert "scsi: qla2xxx: Disable T10-DIF feature with FC-NVMe during + probe" +Patch-mainline: v5.9-rc2 +Git-commit: dca93232b361d260413933903cd4bdbd92ebcc7f +References: bsc#1171688 bsc#1174003 + +FCP T10-PI and NVMe features are independent of each other. This patch +allows both features to co-exist. + +This reverts commit 5da05a26b8305a625bc9d537671b981795b46dab. + +Link: https://lore.kernel.org/r/20200806111014.28434-12-njavali@marvell.com +Fixes: 5da05a26b830 ("scsi: qla2xxx: Disable T10-DIF feature with FC-NVMe during probe") +Reviewed-by: Himanshu Madhani +Signed-off-by: Quinn Tran +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_os.c | 4 ---- + 1 file changed, 4 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -2840,10 +2840,6 @@ qla2x00_probe_one(struct pci_dev *pdev, + /* This may fail but that's ok */ + pci_enable_pcie_error_reporting(pdev); + +- /* Turn off T10-DIF when FC-NVMe is enabled */ +- if (ql2xnvmeenable) +- ql2xenabledif = 0; +- + ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL); + if (!ha) { + ql_log_pci(ql_log_fatal, pdev, 0x0009, diff --git a/patches.suse/Revert-scsi-qla2xxx-Fix-crash-on-qla2x00_mailbox_com.patch b/patches.suse/Revert-scsi-qla2xxx-Fix-crash-on-qla2x00_mailbox_com.patch new file mode 100644 index 0000000..5285ac4 --- /dev/null +++ b/patches.suse/Revert-scsi-qla2xxx-Fix-crash-on-qla2x00_mailbox_com.patch @@ -0,0 +1,40 @@ +From: Saurav Kashyap +Date: Thu, 6 Aug 2020 04:10:13 -0700 +Subject: Revert "scsi: qla2xxx: Fix crash on qla2x00_mailbox_command" +Patch-mainline: v5.9-rc2 +Git-commit: de7e6194301ad31c4ce95395eb678e51a1b907e5 +References: bsc#1171688 bsc#1174003 + +FCoE adapter initialization failed for ISP8021 with the following patch +applied. In addition, reproduction of the issue the patch originally tried +to address has been unsuccessful. + +This reverts commit 3cb182b3fa8b7a61f05c671525494697cba39c6a. + +Link: https://lore.kernel.org/r/20200806111014.28434-11-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Saurav Kashyap +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_mbx.c | 8 -------- + 1 file changed, 8 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -334,14 +334,6 @@ qla2x00_mailbox_command(scsi_qla_host_t + if (time_after(jiffies, wait_time)) + break; + +- /* +- * Check if it's UNLOADING, cause we cannot poll in +- * this case, or else a NULL pointer dereference +- * is triggered. +- */ +- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) +- return QLA_FUNCTION_TIMEOUT; +- + /* Check for pending interrupts. */ + qla2x00_poll(ha->rsp_q_map[0]); + diff --git a/patches.suse/USB-Ignore-UAS-for-JMicron-JMS567-ATA-ATAPI-Bridge.patch b/patches.suse/USB-Ignore-UAS-for-JMicron-JMS567-ATA-ATAPI-Bridge.patch new file mode 100644 index 0000000..4fd86f9 --- /dev/null +++ b/patches.suse/USB-Ignore-UAS-for-JMicron-JMS567-ATA-ATAPI-Bridge.patch @@ -0,0 +1,42 @@ +From 9aa37788e7ebb3f489fb4b71ce07adadd444264a Mon Sep 17 00:00:00 2001 +From: Cyril Roelandt +Date: Tue, 25 Aug 2020 23:22:31 +0200 +Subject: [PATCH] USB: Ignore UAS for JMicron JMS567 ATA/ATAPI Bridge +Git-commit: 9aa37788e7ebb3f489fb4b71ce07adadd444264a +Patch-mainline: v5.9-rc3 +References: git-fixes + +This device does not support UAS properly and a similar entry already +exists in drivers/usb/storage/unusual_uas.h. Without this patch, +storage_probe() defers the handling of this device to UAS, which cannot +handle it either. 
+ +Tested-by: Brice Goglin +Fixes: bc3bdb12bbb3 ("usb-storage: Disable UAS on JMicron SATA enclosure") +Acked-by: Alan Stern +Cc: +Signed-off-by: Cyril Roelandt +Link: https://lore.kernel.org/r/20200825212231.46309-1-tipecaml@gmail.com +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/usb/storage/unusual_devs.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index 220ae2c356ee..5732e9691f08 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, +- US_FL_BROKEN_FUA ), ++ US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ), + + /* Reported by Andrey Rahmatullin */ + UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, +-- +2.16.4 + diff --git a/patches.suse/USB-cdc-acm-rework-notification_buffer-resizing.patch b/patches.suse/USB-cdc-acm-rework-notification_buffer-resizing.patch new file mode 100644 index 0000000..636eb50 --- /dev/null +++ b/patches.suse/USB-cdc-acm-rework-notification_buffer-resizing.patch @@ -0,0 +1,104 @@ +From f4b9d8a582f738c24ebeabce5cc15f4b8159d74e Mon Sep 17 00:00:00 2001 +From: Tom Rix +Date: Sat, 1 Aug 2020 08:21:54 -0700 +Subject: [PATCH] USB: cdc-acm: rework notification_buffer resizing +Git-commit: f4b9d8a582f738c24ebeabce5cc15f4b8159d74e +Patch-mainline: v5.9-rc3 +References: git-fixes + +Clang static analysis reports this error + +cdc-acm.c:409:3: warning: Use of memory after it is freed + acm_process_notification(acm, (unsigned char *)dr); + +There are three problems, the first one is that dr is not reset + +The variable dr is set with + +if (acm->nb_index) + dr = (struct usb_cdc_notification *)acm->notification_buffer; + +But if the notification_buffer is too small it is resized with + + if (acm->nb_size) { + kfree(acm->notification_buffer); + acm->nb_size = 0; + } + alloc_size = roundup_pow_of_two(expected_size); + /* + * kmalloc ensures a valid notification_buffer after a + * use of kfree in case the previous allocation was too + * small. Final freeing is done on disconnect. + */ + acm->notification_buffer = + kmalloc(alloc_size, GFP_ATOMIC); + +dr should point to the new acm->notification_buffer. + +The second problem is any data in the notification_buffer is lost +when the pointer is freed. In the normal case, the current data +is accumulated in the notification_buffer here. + + memcpy(&acm->notification_buffer[acm->nb_index], + urb->transfer_buffer, copy_size); + +When a resize happens, anything before +notification_buffer[acm->nb_index] is garbage. + +The third problem is the acm->nb_index is not reset on a +resizing buffer error. + +So switch resizing to using krealloc and reassign dr and +reset nb_index. 
+ +Fixes: ea2583529cd1 ("cdc-acm: reassemble fragmented notifications") +Signed-off-by: Tom Rix +Cc: stable +Acked-by: Oliver Neukum +Link: https://lore.kernel.org/r/20200801152154.20683-1-trix@redhat.com +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/usb/class/cdc-acm.c | 22 ++++++++++------------ + 1 file changed, 10 insertions(+), 12 deletions(-) + +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 991786876dbb..7f6f3ab5b8a6 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb) + if (current_size < expected_size) { + /* notification is transmitted fragmented, reassemble */ + if (acm->nb_size < expected_size) { +- if (acm->nb_size) { +- kfree(acm->notification_buffer); +- acm->nb_size = 0; +- } ++ u8 *new_buffer; + alloc_size = roundup_pow_of_two(expected_size); +- /* +- * kmalloc ensures a valid notification_buffer after a +- * use of kfree in case the previous allocation was too +- * small. Final freeing is done on disconnect. +- */ +- acm->notification_buffer = +- kmalloc(alloc_size, GFP_ATOMIC); +- if (!acm->notification_buffer) ++ /* Final freeing is done on disconnect. */ ++ new_buffer = krealloc(acm->notification_buffer, ++ alloc_size, GFP_ATOMIC); ++ if (!new_buffer) { ++ acm->nb_index = 0; + goto exit; ++ } ++ ++ acm->notification_buffer = new_buffer; + acm->nb_size = alloc_size; ++ dr = (struct usb_cdc_notification *)acm->notification_buffer; + } + + copy_size = min(current_size, +-- +2.16.4 + diff --git a/patches.suse/USB-rename-USB-quirk-to-USB_QUIRK_ENDPOINT_IGNORE.patch b/patches.suse/USB-rename-USB-quirk-to-USB_QUIRK_ENDPOINT_IGNORE.patch new file mode 100644 index 0000000..c382c4e --- /dev/null +++ b/patches.suse/USB-rename-USB-quirk-to-USB_QUIRK_ENDPOINT_IGNORE.patch @@ -0,0 +1,117 @@ +From 91c7eaa686c3b7ae2d5b2aed22a45a02c8baa30e Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman +Date: Thu, 18 Jun 2020 11:42:53 +0200 +Subject: [PATCH] USB: rename USB quirk to USB_QUIRK_ENDPOINT_IGNORE +Mime-version: 1.0 +Content-type: text/plain; charset=UTF-8 +Content-transfer-encoding: 8bit +Git-commit: 91c7eaa686c3b7ae2d5b2aed22a45a02c8baa30e +Patch-mainline: v5.9-rc1 +References: git-fixes + +The USB core has a quirk flag to ignore specific endpoints, so rename it +to be more obvious what this quirk does. 
+ +Cc: Johan Hovold +Cc: Alan Stern +Cc: Richard Dodd +Cc: Hans de Goede +Cc: Jonathan Cox +Cc: Bastien Nocera +Cc: "Thiébaud Weksteen" +Cc: Nishad Kamdar +Link: https://lore.kernel.org/r/20200618094300.1887727-2-gregkh@linuxfoundation.org +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/usb/core/config.c | 8 ++++---- + drivers/usb/core/quirks.c | 18 +++++++++--------- + drivers/usb/core/usb.h | 2 +- + include/linux/usb/quirks.h | 4 ++-- + 4 files changed, 16 insertions(+), 16 deletions(-) + +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -298,10 +298,10 @@ static int usb_parse_endpoint(struct dev + goto skip_to_next_endpoint_or_interface_descriptor; + } + +- /* Ignore blacklisted endpoints */ +- if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) { +- if (usb_endpoint_is_blacklisted(udev, ifp, d)) { +- dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n", ++ /* Ignore some endpoints */ ++ if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) { ++ if (usb_endpoint_is_ignored(udev, ifp, d)) { ++ dev_warn(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n", + cfgno, inum, asnum, + d->bEndpointAddress); + goto skip_to_next_endpoint_or_interface_descriptor; +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -215,7 +215,7 @@ static const struct usb_device_id usb_qu + + /* Sound Devices USBPre2 */ + { USB_DEVICE(0x0926, 0x0202), .driver_info = +- USB_QUIRK_ENDPOINT_BLACKLIST }, ++ USB_QUIRK_ENDPOINT_IGNORE }, + + /* Keytouch QWERTY Panel keyboard */ + { USB_DEVICE(0x0926, 0x3333), .driver_info = +@@ -340,24 +340,24 @@ static const struct usb_device_id usb_am + }; + + /* +- * Entries for blacklisted endpoints that should be ignored when parsing +- * configuration descriptors. ++ * Entries for endpoints that should be ignored when parsing configuration ++ * descriptors. + * +- * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST. ++ * Matched for devices with USB_QUIRK_ENDPOINT_IGNORE. + */ +-static const struct usb_device_id usb_endpoint_blacklist[] = { ++static const struct usb_device_id usb_endpoint_ignore[] = { + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, + { } + }; + +-bool usb_endpoint_is_blacklisted(struct usb_device *udev, +- struct usb_host_interface *intf, +- struct usb_endpoint_descriptor *epd) ++bool usb_endpoint_is_ignored(struct usb_device *udev, ++ struct usb_host_interface *intf, ++ struct usb_endpoint_descriptor *epd) + { + const struct usb_device_id *id; + unsigned int address; + +- for (id = usb_endpoint_blacklist; id->match_flags; ++id) { ++ for (id = usb_endpoint_ignore; id->match_flags; ++id) { + if (!usb_match_device(udev, id)) + continue; + +--- a/drivers/usb/core/usb.h ++++ b/drivers/usb/core/usb.h +@@ -36,7 +36,7 @@ extern void usb_deauthorize_interface(st + extern void usb_authorize_interface(struct usb_interface *); + extern void usb_detect_quirks(struct usb_device *udev); + extern void usb_detect_interface_quirks(struct usb_device *udev); +-extern bool usb_endpoint_is_blacklisted(struct usb_device *udev, ++extern bool usb_endpoint_is_ignored(struct usb_device *udev, + struct usb_host_interface *intf, + struct usb_endpoint_descriptor *epd); + extern int usb_remove_device(struct usb_device *udev); +--- a/include/linux/usb/quirks.h ++++ b/include/linux/usb/quirks.h +@@ -59,7 +59,7 @@ + /* Device needs a pause after every control message. 
*/ + #define USB_QUIRK_DELAY_CTRL_MSG BIT(13) + +-/* device has blacklisted endpoints */ +-#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15) ++/* device has endpoints that should be ignored */ ++#define USB_QUIRK_ENDPOINT_IGNORE BIT(15) + + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/patches.suse/USB-serial-ftdi_sio-clean-up-receive-processing.patch b/patches.suse/USB-serial-ftdi_sio-clean-up-receive-processing.patch new file mode 100644 index 0000000..ae24f8f --- /dev/null +++ b/patches.suse/USB-serial-ftdi_sio-clean-up-receive-processing.patch @@ -0,0 +1,75 @@ +From ce054039ba5e47b75a3be02a00274e52b06a6456 Mon Sep 17 00:00:00 2001 +From: Johan Hovold +Date: Wed, 8 Jul 2020 14:49:52 +0200 +Subject: [PATCH] USB: serial: ftdi_sio: clean up receive processing +Git-commit: ce054039ba5e47b75a3be02a00274e52b06a6456 +Patch-mainline: v5.9-rc1 +References: git-fixes + +Clean up receive processing by dropping the character pointer and +keeping the length argument unchanged throughout the function. + +Also make it more apparent that sysrq processing can consume a +characters by adding an explicit continue. + +Reviewed-by: Greg Kroah-Hartman +Signed-off-by: Johan Hovold +Acked-by: Takashi Iwai + +--- + drivers/usb/serial/ftdi_sio.c | 19 +++++++++---------- + 1 file changed, 9 insertions(+), 10 deletions(-) + +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 96b9e2768ac5..33f1cca7eaa6 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -2483,7 +2483,6 @@ static int ftdi_process_packet(struct usb_serial_port *port, + struct ftdi_private *priv, unsigned char *buf, int len) + { + unsigned char status; +- unsigned char *ch; + int i; + char flag; + +@@ -2526,8 +2525,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, + else + priv->transmit_empty = 0; + +- len -= 2; +- if (!len) ++ if (len == 2) + return 0; /* status only */ + + /* +@@ -2556,19 +2554,20 @@ static int ftdi_process_packet(struct usb_serial_port *port, + } + } + +- port->icount.rx += len; +- ch = buf + 2; ++ port->icount.rx += len - 2; + + if (port->port.console && port->sysrq) { +- for (i = 0; i < len; i++, ch++) { +- if (!usb_serial_handle_sysrq_char(port, *ch)) +- tty_insert_flip_char(&port->port, *ch, flag); ++ for (i = 2; i < len; i++) { ++ if (usb_serial_handle_sysrq_char(port, buf[i])) ++ continue; ++ tty_insert_flip_char(&port->port, buf[i], flag); + } + } else { +- tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len); ++ tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag, ++ len - 2); + } + +- return len; ++ return len - 2; + } + + static void ftdi_process_read_urb(struct urb *urb) +-- +2.16.4 + diff --git a/patches.suse/USB-serial-ftdi_sio-fix-break-and-sysrq-handling.patch b/patches.suse/USB-serial-ftdi_sio-fix-break-and-sysrq-handling.patch new file mode 100644 index 0000000..d779f0e --- /dev/null +++ b/patches.suse/USB-serial-ftdi_sio-fix-break-and-sysrq-handling.patch @@ -0,0 +1,87 @@ +From 733fff67941dad64b8a630450b8372b1873edc41 Mon Sep 17 00:00:00 2001 +From: Johan Hovold +Date: Wed, 8 Jul 2020 14:49:53 +0200 +Subject: [PATCH] USB: serial: ftdi_sio: fix break and sysrq handling +Git-commit: 733fff67941dad64b8a630450b8372b1873edc41 +Patch-mainline: v5.9-rc1 +References: git-fixes + +Only the last NUL in a packet should be flagged as a break character, +for example, to avoid dropping unrelated characters when IGNBRK is set. 
+ +Also make sysrq work by consuming the break character instead of having +it immediately cancel the sysrq request, and by not processing it +prematurely to avoid triggering a sysrq based on an unrelated character +received in the same packet (which was received *before* the break). + +Note that the break flag can be left set also for a packet received +immediately following a break and that and an ending NUL in such a +packet will continue to be reported as a break as there's no good way to +tell it apart from an actual break. + +Tested on FT232R and FT232H. + +Fixes: 72fda3ca6fc1 ("USB: serial: ftd_sio: implement sysrq handling on break") +Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") +Reviewed-by: Greg Kroah-Hartman +Signed-off-by: Johan Hovold +Acked-by: Takashi Iwai + +--- + drivers/usb/serial/ftdi_sio.c | 24 +++++++++++++++++------- + 1 file changed, 17 insertions(+), 7 deletions(-) + +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 33f1cca7eaa6..07b146d7033a 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -2483,6 +2483,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, + struct ftdi_private *priv, unsigned char *buf, int len) + { + unsigned char status; ++ bool brkint = false; + int i; + char flag; + +@@ -2534,13 +2535,17 @@ static int ftdi_process_packet(struct usb_serial_port *port, + */ + flag = TTY_NORMAL; + if (buf[1] & FTDI_RS_ERR_MASK) { +- /* Break takes precedence over parity, which takes precedence +- * over framing errors */ +- if (buf[1] & FTDI_RS_BI) { +- flag = TTY_BREAK; ++ /* ++ * Break takes precedence over parity, which takes precedence ++ * over framing errors. Note that break is only associated ++ * with the last character in the buffer and only when it's a ++ * NUL. ++ */ ++ if (buf[1] & FTDI_RS_BI && buf[len - 1] == '\0') { + port->icount.brk++; +- usb_serial_handle_break(port); +- } else if (buf[1] & FTDI_RS_PE) { ++ brkint = true; ++ } ++ if (buf[1] & FTDI_RS_PE) { + flag = TTY_PARITY; + port->icount.parity++; + } else if (buf[1] & FTDI_RS_FE) { +@@ -2556,8 +2561,13 @@ static int ftdi_process_packet(struct usb_serial_port *port, + + port->icount.rx += len - 2; + +- if (port->port.console && port->sysrq) { ++ if (brkint || (port->port.console && port->sysrq)) { + for (i = 2; i < len; i++) { ++ if (brkint && i == len - 1) { ++ if (usb_serial_handle_break(port)) ++ return len - 3; ++ flag = TTY_BREAK; ++ } + if (usb_serial_handle_sysrq_char(port, buf[i])) + continue; + tty_insert_flip_char(&port->port, buf[i], flag); +-- +2.16.4 + diff --git a/patches.suse/USB-serial-ftdi_sio-make-process-packet-buffer-unsig.patch b/patches.suse/USB-serial-ftdi_sio-make-process-packet-buffer-unsig.patch new file mode 100644 index 0000000..a38beb6 --- /dev/null +++ b/patches.suse/USB-serial-ftdi_sio-make-process-packet-buffer-unsig.patch @@ -0,0 +1,96 @@ +From ab4cc4ef6724ea588e835fc1e764c4b4407a70b7 Mon Sep 17 00:00:00 2001 +From: Johan Hovold +Date: Wed, 8 Jul 2020 14:49:51 +0200 +Subject: [PATCH] USB: serial: ftdi_sio: make process-packet buffer unsigned +Git-commit: ab4cc4ef6724ea588e835fc1e764c4b4407a70b7 +Patch-mainline: v5.9-rc1 +References: git-fixes + +Use an unsigned type for the process-packet buffer argument and give it +a more apt name. 
+ +Reviewed-by: Greg Kroah-Hartman +Signed-off-by: Johan Hovold +Acked-by: Takashi Iwai + +--- + drivers/usb/serial/ftdi_sio.c | 22 +++++++++++----------- + 1 file changed, 11 insertions(+), 11 deletions(-) + +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 9ad44a96dfe3..96b9e2768ac5 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -2480,12 +2480,12 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port, + #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE) + + static int ftdi_process_packet(struct usb_serial_port *port, +- struct ftdi_private *priv, char *packet, int len) ++ struct ftdi_private *priv, unsigned char *buf, int len) + { ++ unsigned char status; ++ unsigned char *ch; + int i; +- char status; + char flag; +- char *ch; + + if (len < 2) { + dev_dbg(&port->dev, "malformed packet\n"); +@@ -2495,7 +2495,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, + /* Compare new line status to the old one, signal if different/ + N.B. packet may be processed more than once, but differences + are only processed once. */ +- status = packet[0] & FTDI_STATUS_B0_MASK; ++ status = buf[0] & FTDI_STATUS_B0_MASK; + if (status != priv->prev_status) { + char diff_status = status ^ priv->prev_status; + +@@ -2521,7 +2521,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, + } + + /* save if the transmitter is empty or not */ +- if (packet[1] & FTDI_RS_TEMT) ++ if (buf[1] & FTDI_RS_TEMT) + priv->transmit_empty = 1; + else + priv->transmit_empty = 0; +@@ -2535,29 +2535,29 @@ static int ftdi_process_packet(struct usb_serial_port *port, + * data payload to avoid over-reporting. + */ + flag = TTY_NORMAL; +- if (packet[1] & FTDI_RS_ERR_MASK) { ++ if (buf[1] & FTDI_RS_ERR_MASK) { + /* Break takes precedence over parity, which takes precedence + * over framing errors */ +- if (packet[1] & FTDI_RS_BI) { ++ if (buf[1] & FTDI_RS_BI) { + flag = TTY_BREAK; + port->icount.brk++; + usb_serial_handle_break(port); +- } else if (packet[1] & FTDI_RS_PE) { ++ } else if (buf[1] & FTDI_RS_PE) { + flag = TTY_PARITY; + port->icount.parity++; +- } else if (packet[1] & FTDI_RS_FE) { ++ } else if (buf[1] & FTDI_RS_FE) { + flag = TTY_FRAME; + port->icount.frame++; + } + /* Overrun is special, not associated with a char */ +- if (packet[1] & FTDI_RS_OE) { ++ if (buf[1] & FTDI_RS_OE) { + port->icount.overrun++; + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); + } + } + + port->icount.rx += len; +- ch = packet + 2; ++ ch = buf + 2; + + if (port->port.console && port->sysrq) { + for (i = 0; i < len; i++, ch++) { +-- +2.16.4 + diff --git a/patches.suse/USB-serial-qcserial-add-EM7305-QDL-product-ID.patch b/patches.suse/USB-serial-qcserial-add-EM7305-QDL-product-ID.patch new file mode 100644 index 0000000..e72cb6b --- /dev/null +++ b/patches.suse/USB-serial-qcserial-add-EM7305-QDL-product-ID.patch @@ -0,0 +1,48 @@ +From d2a4309c1ab6df424b2239fe2920d6f26f808d17 Mon Sep 17 00:00:00 2001 +From: Erik Ekman +Date: Fri, 17 Jul 2020 20:51:18 +0200 +Subject: [PATCH] USB: serial: qcserial: add EM7305 QDL product ID +Git-commit: d2a4309c1ab6df424b2239fe2920d6f26f808d17 +Patch-mainline: v5.9-rc1 +References: git-fixes + +When running qmi-firmware-update on the Sierra Wireless EM7305 in a Toshiba +laptop, it changed product ID to 0x9062 when entering QDL mode: + +usb 2-4: new high-speed USB device number 78 using xhci_hcd +usb 2-4: New USB device found, idVendor=1199, idProduct=9062, bcdDevice= 0.00 +usb 2-4: 
New USB device strings: Mfr=1, Product=2, SerialNumber=0 +usb 2-4: Product: EM7305 +usb 2-4: Manufacturer: Sierra Wireless, Incorporated + +The upgrade could complete after running + # echo 1199 9062 > /sys/bus/usb-serial/drivers/qcserial/new_id + +qcserial 2-4:1.0: Qualcomm USB modem converter detected +usb 2-4: Qualcomm USB modem converter now attached to ttyUSB0 + +Signed-off-by: Erik Ekman +Link: https://lore.kernel.org/r/20200717185118.3640219-1-erik@kryo.se +Cc: stable@vger.kernel.org +Signed-off-by: Johan Hovold +Acked-by: Takashi Iwai + +--- + drivers/usb/serial/qcserial.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index 5dfbbaef38bb..c8d1ea0e6e6f 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ ++ {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */ + {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ + {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ + {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ +-- +2.16.4 + diff --git a/patches.suse/char-virtio-Select-VIRTIO-from-VIRTIO_CONSOLE.patch b/patches.suse/char-virtio-Select-VIRTIO-from-VIRTIO_CONSOLE.patch new file mode 100644 index 0000000..96148f4 --- /dev/null +++ b/patches.suse/char-virtio-Select-VIRTIO-from-VIRTIO_CONSOLE.patch @@ -0,0 +1,33 @@ +From 3a22b4650ead54a28315e3c0e5116ca5e130e977 Mon Sep 17 00:00:00 2001 +From: Michal Suchanek +Date: Mon, 31 Aug 2020 16:35:28 +0200 +Subject: [PATCH] char: virtio: Select VIRTIO from VIRTIO_CONSOLE. + +References: bsc#1175667 +Patch-mainline: submitted https://lore.kernel.org/lkml/20200831165850.26163-1-msuchanek@suse.de/ + +Make it possible to make virtio console built-in when +other virtio drivers are modular. + +Signed-off-by: Michal Suchanek +--- + drivers/char/Kconfig | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig +--- a/drivers/char/Kconfig ++++ b/drivers/char/Kconfig +@@ -158,8 +158,9 @@ source "drivers/tty/hvc/Kconfig" + + config VIRTIO_CONSOLE + tristate "Virtio console" +- depends on VIRTIO && TTY ++ depends on TTY + select HVC_DRIVER ++ select VIRTIO + help + Virtio console for use with lguest and other hypervisors. + +-- +2.28.0 + diff --git a/patches.suse/device-property-Fix-the-secondary-firmware-node-hand.patch b/patches.suse/device-property-Fix-the-secondary-firmware-node-hand.patch new file mode 100644 index 0000000..aced1ad --- /dev/null +++ b/patches.suse/device-property-Fix-the-secondary-firmware-node-hand.patch @@ -0,0 +1,63 @@ +From c15e1bdda4365a5f17cdadf22bf1c1df13884a9e Mon Sep 17 00:00:00 2001 +From: Heikki Krogerus +Date: Fri, 21 Aug 2020 13:53:42 +0300 +Subject: [PATCH] device property: Fix the secondary firmware node handling in set_primary_fwnode() +Git-commit: c15e1bdda4365a5f17cdadf22bf1c1df13884a9e +Patch-mainline: v5.9-rc3 +References: git-fixes + +When the primary firmware node pointer is removed from a +device (set to NULL) the secondary firmware node pointer, +when it exists, is made the primary node for the device. +However, the secondary firmware node pointer of the original +primary firmware node is never cleared (set to NULL). 
+ +To avoid situation where the secondary firmware node pointer +is pointing to a non-existing object, clearing it properly +when the primary node is removed from a device in +set_primary_fwnode(). + +Fixes: 97badf873ab6 ("device property: Make it possible to use secondary firmware nodes") +Cc: All applicable +Signed-off-by: Heikki Krogerus +Signed-off-by: Rafael J. Wysocki +Acked-by: Takashi Iwai + +--- + drivers/base/core.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +diff --git a/drivers/base/core.c b/drivers/base/core.c +index ac1046a382bc..f6f620aa9408 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -4264,9 +4264,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) + */ + void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) + { +- if (fwnode) { +- struct fwnode_handle *fn = dev->fwnode; ++ struct fwnode_handle *fn = dev->fwnode; + ++ if (fwnode) { + if (fwnode_is_primary(fn)) + fn = fn->secondary; + +@@ -4276,8 +4276,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) + } + dev->fwnode = fwnode; + } else { +- dev->fwnode = fwnode_is_primary(dev->fwnode) ? +- dev->fwnode->secondary : NULL; ++ if (fwnode_is_primary(fn)) { ++ dev->fwnode = fn->secondary; ++ fn->secondary = NULL; ++ } else { ++ dev->fwnode = NULL; ++ } + } + } + EXPORT_SYMBOL_GPL(set_primary_fwnode); +-- +2.16.4 + diff --git a/patches.suse/drm-amd-display-fix-pow-crashing-when-given-base-0.patch b/patches.suse/drm-amd-display-fix-pow-crashing-when-given-base-0.patch new file mode 100644 index 0000000..e93048a --- /dev/null +++ b/patches.suse/drm-amd-display-fix-pow-crashing-when-given-base-0.patch @@ -0,0 +1,40 @@ +From d2e59d0ff4c44d1f6f8ed884a5bea7d1bb7fd98c Mon Sep 17 00:00:00 2001 +From: Krunoslav Kovac +Date: Thu, 6 Aug 2020 17:54:47 -0400 +Subject: [PATCH] drm/amd/display: fix pow() crashing when given base 0 +Git-commit: d2e59d0ff4c44d1f6f8ed884a5bea7d1bb7fd98c +Patch-mainline: v5.9-rc2 +References: git-fixes + +[Why&How] +pow(a,x) is implemented as exp(x*log(a)). log(0) will crash. +So return 0^x = 0, unless x=0, convention seems to be 0^0 = 1. + +Cc: stable@vger.kernel.org +Signed-off-by: Krunoslav Kovac +Reviewed-by: Anthony Koo +Acked-by: Rodrigo Siqueira +Signed-off-by: Alex Deucher +Acked-by: Takashi Iwai + +--- + drivers/gpu/drm/amd/display/include/fixed31_32.h | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h +index 89ef9f6860e5..16df2a485dd0 100644 +--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h ++++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h +@@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg); + */ + static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2) + { ++ if (arg1.value == 0) ++ return arg2.value == 0 ? 
dc_fixpt_one : dc_fixpt_zero; ++ + return dc_fixpt_exp( + dc_fixpt_mul( + dc_fixpt_log(arg1), +-- +2.16.4 + diff --git a/patches.suse/drm-msm-adreno-fix-updating-ring-fence.patch b/patches.suse/drm-msm-adreno-fix-updating-ring-fence.patch new file mode 100644 index 0000000..06c0e91 --- /dev/null +++ b/patches.suse/drm-msm-adreno-fix-updating-ring-fence.patch @@ -0,0 +1,41 @@ +From f228af11dfa1d1616bc67f3a4119ab77c36181f1 Mon Sep 17 00:00:00 2001 +From: Rob Clark +Date: Wed, 12 Aug 2020 17:03:09 -0700 +Subject: [PATCH] drm/msm/adreno: fix updating ring fence +Git-commit: f228af11dfa1d1616bc67f3a4119ab77c36181f1 +Patch-mainline: v5.9-rc3 +References: git-fixes + +We need to set it to the most recent completed fence, not the most +recent submitted. Otherwise we have races where we think we can retire +submits that the GPU is not finished with, if the GPU doesn't manage to +overwrite the seqno before we look at it. + +This can show up with hang recovery if one of the submits after the +crashing submit also hangs after it is replayed. + +Fixes: f97decac5f4c ("drm/msm: Support multiple ringbuffers") +Signed-off-by: Rob Clark +Reviewed-by: Jordan Crouse +Acked-by: Takashi Iwai + +--- + drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +index e23641a5ec84..d2dbb6968cba 100644 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +@@ -396,7 +396,7 @@ int adreno_hw_init(struct msm_gpu *gpu) + ring->next = ring->start; + + /* reset completed fence seqno: */ +- ring->memptrs->fence = ring->seqno; ++ ring->memptrs->fence = ring->fctx->completed_fence; + ring->memptrs->rptr = 0; + } + +-- +2.16.4 + diff --git a/patches.suse/i2c-rcar-in-slave-mode-clear-NACK-earlier.patch b/patches.suse/i2c-rcar-in-slave-mode-clear-NACK-earlier.patch new file mode 100644 index 0000000..e476f71 --- /dev/null +++ b/patches.suse/i2c-rcar-in-slave-mode-clear-NACK-earlier.patch @@ -0,0 +1,38 @@ +From 914a7b3563b8fb92f976619bbd0fa3a4a708baae Mon Sep 17 00:00:00 2001 +From: Wolfram Sang +Date: Mon, 17 Aug 2020 14:19:30 +0200 +Subject: [PATCH] i2c: rcar: in slave mode, clear NACK earlier +Git-commit: 914a7b3563b8fb92f976619bbd0fa3a4a708baae +Patch-mainline: v5.9-rc3 +References: git-fixes + +Currently, a NACK in slave mode is set/cleared when SCL is held low by +the IP core right before the bit is about to be pushed out. This is too +late for clearing and then a NACK from the previous byte is still used +for the current one. Now, let's clear the NACK right after we detected +the STOP condition following the NACK. 
+ +Fixes: de20d1857dd6 ("i2c: rcar: add slave support") +Signed-off-by: Wolfram Sang +Signed-off-by: Wolfram Sang +Acked-by: Takashi Iwai + +--- + drivers/i2c/busses/i2c-rcar.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c +index 9e883474db8c..c7c543483b08 100644 +--- a/drivers/i2c/busses/i2c-rcar.c ++++ b/drivers/i2c/busses/i2c-rcar.c +@@ -590,6 +590,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) + /* master sent stop */ + if (ssr_filtered & SSR) { + i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); ++ rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ + rcar_i2c_write(priv, ICSIER, SAR); + rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); + } +-- +2.16.4 + diff --git a/patches.suse/ibmvnic-fix-NULL-tx_pools-and-rx_tools-issue-at-do_r.patch b/patches.suse/ibmvnic-fix-NULL-tx_pools-and-rx_tools-issue-at-do_r.patch new file mode 100644 index 0000000..b392254 --- /dev/null +++ b/patches.suse/ibmvnic-fix-NULL-tx_pools-and-rx_tools-issue-at-do_r.patch @@ -0,0 +1,84 @@ +From 9f13457377907fa253aef560e1a37e1ca4197f9b Mon Sep 17 00:00:00 2001 +From: Mingming Cao +Date: Tue, 25 Aug 2020 13:26:41 -0400 +Subject: [PATCH] ibmvnic fix NULL tx_pools and rx_tools issue at do_reset + +References: bsc#1175873 ltc#187922 +Patch-mainline: queued +Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git +Git-commit: 9f13457377907fa253aef560e1a37e1ca4197f9b + +At the time of do_rest, ibmvnic tries to re-initalize the tx_pools +and rx_pools to avoid re-allocating the long term buffer. However +there is a window inside do_reset that the tx_pools and +rx_pools were freed before re-initialized making it possible to deference +null pointers. + +This patch fix this issue by always check the tx_pool +and rx_pool are not NULL after ibmvnic_login. If so, re-allocating +the pools. This will avoid getting into calling reset_tx/rx_pools with +NULL adapter tx_pools/rx_pools pointer. Also add null pointer check in +reset_tx_pools and reset_rx_pools to safe handle NULL pointer case. + +Signed-off-by: Mingming Cao +Signed-off-by: Dany Madden +Signed-off-by: David S. 
Miller +Acked-by: Michal Suchanek +--- + drivers/net/ethernet/ibm/ibmvnic.c | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 5afb3c9c52d2..d3a774331afc 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -479,6 +479,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) + int i, j, rc; + u64 *size_array; + ++ if (!adapter->rx_pool) ++ return -1; ++ + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); + +@@ -649,6 +652,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) + int tx_scrqs; + int i, rc; + ++ if (!adapter->tx_pool) ++ return -1; ++ + tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + for (i = 0; i < tx_scrqs; i++) { + rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); +@@ -2011,7 +2017,10 @@ static int do_reset(struct ibmvnic_adapter *adapter, + adapter->req_rx_add_entries_per_subcrq != + old_num_rx_slots || + adapter->req_tx_entries_per_subcrq != +- old_num_tx_slots) { ++ old_num_tx_slots || ++ !adapter->rx_pool || ++ !adapter->tso_pool || ++ !adapter->tx_pool) { + release_rx_pools(adapter); + release_tx_pools(adapter); + release_napi(adapter); +@@ -2024,10 +2033,14 @@ static int do_reset(struct ibmvnic_adapter *adapter, + } else { + rc = reset_tx_pools(adapter); + if (rc) ++ netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", ++ rc); + goto out; + + rc = reset_rx_pools(adapter); + if (rc) ++ netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", ++ rc); + goto out; + } + ibmvnic_disable_irqs(adapter); +-- +2.28.0 + diff --git a/patches.suse/ip6_tunnel-allow-not-to-count-pkts-on-tstats-by-pass.patch b/patches.suse/ip6_tunnel-allow-not-to-count-pkts-on-tstats-by-pass.patch new file mode 100644 index 0000000..86ab068 --- /dev/null +++ b/patches.suse/ip6_tunnel-allow-not-to-count-pkts-on-tstats-by-pass.patch @@ -0,0 +1,42 @@ +From 6f6a8622057c92408930c31698394fae1557b188 Mon Sep 17 00:00:00 2001 +From: Xin Long +Date: Mon, 17 Jun 2019 21:34:14 +0800 +Subject: [PATCH] ip6_tunnel: allow not to count pkts on tstats by passing dev as NULL +Git-commit: 6f6a8622057c92408930c31698394fae1557b188 +Patch-mainline: v5.2-rc6 +References: bsc#1175515 + +A similar fix to Patch "ip_tunnel: allow not to count pkts on tstats by +setting skb's dev to NULL" is also needed by ip6_tunnel. + +Signed-off-by: Xin Long +Signed-off-by: David S. 
Miller +Acked-by: Takashi Iwai + +--- + include/net/ip6_tunnel.h | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h +index 69b4bcf880c9..028eaea1c854 100644 +--- a/include/net/ip6_tunnel.h ++++ b/include/net/ip6_tunnel.h +@@ -158,9 +158,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); + pkt_len = skb->len - skb_inner_network_offset(skb); + err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); +- if (unlikely(net_xmit_eval(err))) +- pkt_len = -1; +- iptunnel_xmit_stats(dev, pkt_len); ++ ++ if (dev) { ++ if (unlikely(net_xmit_eval(err))) ++ pkt_len = -1; ++ iptunnel_xmit_stats(dev, pkt_len); ++ } + } + #endif + #endif +-- +2.16.4 + diff --git a/patches.suse/ip_tunnel-allow-not-to-count-pkts-on-tstats-by-setti.patch b/patches.suse/ip_tunnel-allow-not-to-count-pkts-on-tstats-by-setti.patch new file mode 100644 index 0000000..9f07e42 --- /dev/null +++ b/patches.suse/ip_tunnel-allow-not-to-count-pkts-on-tstats-by-setti.patch @@ -0,0 +1,46 @@ +From 5684abf7020dfc5f0b6ba1d68eda3663871fce52 Mon Sep 17 00:00:00 2001 +From: Xin Long +Date: Mon, 17 Jun 2019 21:34:13 +0800 +Subject: [PATCH] ip_tunnel: allow not to count pkts on tstats by setting skb's dev to NULL +Git-commit: 5684abf7020dfc5f0b6ba1d68eda3663871fce52 +Patch-mainline: v5.2-rc6 +References: bsc#1175515 + +iptunnel_xmit() works as a common function, also used by a udp tunnel +which doesn't have to have a tunnel device, like how TIPC works with +udp media. + +In these cases, we should allow not to count pkts on dev's tstats, so +that udp tunnel can work with no tunnel device safely. + +Signed-off-by: Xin Long +Signed-off-by: David S. Miller +Acked-by: Takashi Iwai + +--- + net/ipv4/ip_tunnel_core.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c +index 9e3846388fb3..1452a97914a0 100644 +--- a/net/ipv4/ip_tunnel_core.c ++++ b/net/ipv4/ip_tunnel_core.c +@@ -76,9 +76,12 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, + __ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1); + + err = ip_local_out(net, sk, skb); +- if (unlikely(net_xmit_eval(err))) +- pkt_len = 0; +- iptunnel_xmit_stats(dev, pkt_len); ++ ++ if (dev) { ++ if (unlikely(net_xmit_eval(err))) ++ pkt_len = 0; ++ iptunnel_xmit_stats(dev, pkt_len); ++ } + } + EXPORT_SYMBOL_GPL(iptunnel_xmit); + +-- +2.16.4 + diff --git a/patches.suse/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch b/patches.suse/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch new file mode 100644 index 0000000..630b016 --- /dev/null +++ b/patches.suse/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch @@ -0,0 +1,69 @@ +From 71e843295c680898959b22dc877ae3839cc22470 Mon Sep 17 00:00:00 2001 +From: Wei Yongjun +Date: Thu, 20 Aug 2020 17:42:14 -0700 +Subject: [PATCH] kernel/relay.c: fix memleak on destroy relay channel +Git-commit: 71e843295c680898959b22dc877ae3839cc22470 +Patch-mainline: v5.9-rc2 +References: git-fixes + +kmemleak report memory leak as follows: + + unreferenced object 0x607ee4e5f948 (size 8): + comm "syz-executor.1", pid 2098, jiffies 4295031601 (age 288.468s) + hex dump (first 8 bytes): + 00 00 00 00 00 00 00 00 ........ 
+ backtrace: + relay_open kernel/relay.c:583 [inline] + relay_open+0xb6/0x970 kernel/relay.c:563 + do_blk_trace_setup+0x4a8/0xb20 kernel/trace/blktrace.c:557 + __blk_trace_setup+0xb6/0x150 kernel/trace/blktrace.c:597 + blk_trace_ioctl+0x146/0x280 kernel/trace/blktrace.c:738 + blkdev_ioctl+0xb2/0x6a0 block/ioctl.c:613 + block_ioctl+0xe5/0x120 fs/block_dev.c:1871 + vfs_ioctl fs/ioctl.c:48 [inline] + __do_sys_ioctl fs/ioctl.c:753 [inline] + __se_sys_ioctl fs/ioctl.c:739 [inline] + __x64_sys_ioctl+0x170/0x1ce fs/ioctl.c:739 + do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46 + entry_SYSCALL_64_after_hwframe+0x44/0xa9 + +'chan->buf' is malloced in relay_open() by alloc_percpu() but not free +while destroy the relay channel. Fix it by adding free_percpu() before +return from relay_destroy_channel(). + +Fixes: 017c59c042d0 ("relay: Use per CPU constructs for the relay channel buffer pointers") +Reported-by: Hulk Robot +Signed-off-by: Wei Yongjun +Signed-off-by: Andrew Morton +Reviewed-by: Chris Wilson +Cc: Al Viro +Cc: Michael Ellerman +Cc: David Rientjes +Cc: Michel Lespinasse +Cc: Daniel Axtens +Cc: Thomas Gleixner +Cc: Akash Goel +Cc: +Link: http://lkml.kernel.org/r/20200817122826.48518-1-weiyongjun1@huawei.com +Signed-off-by: Linus Torvalds +Acked-by: Takashi Iwai + +--- + kernel/relay.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/kernel/relay.c b/kernel/relay.c +index 72fe443ea78f..fb4e0c530c08 100644 +--- a/kernel/relay.c ++++ b/kernel/relay.c +@@ -197,6 +197,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) + static void relay_destroy_channel(struct kref *kref) + { + struct rchan *chan = container_of(kref, struct rchan, kref); ++ free_percpu(chan->buf); + kfree(chan); + } + +-- +2.16.4 + diff --git a/patches.suse/media-budget-core-Improve-exception-handling-in-budg.patch b/patches.suse/media-budget-core-Improve-exception-handling-in-budg.patch new file mode 100644 index 0000000..44d7e45 --- /dev/null +++ b/patches.suse/media-budget-core-Improve-exception-handling-in-budg.patch @@ -0,0 +1,56 @@ +From fc0456458df8b3421dba2a5508cd817fbc20ea71 Mon Sep 17 00:00:00 2001 +From: Chuhong Yuan +Date: Fri, 5 Jun 2020 18:17:28 +0200 +Subject: [PATCH] media: budget-core: Improve exception handling in budget_register() +Git-commit: fc0456458df8b3421dba2a5508cd817fbc20ea71 +Patch-mainline: v5.9-rc1 +References: git-fixes + +budget_register() has no error handling after its failure. +Add the missed undo functions for error handling to fix it. 
+ +Signed-off-by: Chuhong Yuan +Signed-off-by: Sean Young +Signed-off-by: Mauro Carvalho Chehab +Acked-by: Takashi Iwai + +--- + drivers/media/pci/ttpci/budget-core.c | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + +diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c +index fadbdeeb4495..293867b9e796 100644 +--- a/drivers/media/pci/ttpci/budget-core.c ++++ b/drivers/media/pci/ttpci/budget-core.c +@@ -369,20 +369,25 @@ static int budget_register(struct budget *budget) + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend); + + if (ret < 0) +- return ret; ++ goto err_release_dmx; + + budget->mem_frontend.source = DMX_MEMORY_FE; + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend); + if (ret < 0) +- return ret; ++ goto err_release_dmx; + + ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend); + if (ret < 0) +- return ret; ++ goto err_release_dmx; + + dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx); + + return 0; ++ ++err_release_dmx: ++ dvb_dmxdev_release(&budget->dmxdev); ++ dvb_dmx_release(&budget->demux); ++ return ret; + } + + static void budget_unregister(struct budget *budget) +-- +2.16.4 + diff --git a/patches.suse/media-vpss-clean-up-resources-in-init.patch b/patches.suse/media-vpss-clean-up-resources-in-init.patch new file mode 100644 index 0000000..14d17c3 --- /dev/null +++ b/patches.suse/media-vpss-clean-up-resources-in-init.patch @@ -0,0 +1,66 @@ +From 9c487b0b0ea7ff22127fe99a7f67657d8730ff94 Mon Sep 17 00:00:00 2001 +From: Evgeny Novikov +Date: Fri, 10 Jul 2020 11:02:23 +0200 +Subject: [PATCH] media: vpss: clean up resources in init +Git-commit: 9c487b0b0ea7ff22127fe99a7f67657d8730ff94 +Patch-mainline: v5.9-rc1 +References: git-fixes + +If platform_driver_register() fails within vpss_init() resources are not +cleaned up. The patch fixes this issue by introducing the corresponding +error handling. + +Found by Linux Driver Verification project (linuxtesting.org). 
+ +Signed-off-by: Evgeny Novikov +Signed-off-by: Hans Verkuil +Signed-off-by: Mauro Carvalho Chehab +Acked-by: Takashi Iwai + +--- + drivers/media/platform/davinci/vpss.c | 20 ++++++++++++++++---- + 1 file changed, 16 insertions(+), 4 deletions(-) + +diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c +index d38d2bbb6f0f..7000f0bf0b35 100644 +--- a/drivers/media/platform/davinci/vpss.c ++++ b/drivers/media/platform/davinci/vpss.c +@@ -505,19 +505,31 @@ static void vpss_exit(void) + + static int __init vpss_init(void) + { ++ int ret; ++ + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control")) + return -EBUSY; + + oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); + if (unlikely(!oper_cfg.vpss_regs_base2)) { +- release_mem_region(VPSS_CLK_CTRL, 4); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_ioremap; + } + + writel(VPSS_CLK_CTRL_VENCCLKEN | +- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); ++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); ++ ++ ret = platform_driver_register(&vpss_driver); ++ if (ret) ++ goto err_pd_register; ++ ++ return 0; + +- return platform_driver_register(&vpss_driver); ++err_pd_register: ++ iounmap(oper_cfg.vpss_regs_base2); ++err_ioremap: ++ release_mem_region(VPSS_CLK_CTRL, 4); ++ return ret; + } + subsys_initcall(vpss_init); + module_exit(vpss_exit); +-- +2.16.4 + diff --git a/patches.suse/msft-hv-1903-x86-hyperv-Create-and-use-Hyper-V-page-definitions.patch b/patches.suse/msft-hv-1903-x86-hyperv-Create-and-use-Hyper-V-page-definitions.patch new file mode 100644 index 0000000..53bd580 --- /dev/null +++ b/patches.suse/msft-hv-1903-x86-hyperv-Create-and-use-Hyper-V-page-definitions.patch @@ -0,0 +1,43 @@ +From: Maya Nakamura +Date: Fri, 12 Jul 2019 08:14:47 +0000 +Patch-mainline: v5.4-rc1 +Subject: x86/hyperv: Create and use Hyper-V page definitions +Git-commit: fcd3f6222a4ece735d0b3ffb93f646eff693aa69 +References: git-fixes + +Define HV_HYP_PAGE_SHIFT, HV_HYP_PAGE_SIZE, and HV_HYP_PAGE_MASK because +the Linux guest page size and hypervisor page size concepts are different, +even though they happen to be the same value on x86. + +Also, replace PAGE_SIZE with HV_HYP_PAGE_SIZE. + +Signed-off-by: Maya Nakamura +Signed-off-by: Thomas Gleixner +Reviewed-by: Michael Kelley +Reviewed-by: Vitaly Kuznetsov +Link: https://lkml.kernel.org/r/e95111629abf65d016e983f72494cbf110ce605f.1562916939.git.m.maya.nakamura@gmail.com +Acked-by: Olaf Hering +--- + arch/x86/include/asm/hyperv-tlfs.h | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h +--- a/arch/x86/include/asm/hyperv-tlfs.h ++++ b/arch/x86/include/asm/hyperv-tlfs.h +@@ -12,6 +12,16 @@ + #include + #include + ++/* ++ * While not explicitly listed in the TLFS, Hyper-V always runs with a page size ++ * of 4096. These definitions are used when communicating with Hyper-V using ++ * guest physical pages and guest physical page addresses, since the guest page ++ * size may not be 4096 on all architectures. ++ */ ++#define HV_HYP_PAGE_SHIFT 12 ++#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT) ++#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1)) ++ + /* + * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent + * is set by CPUID(HvCpuIdFunctionVersionAndFeatures). 
diff --git a/patches.suse/msft-hv-1907-hv_balloon-Use-a-static-page-for-the-balloon_up-send.patch b/patches.suse/msft-hv-1907-hv_balloon-Use-a-static-page-for-the-balloon_up-send.patch new file mode 100644 index 0000000..3112d5a --- /dev/null +++ b/patches.suse/msft-hv-1907-hv_balloon-Use-a-static-page-for-the-balloon_up-send.patch @@ -0,0 +1,78 @@ +From: Dexuan Cui +Date: Fri, 14 Jun 2019 18:42:17 +0000 +Patch-mainline: v5.4-rc1 +Subject: hv_balloon: Use a static page for the balloon_up send buffer +Git-commit: 1fed17df7e501b170f876b87eec11f930e3f1df5 +References: git-fixes + +It's unnecessary to dynamically allocate the buffer. + +Signed-off-by: Dexuan Cui +Reviewed-by: Michael Kelley +Signed-off-by: Sasha Levin +Acked-by: Olaf Hering +--- + drivers/hv/hv_balloon.c | 19 ++++--------------- + 1 file changed, 4 insertions(+), 15 deletions(-) + +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -494,7 +494,7 @@ enum hv_dm_state { + + + static __u8 recv_buffer[PAGE_SIZE]; +-static __u8 *send_buffer; ++static __u8 balloon_up_send_buffer[PAGE_SIZE]; + #define PAGES_IN_2M 512 + #define HA_CHUNK (32 * 1024) + +@@ -1292,8 +1292,8 @@ static void balloon_up(struct work_struct *dummy) + } + + while (!done) { +- bl_resp = (struct dm_balloon_response *)send_buffer; +- memset(send_buffer, 0, PAGE_SIZE); ++ memset(balloon_up_send_buffer, 0, PAGE_SIZE); ++ bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer; + bl_resp->hdr.type = DM_BALLOON_RESPONSE; + bl_resp->hdr.size = sizeof(struct dm_balloon_response); + bl_resp->more_pages = 1; +@@ -1578,19 +1578,11 @@ static int balloon_probe(struct hv_device *dev, + do_hot_add = false; + #endif + +- /* +- * First allocate a send buffer. +- */ +- +- send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); +- if (!send_buffer) +- return -ENOMEM; +- + ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0, + balloon_onchannelcallback, dev); + + if (ret) +- goto probe_error0; ++ return ret; + + dm_device.dev = dev; + dm_device.state = DM_INITIALIZING; +@@ -1716,8 +1708,6 @@ probe_error2: + + probe_error1: + vmbus_close(dev->channel); +-probe_error0: +- kfree(send_buffer); + return ret; + } + +@@ -1736,7 +1726,6 @@ static int balloon_remove(struct hv_device *dev) + + vmbus_close(dev->channel); + kthread_stop(dm->thread); +- kfree(send_buffer); + #ifdef CONFIG_MEMORY_HOTPLUG + restore_online_page_callback(&hv_online_page); + unregister_memory_notifier(&hv_memory_nb); diff --git a/patches.suse/msft-hv-1910-hv_netvsc-Fix-a-warning-of-suspicious-RCU-usage.patch b/patches.suse/msft-hv-1910-hv_netvsc-Fix-a-warning-of-suspicious-RCU-usage.patch new file mode 100644 index 0000000..3ba5497 --- /dev/null +++ b/patches.suse/msft-hv-1910-hv_netvsc-Fix-a-warning-of-suspicious-RCU-usage.patch @@ -0,0 +1,48 @@ +From: Dexuan Cui +Date: Fri, 9 Aug 2019 01:58:08 +0000 +Patch-mainline: v5.3-rc6 +Subject: hv_netvsc: Fix a warning of suspicious RCU usage +Git-commit: 6d0d779dca73cd5acb649c54f81401f93098b298 +References: git-fixes + +This fixes a warning of "suspicious rcu_dereference_check() usage" +when nload runs. + +Fixes: 776e726bfb34 ("netvsc: fix RCU warning in get_stats") +Signed-off-by: Dexuan Cui +Signed-off-by: David S. 
Miller +Acked-by: Olaf Hering +--- + drivers/net/hyperv/netvsc_drv.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net, + struct rtnl_link_stats64 *t) + { + struct net_device_context *ndev_ctx = netdev_priv(net); +- struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); ++ struct netvsc_device *nvdev; + struct netvsc_vf_pcpu_stats vf_tot; + int i; + ++ rcu_read_lock(); ++ ++ nvdev = rcu_dereference(ndev_ctx->nvdev); + if (!nvdev) +- return; ++ goto out; + + netdev_stats_to_stats64(t, &net->stats); + +@@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net, + t->rx_packets += packets; + t->multicast += multicast; + } ++out: ++ rcu_read_unlock(); + } + + static int netvsc_set_mac_addr(struct net_device *ndev, void *p) diff --git a/patches.suse/msft-hv-1945-hv_netvsc-Allow-scatter-gather-feature-to-be-tunable.patch b/patches.suse/msft-hv-1945-hv_netvsc-Allow-scatter-gather-feature-to-be-tunable.patch new file mode 100644 index 0000000..69181e4 --- /dev/null +++ b/patches.suse/msft-hv-1945-hv_netvsc-Allow-scatter-gather-feature-to-be-tunable.patch @@ -0,0 +1,58 @@ +From: Haiyang Zhang +Date: Thu, 5 Sep 2019 23:23:07 +0000 +Patch-mainline: v5.4-rc1 +Subject: hv_netvsc: Allow scatter-gather feature to be tunable +Git-commit: b441f79532ec13dc82d05c55badc4da1f62a6141 +References: git-fixes + +In a previous patch, the NETIF_F_SG was missing after the code changes. +That caused the SG feature to be "fixed". This patch includes it into +hw_features, so it is tunable again. + +Fixes: 23312a3be999 ("netvsc: negotiate checksum and segmentation parameters") +Signed-off-by: Haiyang Zhang +Signed-off-by: David S. 
Miller +Acked-by: Olaf Hering +--- + drivers/net/hyperv/hyperv_net.h | 2 +- + drivers/net/hyperv/netvsc_drv.c | 4 ++-- + drivers/net/hyperv/rndis_filter.c | 1 + + 3 files changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h +--- a/drivers/net/hyperv/hyperv_net.h ++++ b/drivers/net/hyperv/hyperv_net.h +@@ -822,7 +822,7 @@ struct nvsp_message { + + #define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \ + NETIF_F_TSO | NETIF_F_IPV6_CSUM | \ +- NETIF_F_TSO6 | NETIF_F_LRO) ++ NETIF_F_TSO6 | NETIF_F_LRO | NETIF_F_SG) + + #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ + #define VRSS_CHANNEL_MAX 64 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -2313,8 +2313,8 @@ static int netvsc_probe(struct hv_device *dev, + + /* hw_features computed in rndis_netdev_set_hwcaps() */ + net->features = net->hw_features | +- NETIF_F_HIGHDMA | NETIF_F_SG | +- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; ++ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | ++ NETIF_F_HW_VLAN_CTAG_RX; + net->vlan_features = net->features; + + netdev_lockdep_set_classes(net); +diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c +--- a/drivers/net/hyperv/rndis_filter.c ++++ b/drivers/net/hyperv/rndis_filter.c +@@ -1207,6 +1207,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, + + /* Compute tx offload settings based on hw capabilities */ + net->hw_features |= NETIF_F_RXCSUM; ++ net->hw_features |= NETIF_F_SG; + + if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) { + /* Can checksum TCP */ diff --git a/patches.suse/msft-hv-1961-hv_netvsc-flag-software-created-hash-value.patch b/patches.suse/msft-hv-1961-hv_netvsc-flag-software-created-hash-value.patch new file mode 100644 index 0000000..7317412 --- /dev/null +++ b/patches.suse/msft-hv-1961-hv_netvsc-flag-software-created-hash-value.patch @@ -0,0 +1,44 @@ +From: Stephen Hemminger +Date: Fri, 1 Nov 2019 16:42:37 -0700 +Patch-mainline: v5.5-rc1 +Subject: hv_netvsc: flag software created hash value +Git-commit: df9f540ca74297a84bafacfa197e9347b20beea5 +References: git-fixes + +When the driver needs to create a hash value because it +was not done at higher level, then the hash should be marked +as a software not hardware hash. + +Fixes: f72860afa2e3 ("hv_netvsc: Exclude non-TCP port numbers from vRSS hashing") +Signed-off-by: Stephen Hemminger +Signed-off-by: David S. Miller +Acked-by: Olaf Hering +--- + drivers/net/hyperv/netvsc_drv.c | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -285,9 +285,9 @@ static inline u32 netvsc_get_hash( + else if (flow.basic.n_proto == htons(ETH_P_IPV6)) + hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd); + else +- hash = 0; ++ return 0; + +- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); ++ __skb_set_sw_hash(skb, hash, false); + } + + return hash; +@@ -795,8 +795,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, + skb->protocol == htons(ETH_P_IP)) + netvsc_comp_ipcsum(skb); + +- /* Do L4 checksum offload if enabled and present. +- */ ++ /* Do L4 checksum offload if enabled and present. 
*/ + if (csum_info && (net->features & NETIF_F_RXCSUM)) { + if (csum_info->receive.tcp_checksum_succeeded || + csum_info->receive.udp_checksum_succeeded) diff --git a/patches.suse/msft-hv-1980-Drivers-hv-balloon-Remove-dependencies-on-guest-page.patch b/patches.suse/msft-hv-1980-Drivers-hv-balloon-Remove-dependencies-on-guest-page.patch new file mode 100644 index 0000000..fe6df1e --- /dev/null +++ b/patches.suse/msft-hv-1980-Drivers-hv-balloon-Remove-dependencies-on-guest-page.patch @@ -0,0 +1,115 @@ +From: Himadri Pandya +Date: Sat, 17 Aug 2019 04:08:50 +0000 +Patch-mainline: v5.5-rc1 +Subject: Drivers: hv: balloon: Remove dependencies on guest page size +Git-commit: 2af5e7b7b230f3c6dcc9d72bb7eeda6e1df1f5fa +References: git-fixes + +Hyper-V assumes page size to be 4K. This might not be the case for +ARM64 architecture. Hence use hyper-v specific page size and page +shift definitions to avoid conflicts between different host and guest +page sizes on ARM64. + +Also, remove some old and incorrect comments and redefine ballooning +granularities to handle larger page sizes correctly. + +Signed-off-by: Himadri Pandya +Reviewed-by: Michael Kelley +Signed-off-by: Sasha Levin +Acked-by: Olaf Hering +--- + drivers/hv/hv_balloon.c | 25 ++++++++++++------------- + 1 file changed, 12 insertions(+), 13 deletions(-) + +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -23,6 +23,7 @@ + #include + + #include ++#include + + #define CREATE_TRACE_POINTS + #include "hv_trace_balloon.h" +@@ -341,8 +342,6 @@ struct dm_unballoon_response { + * + * mem_range: Memory range to hot add. + * +- * On Linux we currently don't support this since we cannot hot add +- * arbitrary granularity of memory. + */ + + struct dm_hot_add { +@@ -477,7 +476,7 @@ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR)); + MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure"); + static atomic_t trans_id = ATOMIC_INIT(0); + +-static int dm_ring_size = (5 * PAGE_SIZE); ++static int dm_ring_size = 20 * 1024; + + /* + * Driver specific state. +@@ -493,10 +492,10 @@ enum hv_dm_state { + }; + + +-static __u8 recv_buffer[PAGE_SIZE]; +-static __u8 balloon_up_send_buffer[PAGE_SIZE]; +-#define PAGES_IN_2M 512 +-#define HA_CHUNK (32 * 1024) ++static __u8 recv_buffer[HV_HYP_PAGE_SIZE]; ++static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE]; ++#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE) ++#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE) + + struct hv_dynmem_device { + struct hv_device *dev; +@@ -1076,7 +1075,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg) + __u64 *max_page_count = (__u64 *)&info_hdr[1]; + + pr_info("Max. dynamic memory size: %llu MB\n", +- (*max_page_count) >> (20 - PAGE_SHIFT)); ++ (*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT)); + } + + break; +@@ -1218,7 +1217,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, + + for (i = 0; (i * alloc_unit) < num_pages; i++) { + if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) > +- PAGE_SIZE) ++ HV_HYP_PAGE_SIZE) + return i * alloc_unit; + + /* +@@ -1274,9 +1273,9 @@ static void balloon_up(struct work_struct *dummy) + + /* + * We will attempt 2M allocations. However, if we fail to +- * allocate 2M chunks, we will go back to 4k allocations. ++ * allocate 2M chunks, we will go back to PAGE_SIZE allocations. 
+ */ +- alloc_unit = 512; ++ alloc_unit = PAGES_IN_2M; + + avail_pages = si_mem_available(); + floor = compute_balloon_floor(); +@@ -1292,7 +1291,7 @@ static void balloon_up(struct work_struct *dummy) + } + + while (!done) { +- memset(balloon_up_send_buffer, 0, PAGE_SIZE); ++ memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE); + bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer; + bl_resp->hdr.type = DM_BALLOON_RESPONSE; + bl_resp->hdr.size = sizeof(struct dm_balloon_response); +@@ -1491,7 +1490,7 @@ static void balloon_onchannelcallback(void *context) + + memset(recv_buffer, 0, sizeof(recv_buffer)); + vmbus_recvpacket(dev->channel, recv_buffer, +- PAGE_SIZE, &recvlen, &requestid); ++ HV_HYP_PAGE_SIZE, &recvlen, &requestid); + + if (recvlen > 0) { + dm_msg = (struct dm_message *)recv_buffer; diff --git a/patches.suse/msft-hv-2011-hv_balloon-Balloon-up-according-to-request-page-numb.patch b/patches.suse/msft-hv-2011-hv_balloon-Balloon-up-according-to-request-page-numb.patch new file mode 100644 index 0000000..2f48c2f --- /dev/null +++ b/patches.suse/msft-hv-2011-hv_balloon-Balloon-up-according-to-request-page-numb.patch @@ -0,0 +1,75 @@ +From: Tianyu Lan +Date: Sat, 25 Jan 2020 16:50:47 -0500 +Patch-mainline: v5.6-rc1 +Subject: hv_balloon: Balloon up according to request page number +Git-commit: d33c240d47dab4fd15123d9e73fc8810cbc6ed6a +References: git-fixes + +Current code has assumption that balloon request memory size aligns +with 2MB. But actually Hyper-V doesn't guarantee such alignment. When +balloon driver receives non-aligned balloon request, it produces warning +and balloon up more memory than requested in order to keep 2MB alignment. +Remove the warning and balloon up memory according to actual requested +memory size. + +Fixes: f6712238471a ("hv: hv_balloon: avoid memory leak on alloc_error of 2MB memory block") +Cc: stable@vger.kernel.org +Reviewed-by: Vitaly Kuznetsov +Signed-off-by: Tianyu Lan +Reviewed-by: Michael Kelley +Signed-off-by: Sasha Levin +Acked-by: Olaf Hering +--- + drivers/hv/hv_balloon.c | 13 +++---------- + 1 file changed, 3 insertions(+), 10 deletions(-) + +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c +--- a/drivers/hv/hv_balloon.c ++++ b/drivers/hv/hv_balloon.c +@@ -1217,10 +1217,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, + unsigned int i = 0; + struct page *pg; + +- if (num_pages < alloc_unit) +- return 0; +- +- for (i = 0; (i * alloc_unit) < num_pages; i++) { ++ for (i = 0; i < num_pages / alloc_unit; i++) { + if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) > + HV_HYP_PAGE_SIZE) + return i * alloc_unit; +@@ -1258,7 +1255,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, + + } + +- return num_pages; ++ return i * alloc_unit; + } + + static void balloon_up(struct work_struct *dummy) +@@ -1273,9 +1270,6 @@ static void balloon_up(struct work_struct *dummy) + long avail_pages; + unsigned long floor; + +- /* The host balloons pages in 2M granularity. */ +- WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0); +- + /* + * We will attempt 2M allocations. However, if we fail to + * allocate 2M chunks, we will go back to PAGE_SIZE allocations. +@@ -1285,14 +1279,13 @@ static void balloon_up(struct work_struct *dummy) + avail_pages = si_mem_available(); + floor = compute_balloon_floor(); + +- /* Refuse to balloon below the floor, keep the 2M granularity. */ ++ /* Refuse to balloon below the floor. 
*/ + if (avail_pages < num_pages || avail_pages - num_pages < floor) { + pr_warn("Balloon request will be partially fulfilled. %s\n", + avail_pages < num_pages ? "Not enough memory." : + "Balloon floor reached."); + + num_pages = avail_pages > floor ? (avail_pages - floor) : 0; +- num_pages -= num_pages % PAGES_IN_2M; + } + + while (!done) { diff --git a/patches.suse/msft-hv-2126-hv_netvsc-do-not-use-VF-device-if-link-is-down.patch b/patches.suse/msft-hv-2126-hv_netvsc-do-not-use-VF-device-if-link-is-down.patch new file mode 100644 index 0000000..5e72963 --- /dev/null +++ b/patches.suse/msft-hv-2126-hv_netvsc-do-not-use-VF-device-if-link-is-down.patch @@ -0,0 +1,44 @@ +From: Stephen Hemminger +Date: Tue, 4 Aug 2020 09:54:15 -0700 +Patch-mainline: v5.9-rc1 +Subject: hv_netvsc: do not use VF device if link is down +Git-commit: 7c9864bbccc23e1812ac82966555d68c13ea4006 +References: git-fixes + +If the accelerated networking SRIOV VF device has lost carrier +use the synthetic network device which is available as backup +path. This is a rare case since if VF link goes down, normally +the VMBus device will also loose external connectivity as well. +But if the communication is between two VM's on the same host +the VMBus device will still work. + +Reported-by: "Shah, Ashish N" +Fixes: 0c195567a8f6 ("netvsc: transparent VF management") +Signed-off-by: Stephen Hemminger +Reviewed-by: Haiyang Zhang +Signed-off-by: David S. Miller +Acked-by: Olaf Hering +--- + drivers/net/hyperv/netvsc_drv.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -532,12 +532,13 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx) + u32 hash; + struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; + +- /* if VF is present and up then redirect packets +- * already called with rcu_read_lock_bh ++ /* If VF is present and up then redirect packets to it. ++ * Skip the VF if it is marked down or has no carrier. ++ * If netpoll is in uses, then VF can not be used either. + */ + vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); + if (vf_netdev && netif_running(vf_netdev) && +- !netpoll_tx_running(net)) ++ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net)) + return netvsc_vf_xmit(net, vf_netdev, skb); + + /* We will atmost need two pages to describe the rndis diff --git a/patches.suse/msft-hv-2130-hv_netvsc-Remove-unlikely-from-netvsc_select_queue.patch b/patches.suse/msft-hv-2130-hv_netvsc-Remove-unlikely-from-netvsc_select_queue.patch new file mode 100644 index 0000000..d334ea3 --- /dev/null +++ b/patches.suse/msft-hv-2130-hv_netvsc-Remove-unlikely-from-netvsc_select_queue.patch @@ -0,0 +1,32 @@ +From: Haiyang Zhang +Date: Thu, 20 Aug 2020 14:53:14 -0700 +Patch-mainline: v5.9-rc2 +Subject: hv_netvsc: Remove "unlikely" from netvsc_select_queue +Git-commit: 4d820543c54c47a2bd3c95ddbf52f83c89a219a0 +References: git-fixes + +When using vf_ops->ndo_select_queue, the number of queues of VF is +usually bigger than the synthetic NIC. This condition may happen +often. +Remove "unlikely" from the comparison of ndev->real_num_tx_queues. + +Fixes: b3bf5666a510 ("hv_netvsc: defer queue selection to VF") +Signed-off-by: Haiyang Zhang +Signed-off-by: David S. 
Miller +Acked-by: Olaf Hering +--- + drivers/net/hyperv/netvsc_drv.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -367,7 +367,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, + } + rcu_read_unlock(); + +- while (unlikely(txq >= ndev->real_num_tx_queues)) ++ while (txq >= ndev->real_num_tx_queues) + txq -= ndev->real_num_tx_queues; + + return txq; diff --git a/patches.suse/msft-hv-2131-hv_netvsc-Fix-the-queue_mapping-in-netvsc_vf_xmit.patch b/patches.suse/msft-hv-2131-hv_netvsc-Fix-the-queue_mapping-in-netvsc_vf_xmit.patch new file mode 100644 index 0000000..0033123 --- /dev/null +++ b/patches.suse/msft-hv-2131-hv_netvsc-Fix-the-queue_mapping-in-netvsc_vf_xmit.patch @@ -0,0 +1,39 @@ +From: Haiyang Zhang +Date: Thu, 20 Aug 2020 14:53:15 -0700 +Patch-mainline: v5.9-rc2 +Subject: hv_netvsc: Fix the queue_mapping in netvsc_vf_xmit() +Git-commit: c3d897e01aef8ddc43149e4d661b86f823e3aae7 +References: git-fixes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +netvsc_vf_xmit() / dev_queue_xmit() will call VF NIC’s ndo_select_queue +or netdev_pick_tx() again. They will use skb_get_rx_queue() to get the +queue number, so the “skb->queue_mapping - 1” will be used. This may +cause the last queue of VF not been used. + +Use skb_record_rx_queue() here, so that the skb_get_rx_queue() called +later will get the correct queue number, and VF will be able to use +all queues. + +Fixes: b3bf5666a510 ("hv_netvsc: defer queue selection to VF") +Signed-off-by: Haiyang Zhang +Signed-off-by: David S. Miller +Acked-by: Olaf Hering +--- + drivers/net/hyperv/netvsc_drv.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -502,7 +502,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, + int rc; + + skb->dev = vf_netdev; +- skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; ++ skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); + + rc = dev_queue_xmit(skb); + if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { diff --git a/patches.suse/net-ipv6-add-net-argument-to-ip6_dst_lookup_flow.patch b/patches.suse/net-ipv6-add-net-argument-to-ip6_dst_lookup_flow.patch new file mode 100644 index 0000000..fbad3cf --- /dev/null +++ b/patches.suse/net-ipv6-add-net-argument-to-ip6_dst_lookup_flow.patch @@ -0,0 +1,219 @@ +From: Sabrina Dubroca +Date: Wed, 4 Dec 2019 15:35:52 +0100 +Subject: net: ipv6: add net argument to ip6_dst_lookup_flow +Patch-mainline: v5.5-rc1 +Git-commit: c4e85f73afb6384123e5ef1bba3315b2e3ad031e +References: CVE-2020-1749 bsc#1165629 + +This will be used in the conversion of ipv6_stub to ip6_dst_lookup_flow, +as some modules currently pass a net argument without a socket to +ip6_dst_lookup. This is equivalent to commit 343d60aada5a ("ipv6: change +ipv6_stub_impl.ipv6_dst_lookup to take net argument"). + +Signed-off-by: Sabrina Dubroca +Signed-off-by: David S. 
Miller +Acked-by: Michal Kubecek + +--- + include/net/ipv6.h | 2 +- + net/dccp/ipv6.c | 6 +++--- + net/ipv6/af_inet6.c | 2 +- + net/ipv6/datagram.c | 2 +- + net/ipv6/inet6_connection_sock.c | 4 ++-- + net/ipv6/ip6_output.c | 8 ++++---- + net/ipv6/raw.c | 2 +- + net/ipv6/syncookies.c | 2 +- + net/ipv6/tcp_ipv6.c | 4 ++-- + net/l2tp/l2tp_ip6.c | 2 +- + net/sctp/ipv6.c | 4 ++-- + 11 files changed, 19 insertions(+), 19 deletions(-) + +--- a/include/net/ipv6.h ++++ b/include/net/ipv6.h +@@ -919,7 +919,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) + + int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, + struct flowi6 *fl6); +-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, ++struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, + const struct in6_addr *final_dst); + struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, + const struct in6_addr *final_dst); +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -209,7 +209,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req + final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); + rcu_read_unlock(); + +- dst = ip6_dst_lookup_flow(sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + dst = NULL; +@@ -280,7 +280,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) + security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); + + /* sk = NULL, but it is safe for now. RST socket required. */ +- dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); ++ dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); + if (!IS_ERR(dst)) { + skb_dst_set(skb, dst); + ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0); +@@ -888,7 +888,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, + opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); + final_p = fl6_update_dst(&fl6, opt, &final); + +- dst = ip6_dst_lookup_flow(sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto failure; +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -713,7 +713,7 @@ int inet6_sk_rebuild_header(struct sock *sk) + &final); + rcu_read_unlock(); + +- dst = ip6_dst_lookup_flow(sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + sk->sk_route_caps = 0; + sk->sk_err_soft = -PTR_ERR(dst); +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -88,7 +88,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr) + final_p = fl6_update_dst(&fl6, opt, &final); + rcu_read_unlock(); + +- dst = ip6_dst_lookup_flow(sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out; +--- a/net/ipv6/inet6_connection_sock.c ++++ b/net/ipv6/inet6_connection_sock.c +@@ -52,7 +52,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, + fl6->flowi6_uid = sk->sk_uid; + security_req_classify_flow(req, flowi6_to_flowi(fl6)); + +- dst = ip6_dst_lookup_flow(sk, fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); + if (IS_ERR(dst)) + return NULL; + +@@ -107,7 +107,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, + + dst = __inet6_csk_dst_check(sk, np->dst_cookie); + if (!dst) { +- dst = ip6_dst_lookup_flow(sk, fl6, final_p); ++ 
dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); + + if (!IS_ERR(dst)) + ip6_dst_store(sk, dst, NULL, NULL); +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -1082,19 +1082,19 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); + * It returns a valid dst pointer on success, or a pointer encoded + * error code. + */ +-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, ++struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, + const struct in6_addr *final_dst) + { + struct dst_entry *dst = NULL; + int err; + +- err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); ++ err = ip6_dst_lookup_tail(net, sk, &dst, fl6); + if (err) + return ERR_PTR(err); + if (final_dst) + fl6->daddr = *final_dst; + +- return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); ++ return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0); + } + EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); + +@@ -1119,7 +1119,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, + + dst = ip6_sk_dst_check(sk, dst, fl6); + if (!dst) +- dst = ip6_dst_lookup_flow(sk, fl6, final_dst); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst); + + return dst; + } +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -926,7 +926,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + +- dst = ip6_dst_lookup_flow(sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out; +--- a/net/ipv6/syncookies.c ++++ b/net/ipv6/syncookies.c +@@ -237,7 +237,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) + fl6.flowi6_uid = sk->sk_uid; + security_req_classify_flow(req, flowi6_to_flowi(&fl6)); + +- dst = ip6_dst_lookup_flow(sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) + goto out_free; + } +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -252,7 +252,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, + + security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); + +- dst = ip6_dst_lookup_flow(sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto failure; +@@ -851,7 +851,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 + * Underlying function will use this to retrieve the network + * namespace + */ +- dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); ++ dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); + if (!IS_ERR(dst)) { + skb_dst_set(buff, dst); + ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass); +--- a/net/l2tp/l2tp_ip6.c ++++ b/net/l2tp/l2tp_ip6.c +@@ -625,7 +625,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + +- dst = ip6_dst_lookup_flow(sk, &fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto out; +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -270,7 +270,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); + rcu_read_unlock(); + +- dst = ip6_dst_lookup_flow(sk, fl6, final_p); ++ dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); + if 
(!asoc || saddr) + goto out; + +@@ -323,7 +323,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + fl6->saddr = laddr->a.v6.sin6_addr; + fl6->fl6_sport = laddr->a.v6.sin6_port; + final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); +- bdst = ip6_dst_lookup_flow(sk, fl6, final_p); ++ bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); + + if (IS_ERR(bdst)) + continue; diff --git a/patches.suse/net-ipv6_stub-use-ip6_dst_lookup_flow-instead-of-ip6.patch b/patches.suse/net-ipv6_stub-use-ip6_dst_lookup_flow-instead-of-ip6.patch new file mode 100644 index 0000000..6db6682 --- /dev/null +++ b/patches.suse/net-ipv6_stub-use-ip6_dst_lookup_flow-instead-of-ip6.patch @@ -0,0 +1,225 @@ +From: Sabrina Dubroca +Date: Wed, 4 Dec 2019 15:35:53 +0100 +Subject: net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup +Patch-mainline: v5.5-rc1 +Git-commit: 6c8991f41546c3c472503dff1ea9daaddf9331c2 +References: CVE-2020-1749 bsc#1165629 + +ipv6_stub uses the ip6_dst_lookup function to allow other modules to +perform IPv6 lookups. However, this function skips the XFRM layer +entirely. + +All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the +ip_route_output_key and ip_route_output helpers) for their IPv4 lookups, +which calls xfrm_lookup_route(). This patch fixes this inconsistent +behavior by switching the stub to ip6_dst_lookup_flow, which also calls +xfrm_lookup_route(). + +This requires some changes in all the callers, as these two functions +take different arguments and have different return types. + +Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan") +Reported-by: Xiumei Mu +Signed-off-by: Sabrina Dubroca +Signed-off-by: David S. Miller +Acked-by: Michal Kubecek + +--- + drivers/infiniband/core/addr.c | 7 +++---- + drivers/infiniband/sw/rxe/rxe_net.c | 8 +++++--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 9 ++++----- + drivers/net/geneve.c | 4 +++- + drivers/net/vxlan.c | 8 +++----- + include/net/addrconf.h | 6 ++++-- + net/ipv6/addrconf_core.c | 11 ++++++----- + net/ipv6/af_inet6.c | 2 +- + net/mpls/af_mpls.c | 7 +++---- + net/tipc/udp_media.c | 8 +++++--- + 10 files changed, 37 insertions(+), 33 deletions(-) + +--- a/drivers/infiniband/core/addr.c ++++ b/drivers/infiniband/core/addr.c +@@ -420,16 +420,15 @@ static int addr6_resolve(struct sockaddr *src_sock, + (const struct sockaddr_in6 *)dst_sock; + struct flowi6 fl6; + struct dst_entry *dst; +- int ret; + + memset(&fl6, 0, sizeof fl6); + fl6.daddr = dst_in->sin6_addr; + fl6.saddr = src_in->sin6_addr; + fl6.flowi6_oif = addr->bound_dev_if; + +- ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); +- if (ret < 0) +- return ret; ++ dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL); ++ if (IS_ERR(dst)) ++ return PTR_ERR(dst); + + if (ipv6_addr_any(&src_in->sin6_addr)) + src_in->sin6_addr = fl6.saddr; +--- a/drivers/infiniband/sw/rxe/rxe_net.c ++++ b/drivers/infiniband/sw/rxe/rxe_net.c +@@ -154,10 +154,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev, + memcpy(&fl6.daddr, daddr, sizeof(*daddr)); + fl6.flowi6_proto = IPPROTO_UDP; + +- if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk), +- recv_sockets.sk6->sk, &ndst, &fl6))) { ++ ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), ++ recv_sockets.sk6->sk, &fl6, ++ NULL); ++ if (unlikely(IS_ERR(ndst))) { + pr_err_ratelimited("no route to %pI6\n", daddr); +- goto put; ++ return NULL; + } + + if 
(unlikely(ndst->error)) { +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2384,12 +2384,11 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; +- int ret; + +- ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, +- fl6); +- if (ret < 0) +- return ret; ++ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, ++ NULL); ++ if (IS_ERR(dst)) ++ return PTR_ERR(dst); + + if (!(*out_ttl)) + *out_ttl = ip6_dst_hoplimit(dst); +--- a/drivers/net/geneve.c ++++ b/drivers/net/geneve.c +@@ -787,7 +787,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, + if (dst) + return dst; + } +- if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { ++ dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6, ++ NULL); ++ if (IS_ERR(dst)) { + netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr); + return ERR_PTR(-ENETUNREACH); + } +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -2023,7 +2023,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, + bool use_cache = ip_tunnel_dst_cache_usable(skb, info); + struct dst_entry *ndst; + struct flowi6 fl6; +- int err; + + if (!sock6) + return ERR_PTR(-EIO); +@@ -2046,10 +2045,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, + fl6.fl6_dport = dport; + fl6.fl6_sport = sport; + +- err = ipv6_stub->ipv6_dst_lookup(vxlan->net, +- sock6->sock->sk, +- &ndst, &fl6); +- if (unlikely(err < 0)) { ++ ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, ++ &fl6, NULL); ++ if (unlikely(IS_ERR(ndst))) { + netdev_dbg(dev, "no route to %pI6\n", daddr); + return ERR_PTR(-ENETUNREACH); + } +--- a/include/net/addrconf.h ++++ b/include/net/addrconf.h +@@ -223,8 +223,10 @@ struct ipv6_stub { + const struct in6_addr *addr); + int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex, + const struct in6_addr *addr); +- int (*ipv6_dst_lookup)(struct net *net, struct sock *sk, +- struct dst_entry **dst, struct flowi6 *fl6); ++ struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net, ++ const struct sock *sk, ++ struct flowi6 *fl6, ++ const struct in6_addr *final_dst); + void (*udpv6_encap_enable)(void); + void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr, + const struct in6_addr *solicited_addr, +--- a/net/ipv6/addrconf_core.c ++++ b/net/ipv6/addrconf_core.c +@@ -127,15 +127,16 @@ int inet6addr_validator_notifier_call_chain(unsigned long val, void *v) + } + EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain); + +-static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1, +- struct dst_entry **u2, +- struct flowi6 *u3) ++static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, ++ const struct sock *sk, ++ struct flowi6 *fl6, ++ const struct in6_addr *final_dst) + { +- return -EAFNOSUPPORT; ++ return ERR_PTR(-EAFNOSUPPORT); + } + + const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { +- .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup, ++ .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, + }; + EXPORT_SYMBOL_GPL(ipv6_stub); + +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -898,7 +898,7 @@ static struct pernet_operations inet6_net_ops = { + static const struct ipv6_stub ipv6_stub_impl = { + .ipv6_sock_mc_join = ipv6_sock_mc_join, + 
.ipv6_sock_mc_drop = ipv6_sock_mc_drop, +- .ipv6_dst_lookup = ip6_dst_lookup, ++ .ipv6_dst_lookup_flow = ip6_dst_lookup_flow, + .udpv6_encap_enable = udpv6_encap_enable, + .ndisc_send_na = ndisc_send_na, + .nd_tbl = &nd_tbl, +--- a/net/mpls/af_mpls.c ++++ b/net/mpls/af_mpls.c +@@ -586,16 +586,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net, + struct net_device *dev; + struct dst_entry *dst; + struct flowi6 fl6; +- int err; + + if (!ipv6_stub) + return ERR_PTR(-EAFNOSUPPORT); + + memset(&fl6, 0, sizeof(fl6)); + memcpy(&fl6.daddr, addr, sizeof(struct in6_addr)); +- err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6); +- if (err) +- return ERR_PTR(err); ++ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL); ++ if (IS_ERR(dst)) ++ return ERR_CAST(dst); + + dev = dst->dev; + dev_hold(dev); +--- a/net/tipc/udp_media.c ++++ b/net/tipc/udp_media.c +@@ -187,10 +187,12 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, + .saddr = src->ipv6, + .flowi6_proto = IPPROTO_UDP + }; +- err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst, +- &fl6); +- if (err) ++ ndst = ipv6_stub->ipv6_dst_lookup_flow(net, ub->ubsock->sk, ++ &fl6, NULL); ++ if (IS_ERR(ndst)) { ++ err = PTR_ERR(ndst); + goto tx_error; ++ } + ttl = ip6_dst_hoplimit(ndst); + err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL, + &src->ipv6, &dst->ipv6, 0, ttl, 0, diff --git a/patches.suse/scsi-Fix-trivial-spelling.patch b/patches.suse/scsi-Fix-trivial-spelling.patch new file mode 100644 index 0000000..890d4dc --- /dev/null +++ b/patches.suse/scsi-Fix-trivial-spelling.patch @@ -0,0 +1,85 @@ +From: Kieran Bingham +Date: Tue, 9 Jun 2020 13:45:59 +0100 +Subject: scsi: Fix trivial spelling +Patch-mainline: v5.9-rc1 +Git-commit: 0a19a725c0ede422e8510283c0c08e1a8f5b3486 +References: bsc#1171688 bsc#1174003 + +The word 'descriptor' is misspelled throughout the tree. + +Fix it up accordingly: + decriptors -> descriptors + +Link: https://lore.kernel.org/r/20200609124610.3445662-7-kieran.bingham+renesas@ideasonboard.com +Signed-off-by: Kieran Bingham +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/ibmvscsi/ibmvfc.c | 2 +- + drivers/scsi/ibmvscsi/ibmvscsi.c | 2 +- + drivers/scsi/qla2xxx/qla_inline.h | 2 +- + drivers/scsi/qla2xxx/qla_iocb.c | 6 +++--- + 4 files changed, 6 insertions(+), 6 deletions(-) + +--- a/drivers/scsi/ibmvscsi/ibmvfc.c ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c +@@ -1358,7 +1358,7 @@ static void ibmvfc_map_sg_list(struct sc + } + + /** +- * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields ++ * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields + * @scmd: Scsi_Cmnd with the scatterlist + * @evt: ibmvfc event struct + * @vfc_cmd: vfc_cmd that contains the memory descriptor +--- a/drivers/scsi/ibmvscsi/ibmvscsi.c ++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c +@@ -683,7 +683,7 @@ static int map_sg_list(struct scsi_cmnd + } + + /** +- * map_sg_data: - Maps dma for a scatterlist and initializes decriptor fields ++ * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields + * @cmd: Scsi_Cmnd with the scatterlist + * @srp_cmd: srp_cmd that contains the memory descriptor + * @dev: device for which to map dma memory +--- a/drivers/scsi/qla2xxx/qla_inline.h ++++ b/drivers/scsi/qla2xxx/qla_inline.h +@@ -11,7 +11,7 @@ + * Continuation Type 1 IOCBs to allocate. 
+ * + * @vha: HA context +- * @dsds: number of data segment decriptors needed ++ * @dsds: number of data segment descriptors needed + * + * Returns the number of IOCB entries needed to store @dsds. + */ +--- a/drivers/scsi/qla2xxx/qla_iocb.c ++++ b/drivers/scsi/qla2xxx/qla_iocb.c +@@ -44,7 +44,7 @@ qla2x00_get_cmd_direction(srb_t *sp) + * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and + * Continuation Type 0 IOCBs to allocate. + * +- * @dsds: number of data segment decriptors needed ++ * @dsds: number of data segment descriptors needed + * + * Returns the number of IOCB entries needed to store @dsds. + */ +@@ -66,7 +66,7 @@ qla2x00_calc_iocbs_32(uint16_t dsds) + * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and + * Continuation Type 1 IOCBs to allocate. + * +- * @dsds: number of data segment decriptors needed ++ * @dsds: number of data segment descriptors needed + * + * Returns the number of IOCB entries needed to store @dsds. + */ +@@ -669,7 +669,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *s + * qla24xx_calc_dsd_lists() - Determine number of DSD list required + * for Command Type 6. + * +- * @dsds: number of data segment decriptors needed ++ * @dsds: number of data segment descriptors needed + * + * Returns the number of dsd list needed to store @dsds. + */ diff --git a/patches.suse/scsi-qla2xxx-Add-more-BUILD_BUG_ON-statements.patch b/patches.suse/scsi-qla2xxx-Add-more-BUILD_BUG_ON-statements.patch new file mode 100644 index 0000000..395eebe --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Add-more-BUILD_BUG_ON-statements.patch @@ -0,0 +1,140 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:02 -0700 +Subject: scsi: qla2xxx: Add more BUILD_BUG_ON() statements +Patch-mainline: v5.8-rc1 +Git-commit: 8a73a0e002b318d8ad79fe0e6b48e27ca30e527a +References: bsc#1171688 bsc#1174003 + +Before fixing the endianness annotations in data structures, make the +compiler verify the size of FC protocol and firmware data structures. + +Link: https://lore.kernel.org/r/20200518211712.11395-6-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Reviewed-by: Hannes Reinecke +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_os.c | 58 +++++++++++++++++++++++++++++++++++++ + drivers/scsi/qla2xxx/tcm_qla2xxx.c | 14 ++++++++ + 2 files changed, 72 insertions(+) + +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -7835,13 +7835,19 @@ qla2x00_module_init(void) + { + int ret = 0; + ++ BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64); + BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); + BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); + BUILD_BUG_ON(sizeof(cont_entry_t) != 64); + BUILD_BUG_ON(sizeof(init_cb_t) != 96); ++ BUILD_BUG_ON(sizeof(mrk_entry_t) != 64); + BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); + BUILD_BUG_ON(sizeof(request_t) != 64); ++ BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64); ++ BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); ++ BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64); + BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); + BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); + BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); +@@ -7849,17 +7855,69 @@ qla2x00_module_init(void) + BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); + BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); + BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344); ++ BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424); ++ BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164); ++ BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260); ++ BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260); ++ BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16); + BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); ++ BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256); ++ BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24); ++ BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256); ++ BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288); ++ BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216); + BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64); + BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); ++ BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64); + BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); + BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); ++ BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct mbx_entry) != 64); ++ BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252); ++ BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512); ++ BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512); + BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); ++ BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64); ++ BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634); ++ BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100); ++ BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976); ++ BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228); ++ BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52); ++ BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172); ++ BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524); ++ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8); ++ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12); ++ BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24); ++ BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420); ++ BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28); ++ 
BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32); ++ BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196); ++ BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128); + BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); + BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); ++ BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24); ++ BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16); ++ BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336); + BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); ++ BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64); ++ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64); + BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); ++ BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52); + BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); ++ BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64); ++ BUILD_BUG_ON(sizeof(sts21_entry_t) != 64); ++ BUILD_BUG_ON(sizeof(sts22_entry_t) != 64); ++ BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64); ++ BUILD_BUG_ON(sizeof(sts_entry_t) != 64); ++ BUILD_BUG_ON(sizeof(sw_info_t) != 32); ++ BUILD_BUG_ON(sizeof(target_id_t) != 2); + + /* Allocate cache for SRBs. */ + srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -2015,6 +2015,20 @@ static int __init tcm_qla2xxx_init(void) + { + int ret; + ++ BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64); ++ BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32); ++ BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64); ++ BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12); ++ BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4); ++ BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); ++ BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); ++ BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64); ++ BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); ++ BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24); ++ BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64); ++ + ret = tcm_qla2xxx_register_configfs(); + if (ret < 0) + return ret; diff --git a/patches.suse/scsi-qla2xxx-Address-a-set-of-sparse-warnings.patch b/patches.suse/scsi-qla2xxx-Address-a-set-of-sparse-warnings.patch new file mode 100644 index 0000000..5fcbf1a --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Address-a-set-of-sparse-warnings.patch @@ -0,0 +1,53 @@ +From: Shyam Sundar +Date: Tue, 14 Jul 2020 21:33:58 -0700 +Subject: scsi: qla2xxx: Address a set of sparse warnings +Patch-mainline: v5.9-rc1 +Git-commit: 58101f1504ad9b36e34533ec1fb01a8de80aa6d6 +References: bsc#1171688 bsc#1174003 + +Fix the following sparse warnings: + +drivers/scsi/qla2xxx/qla_isr.c:881:23: warning: restricted __le16 degrades to integer +drivers/scsi/qla2xxx/qla_isr.c:881:23: warning: cast to restricted __le16 + +Link: https://lore.kernel.org/r/20200715043358.21668-1-njavali@marvell.com +Signed-off-by: Shyam Sundar +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_fw.h | 2 +- + drivers/scsi/qla2xxx/qla_isr.c | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_fw.h ++++ b/drivers/scsi/qla2xxx/qla_fw.h +@@ -610,7 +610,7 @@ struct sts_entry_24xx { + __le32 residual_len; /* FW calc residual transfer length. */ + + union { +- uint16_t reserved_1; ++ __le16 reserved_1; + __le16 nvme_rsp_pyld_len; + }; + +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -790,7 +790,7 @@ qla27xx_handle_8200_aen(scsi_qla_host_t + } + } + +-struct purex_item * ++static struct purex_item * + qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) + { + struct purex_item *item = NULL; +@@ -878,7 +878,7 @@ qla27xx_copy_fpin_pkt(struct scsi_qla_ho + struct purex_item *item; + void *fpin_pkt = NULL; + +- total_bytes = le16_to_cpu(purex->frame_size & 0x0FFF) ++ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) + - PURX_ELS_HEADER_SIZE; + pending_bytes = total_bytes; + entry_count = entry_count_remaining = purex->entry_count; diff --git a/patches.suse/scsi-qla2xxx-Allow-ql2xextended_error_logging-specia.patch b/patches.suse/scsi-qla2xxx-Allow-ql2xextended_error_logging-specia.patch new file mode 100644 index 0000000..c6bcfe8 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Allow-ql2xextended_error_logging-specia.patch @@ -0,0 +1,32 @@ +From: Arun Easi +Date: Thu, 6 Aug 2020 04:10:09 -0700 +Subject: scsi: qla2xxx: Allow ql2xextended_error_logging special value 1 to be + set anytime +Patch-mainline: v5.9-rc2 +Git-commit: 49030003a38a5e3688781e2ccf9550dcebf61282 +References: bsc#1171688 bsc#1174003 + +ql2xextended_error_logging can now be set to 1 to get the default mask +value, as opposed to at module load time only. + +Link: https://lore.kernel.org/r/20200806111014.28434-7-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Arun Easi +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_dbg.h | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/scsi/qla2xxx/qla_dbg.h ++++ b/drivers/scsi/qla2xxx/qla_dbg.h +@@ -380,5 +380,8 @@ extern int qla24xx_soft_reset(struct qla + static inline int + ql_mask_match(uint level) + { ++ if (ql2xextended_error_logging == 1) ++ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; ++ + return (level & ql2xextended_error_logging) == level; + } diff --git a/patches.suse/scsi-qla2xxx-Cast-explicitly-to-uint16_t-uint32_t.patch b/patches.suse/scsi-qla2xxx-Cast-explicitly-to-uint16_t-uint32_t.patch new file mode 100644 index 0000000..94fa596 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Cast-explicitly-to-uint16_t-uint32_t.patch @@ -0,0 +1,403 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:09 -0700 +Subject: scsi: qla2xxx: Cast explicitly to uint16_t / uint32_t +Patch-mainline: v5.8-rc1 +Git-commit: ab053c09ee2066a2fe62a755f1e64dbc8eddc17c +References: bsc#1171688 bsc#1174003 + +Casting a pointer to void * and relying on an implicit cast from void * +to uint16_t or uint32_t suppresses sparse warnings about endianness. Hence +cast explicitly to uint16_t and uint32_t. Additionally, remove superfluous +void * casts. + +Link: https://lore.kernel.org/r/20200518211712.11395-13-bvanassche@acm.org +Cc: Arun Easi +Cc: Nilesh Javali +Cc: Daniel Wagner +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Hannes Reinecke +Reviewed-by: Daniel Wagner +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_dbg.c | 4 ++-- + drivers/scsi/qla2xxx/qla_init.c | 26 +++++++++++++------------- + drivers/scsi/qla2xxx/qla_mbx.c | 6 +++--- + drivers/scsi/qla2xxx/qla_mid.c | 4 ++-- + drivers/scsi/qla2xxx/qla_mr.c | 4 ++-- + drivers/scsi/qla2xxx/qla_nvme.c | 4 ++-- + drivers/scsi/qla2xxx/qla_nx2.c | 4 ++-- + drivers/scsi/qla2xxx/qla_os.c | 10 +++++----- + drivers/scsi/qla2xxx/qla_sup.c | 12 ++++++------ + drivers/scsi/qla2xxx/qla_target.c | 4 ++-- + 10 files changed, 39 insertions(+), 39 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -115,7 +115,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data + { + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + dma_addr_t dump_dma = ha->gid_list_dma; +- uint32_t *chunk = (void *)ha->gid_list; ++ uint32_t *chunk = (uint32_t *)ha->gid_list; + uint32_t dwords = qla2x00_gid_list_size(ha) / 4; + uint32_t stat; + ulong i, j, timer = 6000000; +@@ -195,7 +195,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, + int rval = QLA_FUNCTION_FAILED; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + dma_addr_t dump_dma = ha->gid_list_dma; +- uint32_t *chunk = (void *)ha->gid_list; ++ uint32_t *chunk = (uint32_t *)ha->gid_list; + uint32_t dwords = qla2x00_gid_list_size(ha) / 4; + uint32_t stat; + ulong i, j, timer = 6000000; +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -994,7 +994,7 @@ static void qla24xx_async_gnl_sp_done(sr + + ql_dbg(ql_dbg_disc, vha, 0x20e8, + "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", +- __func__, (void *)&wwn, e->port_id[2], e->port_id[1], ++ __func__, &wwn, e->port_id[2], e->port_id[1], + e->port_id[0], e->current_login_state, e->last_login_state, + (loop_id & 0x7fff)); + } +@@ -1345,7 +1345,7 @@ int qla24xx_async_gpdb(struct scsi_qla_h + mb[9] = vha->vp_idx; + mb[10] = opt; + +- mbx->u.mbx.in = (void *)pd; ++ mbx->u.mbx.in = pd; + mbx->u.mbx.in_dma = pd_dma; + + sp->done = qla24xx_async_gpdb_sp_done; +@@ -4126,7 +4126,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) + req = ha->req_q_map[que]; + if (!req || !test_bit(que, ha->req_qid_map)) + continue; +- req->out_ptr = (void *)(req->ring + req->length); ++ req->out_ptr = (uint16_t *)(req->ring + req->length); + *req->out_ptr = 0; + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) + req->outstanding_cmds[cnt] = NULL; +@@ -4143,7 +4143,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) + rsp = ha->rsp_q_map[que]; + if (!rsp || !test_bit(que, ha->rsp_qid_map)) + continue; +- rsp->in_ptr = (void *)(rsp->ring + rsp->length); ++ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); + *rsp->in_ptr = 0; + /* Initialize response queue entries */ + if (IS_QLAFX00(ha)) +@@ -7444,7 +7444,7 @@ qla27xx_check_image_status_signature(str + static ulong + qla27xx_image_status_checksum(struct qla27xx_image_status *image_status) + { +- uint32_t *p = (void *)image_status; ++ uint32_t *p = (uint32_t *)image_status; + uint n = sizeof(*image_status) / sizeof(*p); + uint32_t sum = 0; + +@@ -7507,7 +7507,7 @@ qla28xx_get_aux_images( + goto check_sec_image; + } + +- qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status, ++ qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, + ha->flt_region_aux_img_status_pri, + sizeof(pri_aux_image_status) >> 2); + qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); +@@ -7540,7 +7540,7 @@ qla28xx_get_aux_images( + goto check_valid_image; + } + +- qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status, ++ 
qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
+ ha->flt_region_aux_img_status_sec,
+ sizeof(sec_aux_image_status) >> 2);
+ qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
+@@ -7605,7 +7605,7 @@ qla27xx_get_active_image(struct scsi_qla
+ goto check_sec_image;
+ }
+
+- if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
++ if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
+ ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
+ QLA_SUCCESS) {
+ WARN_ON_ONCE(true);
+@@ -7712,7 +7712,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t
+ ql_dbg(ql_dbg_init, vha, 0x008b,
+ "FW: Loading firmware from flash (%x).\n", faddr);
+
+- dcode = (void *)req->ring;
++ dcode = (uint32_t *)req->ring;
+ qla24xx_read_flash_data(vha, dcode, faddr, 8);
+ if (qla24xx_risc_firmware_invalid(dcode)) {
+ ql_log(ql_log_fatal, vha, 0x008c,
+@@ -7725,7 +7725,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t
+ return QLA_FUNCTION_FAILED;
+ }
+
+- dcode = (void *)req->ring;
++ dcode = (uint32_t *)req->ring;
+ *srisc_addr = 0;
+ segments = FA_RISC_CODE_SEGMENTS;
+ for (j = 0; j < segments; j++) {
+@@ -7776,7 +7776,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t
+ fwdt->template = NULL;
+ fwdt->length = 0;
+
+- dcode = (void *)req->ring;
++ dcode = (uint32_t *)req->ring;
+ qla24xx_read_flash_data(vha, dcode, faddr, 7);
+ risc_size = be32_to_cpu(dcode[2]);
+ ql_dbg(ql_dbg_init, vha, 0x0161,
+@@ -7968,7 +7968,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *
+ return QLA_FUNCTION_FAILED;
+ }
+
+- fwcode = (void *)blob->fw->data;
++ fwcode = (uint32_t *)blob->fw->data;
+ dcode = fwcode;
+ if (qla24xx_risc_firmware_invalid(dcode)) {
+ ql_log(ql_log_fatal, vha, 0x0093,
+@@ -7980,7 +7980,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *
+ return QLA_FUNCTION_FAILED;
+ }
+
+- dcode = (void *)req->ring;
++ dcode = (uint32_t *)req->ring;
+ *srisc_addr = 0;
+ segments = FA_RISC_CODE_SEGMENTS;
+ for (j = 0; j < segments; j++) {
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -3038,7 +3038,7 @@ qla2x00_get_link_status(scsi_qla_host_t
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+- uint32_t *iter = (void *)stats;
++ uint32_t *iter = (uint32_t *)stats;
+ ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
+ struct qla_hw_data *ha = vha->hw;
+
+@@ -3097,7 +3097,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *v
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+- uint32_t *iter = (void *)stats;
++ uint32_t *iter = (uint32_t *)stats;
+ ushort dwords = sizeof(*stats)/sizeof(*iter);
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+@@ -4736,7 +4736,7 @@ qla82xx_set_driver_version(scsi_qla_host
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
+ "Entered %s.\n", __func__);
+
+- str = (void *)version;
++ str = (uint16_t *)version;
+ len = strlen(version);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -770,7 +770,7 @@ qla25xx_create_req_que(struct qla_hw_dat
+ req->req_q_in = &reg->isp25mq.req_q_in;
+ req->req_q_out = &reg->isp25mq.req_q_out;
+ req->max_q_depth = ha->req_q_map[0]->max_q_depth;
+- req->out_ptr = (void *)(req->ring + req->length);
++ req->out_ptr = (uint16_t *)(req->ring + req->length);
+ mutex_unlock(&ha->mq_lock);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
+ "ring_ptr=%p ring_index=%d, "
+@@ -884,7 +884,7 @@ qla25xx_create_rsp_que(struct qla_hw_dat
+ reg = ISP_QUE_REG(ha, que_id);
+ rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
+ rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
+- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
++ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
+ mutex_unlock(&ha->mq_lock);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
+--- a/drivers/scsi/qla2xxx/qla_mr.c
++++ b/drivers/scsi/qla2xxx/qla_mr.c
+@@ -3212,7 +3212,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mg
+ sizeof(struct scsi_lun));
+ }
+
+- memcpy((void *)ptm_iocb, &tm_iocb,
++ memcpy(ptm_iocb, &tm_iocb,
+ sizeof(struct tsk_mgmt_entry_fx00));
+ wmb();
+ }
+@@ -3234,7 +3234,7 @@ qlafx00_abort_iocb(srb_t *sp, struct abo
+ abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
+ abt_iocb.req_que_no = cpu_to_le16(req->id);
+
+- memcpy((void *)pabt_iocb, &abt_iocb,
++ memcpy(pabt_iocb, &abt_iocb,
+ sizeof(struct abort_iocb_entry_fx00));
+ wmb();
+ }
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -295,7 +295,7 @@ static int qla_nvme_ls_req(struct nvme_f
+ sp->name = "nvme_ls";
+ sp->done = qla_nvme_sp_ls_done;
+ sp->put_fn = qla_nvme_release_ls_cmd_kref;
+- sp->priv = (void *)priv;
++ sp->priv = priv;
+ priv->sp = sp;
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+@@ -571,7 +571,7 @@ static int qla_nvme_post_cmd(struct nvme
+ init_waitqueue_head(&sp->nvme_ls_waitq);
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+- sp->priv = (void *)priv;
++ sp->priv = priv;
+ priv->sp = sp;
+ sp->type = SRB_NVME_CMD;
+ sp->name = "nvme_cmd";
+--- a/drivers/scsi/qla2xxx/qla_nx2.c
++++ b/drivers/scsi/qla2xxx/qla_nx2.c
+@@ -2965,7 +2965,7 @@ qla8044_minidump_pex_dma_read(struct scs
+
+ /* Prepare: Write pex-dma descriptor to MS memory. */
+ rval = qla8044_ms_mem_write_128b(vha,
+- m_hdr->desc_card_addr, (void *)&dma_desc,
++ m_hdr->desc_card_addr, (uint32_t *)&dma_desc,
+ (sizeof(struct qla8044_pex_dma_descriptor)/16));
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb14a,
+@@ -2987,7 +2987,7 @@ qla8044_minidump_pex_dma_read(struct scs
+ read_size += chunk_size;
+ }
+
+- *d_ptr = (void *)data_ptr;
++ *d_ptr = (uint32_t *)data_ptr;
+
+ error_exit:
+ if (rdmem_buffer)
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -5929,7 +5929,7 @@ void qla24xx_process_purex_rdp(struct sc
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
+ "-------- ELS REQ -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
+- (void *)purex, sizeof(*purex));
++ purex, sizeof(*purex));
+
+ if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
+ rsp_payload_length =
+@@ -6045,7 +6045,7 @@ void qla24xx_process_purex_rdp(struct sc
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
+ if (!rval) {
+- uint16_t *trx = (void *)sfp; /* already be16 */
++ uint16_t *trx = (uint16_t *)sfp; /* already be16 */
+ rsp_payload->sfp_diag_desc.temperature = trx[0];
+ rsp_payload->sfp_diag_desc.vcc = trx[1];
+ rsp_payload->sfp_diag_desc.tx_bias = trx[2];
+@@ -6154,7 +6154,7 @@ void qla24xx_process_purex_rdp(struct sc
+ memset(sfp, 0, SFP_RTDI_LEN);
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
+ if (!rval) {
+- uint16_t *trx = (void *)sfp; /* already be16 */
++ uint16_t *trx = (uint16_t *)sfp; /* already be16 */
+
+ /* Optical Element Descriptor, Temperature */
+ rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
+@@ -6280,11 +6280,11 @@ void qla24xx_process_purex_rdp(struct sc
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
+ "-------- ELS RSP -------\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
0x0185, +- (void *)rsp_els, sizeof(*rsp_els)); ++ rsp_els, sizeof(*rsp_els)); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186, + "-------- ELS RSP PAYLOAD -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187, +- (void *)rsp_payload, rsp_payload_length); ++ rsp_payload, rsp_payload_length); + + rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0); + +--- a/drivers/scsi/qla2xxx/qla_sup.c ++++ b/drivers/scsi/qla2xxx/qla_sup.c +@@ -553,7 +553,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t * + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct qla_flt_location *fltl = (void *)req->ring; +- uint32_t *dcode = (void *)req->ring; ++ uint32_t *dcode = (uint32_t *)req->ring; + uint8_t *buf = (void *)req->ring, *bcode, last_image; + + /* +@@ -610,7 +610,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t * + if (memcmp(fltl->sig, "QFLT", 4)) + goto end; + +- wptr = (void *)req->ring; ++ wptr = (uint16_t *)req->ring; + cnt = sizeof(*fltl) / sizeof(*wptr); + for (chksum = 0; cnt--; wptr++) + chksum += le16_to_cpu(*wptr); +@@ -682,7 +682,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vh + + ha->flt_region_flt = flt_addr; + wptr = (uint16_t *)ha->flt; +- ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2, ++ ha->isp_ops->read_optrom(vha, flt, flt_addr << 2, + (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE)); + + if (le16_to_cpu(*wptr) == 0xffff) +@@ -949,7 +949,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vh + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + uint16_t cnt, chksum; +- uint16_t *wptr = (void *)req->ring; ++ uint16_t *wptr = (uint16_t *)req->ring; + struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring; + uint8_t man_id, flash_id; + uint16_t mid = 0, fid = 0; +@@ -2610,7 +2610,7 @@ qla24xx_read_optrom_data(struct scsi_qla + set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + + /* Go with read. */ +- qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2); ++ qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); + + /* Resume HBA. 
*/ + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); +@@ -3528,7 +3528,7 @@ qla24xx_get_flash_version(scsi_qla_host_ + + memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version)); + faddr = ha->flt_region_gold_fw; +- qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8); ++ qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8); + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_warn, vha, 0x0056, + "Unrecognized golden fw at %#x.\n", faddr); +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -3894,7 +3894,7 @@ static void *qlt_ctio_to_cmd(struct scsi + return NULL; + } + +- cmd = (void *) req->outstanding_cmds[h]; ++ cmd = req->outstanding_cmds[h]; + if (unlikely(cmd == NULL)) { + ql_dbg(ql_dbg_async, vha, 0xe053, + "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", +@@ -5941,7 +5941,7 @@ void qlt_async_event(uint16_t code, stru + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(ha->base_qpair, +- (void *)&tgt->link_reinit_iocb, ++ &tgt->link_reinit_iocb, + 0, 0, 0, 0, 0, 0); + tgt->link_reinit_iocb_pending = 0; + } diff --git a/patches.suse/scsi-qla2xxx-Change-RD-WRT-_REG_-function-names-from.patch b/patches.suse/scsi-qla2xxx-Change-RD-WRT-_REG_-function-names-from.patch new file mode 100644 index 0000000..24166ef --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Change-RD-WRT-_REG_-function-names-from.patch @@ -0,0 +1,3831 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:08 -0700 +Subject: scsi: qla2xxx: Change {RD,WRT}_REG_*() function names from upper case + into lower case +Patch-mainline: v5.8-rc1 +Git-commit: 04474d3a1c968119e7214c312b273dee01258cad +References: bsc#1171688 bsc#1174003 + +This was suggested by Daniel Wagner. + +Link: https://lore.kernel.org/r/20200518211712.11395-12-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Reviewed-by: Arun Easi +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_dbg.c | 588 +++++++++++++++++++------------------- + drivers/scsi/qla2xxx/qla_def.h | 26 - + drivers/scsi/qla2xxx/qla_init.c | 205 ++++++------- + drivers/scsi/qla2xxx/qla_inline.h | 6 + drivers/scsi/qla2xxx/qla_iocb.c | 64 ++-- + drivers/scsi/qla2xxx/qla_isr.c | 128 ++++---- + drivers/scsi/qla2xxx/qla_mbx.c | 74 ++-- + drivers/scsi/qla2xxx/qla_mr.c | 94 +++--- + drivers/scsi/qla2xxx/qla_mr.h | 24 - + drivers/scsi/qla2xxx/qla_nvme.c | 4 + drivers/scsi/qla2xxx/qla_nx.c | 68 ++-- + drivers/scsi/qla2xxx/qla_nx2.c | 12 + drivers/scsi/qla2xxx/qla_os.c | 26 - + drivers/scsi/qla2xxx/qla_sup.c | 244 +++++++-------- + drivers/scsi/qla2xxx/qla_target.c | 10 + drivers/scsi/qla2xxx/qla_tmpl.c | 14 + 16 files changed, 793 insertions(+), 794 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -126,26 +126,26 @@ qla27xx_dump_mpi_ram(struct qla_hw_data + if (i + dwords > ram_dwords) + dwords = ram_dwords - i; + +- WRT_REG_WORD(®->mailbox0, MBC_LOAD_DUMP_MPI_RAM); +- WRT_REG_WORD(®->mailbox1, LSW(addr)); +- WRT_REG_WORD(®->mailbox8, MSW(addr)); +- +- WRT_REG_WORD(®->mailbox2, MSW(LSD(dump_dma))); +- WRT_REG_WORD(®->mailbox3, LSW(LSD(dump_dma))); +- WRT_REG_WORD(®->mailbox6, MSW(MSD(dump_dma))); +- WRT_REG_WORD(®->mailbox7, LSW(MSD(dump_dma))); ++ wrt_reg_word(®->mailbox0, MBC_LOAD_DUMP_MPI_RAM); ++ wrt_reg_word(®->mailbox1, LSW(addr)); ++ wrt_reg_word(®->mailbox8, MSW(addr)); ++ ++ wrt_reg_word(®->mailbox2, MSW(LSD(dump_dma))); ++ wrt_reg_word(®->mailbox3, LSW(LSD(dump_dma))); ++ wrt_reg_word(®->mailbox6, MSW(MSD(dump_dma))); ++ wrt_reg_word(®->mailbox7, LSW(MSD(dump_dma))); + +- WRT_REG_WORD(®->mailbox4, MSW(dwords)); +- WRT_REG_WORD(®->mailbox5, LSW(dwords)); ++ wrt_reg_word(®->mailbox4, MSW(dwords)); ++ wrt_reg_word(®->mailbox5, LSW(dwords)); + +- WRT_REG_WORD(®->mailbox9, 0); +- WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); ++ wrt_reg_word(®->mailbox9, 0); ++ wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); + + ha->flags.mbox_int = 0; + while (timer--) { + udelay(5); + +- stat = RD_REG_DWORD(®->host_status); ++ stat = rd_reg_dword(®->host_status); + /* Check for pending interrupts. 
*/ + if (!(stat & HSRX_RISC_INT)) + continue; +@@ -155,15 +155,15 @@ qla27xx_dump_mpi_ram(struct qla_hw_data + stat != 0x10 && stat != 0x11) { + + /* Clear this intr; it wasn't a mailbox intr */ +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); ++ rd_reg_dword(®->hccr); + continue; + } + + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); +- rval = RD_REG_WORD(®->mailbox0) & MBS_MASK; +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); +- RD_REG_DWORD(®->hccr); ++ rval = rd_reg_word(®->mailbox0) & MBS_MASK; ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); ++ rd_reg_dword(®->hccr); + break; + } + ha->flags.mbox_int = 1; +@@ -206,23 +206,23 @@ qla24xx_dump_ram(struct qla_hw_data *ha, + if (i + dwords > ram_dwords) + dwords = ram_dwords - i; + +- WRT_REG_WORD(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); +- WRT_REG_WORD(®->mailbox1, LSW(addr)); +- WRT_REG_WORD(®->mailbox8, MSW(addr)); +- +- WRT_REG_WORD(®->mailbox2, MSW(LSD(dump_dma))); +- WRT_REG_WORD(®->mailbox3, LSW(LSD(dump_dma))); +- WRT_REG_WORD(®->mailbox6, MSW(MSD(dump_dma))); +- WRT_REG_WORD(®->mailbox7, LSW(MSD(dump_dma))); +- +- WRT_REG_WORD(®->mailbox4, MSW(dwords)); +- WRT_REG_WORD(®->mailbox5, LSW(dwords)); +- WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); ++ wrt_reg_word(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); ++ wrt_reg_word(®->mailbox1, LSW(addr)); ++ wrt_reg_word(®->mailbox8, MSW(addr)); ++ ++ wrt_reg_word(®->mailbox2, MSW(LSD(dump_dma))); ++ wrt_reg_word(®->mailbox3, LSW(LSD(dump_dma))); ++ wrt_reg_word(®->mailbox6, MSW(MSD(dump_dma))); ++ wrt_reg_word(®->mailbox7, LSW(MSD(dump_dma))); ++ ++ wrt_reg_word(®->mailbox4, MSW(dwords)); ++ wrt_reg_word(®->mailbox5, LSW(dwords)); ++ wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); + + ha->flags.mbox_int = 0; + while (timer--) { + udelay(5); +- stat = RD_REG_DWORD(®->host_status); ++ stat = rd_reg_dword(®->host_status); + + /* Check for pending interrupts. 
*/ + if (!(stat & HSRX_RISC_INT)) +@@ -231,15 +231,15 @@ qla24xx_dump_ram(struct qla_hw_data *ha, + stat &= 0xff; + if (stat != 0x1 && stat != 0x2 && + stat != 0x10 && stat != 0x11) { +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); ++ rd_reg_dword(®->hccr); + continue; + } + + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); +- rval = RD_REG_WORD(®->mailbox0) & MBS_MASK; +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); +- RD_REG_DWORD(®->hccr); ++ rval = rd_reg_word(®->mailbox0) & MBS_MASK; ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); ++ rd_reg_dword(®->hccr); + break; + } + ha->flags.mbox_int = 1; +@@ -292,10 +292,10 @@ qla24xx_read_window(struct device_reg_24 + { + uint32_t __iomem *dmp_reg; + +- WRT_REG_DWORD(®->iobase_addr, iobase); ++ wrt_reg_dword(®->iobase_addr, iobase); + dmp_reg = ®->iobase_window; + for ( ; count--; dmp_reg++) +- *buf++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *buf++ = htonl(rd_reg_dword(dmp_reg)); + + return buf; + } +@@ -303,11 +303,11 @@ qla24xx_read_window(struct device_reg_24 + void + qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha) + { +- WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_PAUSE); ++ wrt_reg_dword(®->hccr, HCCRX_SET_RISC_PAUSE); + + /* 100 usec delay is sufficient enough for hardware to pause RISC */ + udelay(100); +- if (RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) ++ if (rd_reg_dword(®->host_status) & HSRX_RISC_PAUSED) + set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags); + } + +@@ -324,17 +324,17 @@ qla24xx_soft_reset(struct qla_hw_data *h + * Driver can proceed with the reset sequence after waiting + * for a timeout period. + */ +- WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); ++ wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + for (cnt = 0; cnt < 30000; cnt++) { +- if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) ++ if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) + break; + + udelay(10); + } +- if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)) ++ if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)) + set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); + +- WRT_REG_DWORD(®->ctrl_status, ++ wrt_reg_dword(®->ctrl_status, + CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); + +@@ -342,19 +342,19 @@ qla24xx_soft_reset(struct qla_hw_data *h + + /* Wait for soft-reset to complete. */ + for (cnt = 0; cnt < 30000; cnt++) { +- if ((RD_REG_DWORD(®->ctrl_status) & ++ if ((rd_reg_dword(®->ctrl_status) & + CSRX_ISP_SOFT_RESET) == 0) + break; + + udelay(10); + } +- if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) ++ if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) + set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags); + +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); +- RD_REG_DWORD(®->hccr); /* PCI Posting. */ ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); ++ rd_reg_dword(®->hccr); /* PCI Posting. */ + +- for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 && ++ for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(10); +@@ -399,11 +399,11 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, + WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma))); + + WRT_MAILBOX_REG(ha, reg, 4, words); +- WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); ++ wrt_reg_word(®->hccr, HCCR_SET_HOST_INT); + + for (timer = 6000000; timer; timer--) { + /* Check for pending interrupts. 
*/ +- stat = RD_REG_DWORD(®->u.isp2300.host_status); ++ stat = rd_reg_dword(®->u.isp2300.host_status); + if (stat & HSR_RISC_INT) { + stat &= 0xff; + +@@ -414,10 +414,10 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, + mb0 = RD_MAILBOX_REG(ha, reg, 0); + + /* Release mailbox registers. */ +- WRT_REG_WORD(®->semaphore, 0); +- WRT_REG_WORD(®->hccr, ++ wrt_reg_word(®->semaphore, 0); ++ wrt_reg_word(®->hccr, + HCCR_CLR_RISC_INT); +- RD_REG_WORD(®->hccr); ++ rd_reg_word(®->hccr); + break; + } else if (stat == 0x10 || stat == 0x11) { + set_bit(MBX_INTERRUPT, +@@ -425,15 +425,15 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, + + mb0 = RD_MAILBOX_REG(ha, reg, 0); + +- WRT_REG_WORD(®->hccr, ++ wrt_reg_word(®->hccr, + HCCR_CLR_RISC_INT); +- RD_REG_WORD(®->hccr); ++ rd_reg_word(®->hccr); + break; + } + + /* clear this intr; it wasn't a mailbox intr */ +- WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); +- RD_REG_WORD(®->hccr); ++ wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); ++ rd_reg_word(®->hccr); + } + udelay(5); + } +@@ -458,7 +458,7 @@ qla2xxx_read_window(struct device_reg_2x + uint16_t __iomem *dmp_reg = ®->u.isp2300.fb_cmd; + + for ( ; count--; dmp_reg++) +- *buf++ = htons(RD_REG_WORD(dmp_reg)); ++ *buf++ = htons(rd_reg_word(dmp_reg)); + } + + static inline void * +@@ -685,13 +685,13 @@ qla25xx_copy_mq(struct qla_hw_data *ha, + reg = ISP_QUE_REG(ha, cnt); + que_idx = cnt * 4; + mq->qregs[que_idx] = +- htonl(RD_REG_DWORD(®->isp25mq.req_q_in)); ++ htonl(rd_reg_dword(®->isp25mq.req_q_in)); + mq->qregs[que_idx+1] = +- htonl(RD_REG_DWORD(®->isp25mq.req_q_out)); ++ htonl(rd_reg_dword(®->isp25mq.req_q_out)); + mq->qregs[que_idx+2] = +- htonl(RD_REG_DWORD(®->isp25mq.rsp_q_in)); ++ htonl(rd_reg_dword(®->isp25mq.rsp_q_in)); + mq->qregs[que_idx+3] = +- htonl(RD_REG_DWORD(®->isp25mq.rsp_q_out)); ++ htonl(rd_reg_dword(®->isp25mq.rsp_q_out)); + } + + return ptr + sizeof(struct qla2xxx_mq_chain); +@@ -760,13 +760,13 @@ qla2300_fw_dump(scsi_qla_host_t *vha) + qla2xxx_prep_dump(ha, ha->fw_dump); + + rval = QLA_SUCCESS; +- fw->hccr = htons(RD_REG_WORD(®->hccr)); ++ fw->hccr = htons(rd_reg_word(®->hccr)); + + /* Pause RISC. */ +- WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); ++ wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + if (IS_QLA2300(ha)) { + for (cnt = 30000; +- (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && ++ (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(100); +@@ -774,74 +774,74 @@ qla2300_fw_dump(scsi_qla_host_t *vha) + rval = QLA_FUNCTION_TIMEOUT; + } + } else { +- RD_REG_WORD(®->hccr); /* PCI Posting. */ ++ rd_reg_word(®->hccr); /* PCI Posting. 
*/ + udelay(10); + } + + if (rval == QLA_SUCCESS) { + dmp_reg = ®->flash_address; + for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) +- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); ++ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + dmp_reg = ®->u.isp2300.req_q_in; + for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg); + cnt++, dmp_reg++) +- fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); ++ fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + dmp_reg = ®->u.isp2300.mailbox0; + for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); + cnt++, dmp_reg++) +- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); ++ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg)); + +- WRT_REG_WORD(®->ctrl_status, 0x40); ++ wrt_reg_word(®->ctrl_status, 0x40); + qla2xxx_read_window(reg, 32, fw->resp_dma_reg); + +- WRT_REG_WORD(®->ctrl_status, 0x50); ++ wrt_reg_word(®->ctrl_status, 0x50); + qla2xxx_read_window(reg, 48, fw->dma_reg); + +- WRT_REG_WORD(®->ctrl_status, 0x00); ++ wrt_reg_word(®->ctrl_status, 0x00); + dmp_reg = ®->risc_hw; + for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); + cnt++, dmp_reg++) +- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); ++ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg)); + +- WRT_REG_WORD(®->pcr, 0x2000); ++ wrt_reg_word(®->pcr, 0x2000); + qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); + +- WRT_REG_WORD(®->pcr, 0x2200); ++ wrt_reg_word(®->pcr, 0x2200); + qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); + +- WRT_REG_WORD(®->pcr, 0x2400); ++ wrt_reg_word(®->pcr, 0x2400); + qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); + +- WRT_REG_WORD(®->pcr, 0x2600); ++ wrt_reg_word(®->pcr, 0x2600); + qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); + +- WRT_REG_WORD(®->pcr, 0x2800); ++ wrt_reg_word(®->pcr, 0x2800); + qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); + +- WRT_REG_WORD(®->pcr, 0x2A00); ++ wrt_reg_word(®->pcr, 0x2A00); + qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); + +- WRT_REG_WORD(®->pcr, 0x2C00); ++ wrt_reg_word(®->pcr, 0x2C00); + qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); + +- WRT_REG_WORD(®->pcr, 0x2E00); ++ wrt_reg_word(®->pcr, 0x2E00); + qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); + +- WRT_REG_WORD(®->ctrl_status, 0x10); ++ wrt_reg_word(®->ctrl_status, 0x10); + qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg); + +- WRT_REG_WORD(®->ctrl_status, 0x20); ++ wrt_reg_word(®->ctrl_status, 0x20); + qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); + +- WRT_REG_WORD(®->ctrl_status, 0x30); ++ wrt_reg_word(®->ctrl_status, 0x30); + qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); + + /* Reset RISC. */ +- WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); ++ wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + for (cnt = 0; cnt < 30000; cnt++) { +- if ((RD_REG_WORD(®->ctrl_status) & ++ if ((rd_reg_word(®->ctrl_status) & + CSR_ISP_SOFT_RESET) == 0) + break; + +@@ -916,11 +916,11 @@ qla2100_fw_dump(scsi_qla_host_t *vha) + qla2xxx_prep_dump(ha, ha->fw_dump); + + rval = QLA_SUCCESS; +- fw->hccr = htons(RD_REG_WORD(®->hccr)); ++ fw->hccr = htons(rd_reg_word(®->hccr)); + + /* Pause RISC. 
*/ +- WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); +- for (cnt = 30000; (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && ++ wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); ++ for (cnt = 30000; (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(100); +@@ -930,60 +930,60 @@ qla2100_fw_dump(scsi_qla_host_t *vha) + if (rval == QLA_SUCCESS) { + dmp_reg = ®->flash_address; + for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) +- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); ++ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg)); + + dmp_reg = ®->u.isp2100.mailbox0; + for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) { + if (cnt == 8) + dmp_reg = ®->u_end.isp2200.mailbox8; + +- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); ++ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg)); + } + + dmp_reg = ®->u.isp2100.unused_2[0]; + for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++) +- fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); ++ fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg)); + +- WRT_REG_WORD(®->ctrl_status, 0x00); ++ wrt_reg_word(®->ctrl_status, 0x00); + dmp_reg = ®->risc_hw; + for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++) +- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); ++ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg)); + +- WRT_REG_WORD(®->pcr, 0x2000); ++ wrt_reg_word(®->pcr, 0x2000); + qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); + +- WRT_REG_WORD(®->pcr, 0x2100); ++ wrt_reg_word(®->pcr, 0x2100); + qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); + +- WRT_REG_WORD(®->pcr, 0x2200); ++ wrt_reg_word(®->pcr, 0x2200); + qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); + +- WRT_REG_WORD(®->pcr, 0x2300); ++ wrt_reg_word(®->pcr, 0x2300); + qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); + +- WRT_REG_WORD(®->pcr, 0x2400); ++ wrt_reg_word(®->pcr, 0x2400); + qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); + +- WRT_REG_WORD(®->pcr, 0x2500); ++ wrt_reg_word(®->pcr, 0x2500); + qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); + +- WRT_REG_WORD(®->pcr, 0x2600); ++ wrt_reg_word(®->pcr, 0x2600); + qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); + +- WRT_REG_WORD(®->pcr, 0x2700); ++ wrt_reg_word(®->pcr, 0x2700); + qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); + +- WRT_REG_WORD(®->ctrl_status, 0x10); ++ wrt_reg_word(®->ctrl_status, 0x10); + qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg); + +- WRT_REG_WORD(®->ctrl_status, 0x20); ++ wrt_reg_word(®->ctrl_status, 0x20); + qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); + +- WRT_REG_WORD(®->ctrl_status, 0x30); ++ wrt_reg_word(®->ctrl_status, 0x30); + qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); + + /* Reset the ISP. */ +- WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); ++ wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + } + + for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && +@@ -996,11 +996,11 @@ qla2100_fw_dump(scsi_qla_host_t *vha) + + /* Pause RISC. */ + if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) && +- (RD_REG_WORD(®->mctr) & (BIT_1 | BIT_0)) != 0))) { ++ (rd_reg_word(®->mctr) & (BIT_1 | BIT_0)) != 0))) { + +- WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); ++ wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + for (cnt = 30000; +- (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && ++ (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) + udelay(100); +@@ -1010,13 +1010,13 @@ qla2100_fw_dump(scsi_qla_host_t *vha) + if (rval == QLA_SUCCESS) { + /* Set memory configuration and timing. 
*/ + if (IS_QLA2100(ha)) +- WRT_REG_WORD(®->mctr, 0xf1); ++ wrt_reg_word(®->mctr, 0xf1); + else +- WRT_REG_WORD(®->mctr, 0xf2); +- RD_REG_WORD(®->mctr); /* PCI Posting. */ ++ wrt_reg_word(®->mctr, 0xf2); ++ rd_reg_word(®->mctr); /* PCI Posting. */ + + /* Release RISC. */ +- WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); ++ wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + } + } + +@@ -1029,26 +1029,26 @@ qla2100_fw_dump(scsi_qla_host_t *vha) + for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS; + cnt++, risc_address++) { + WRT_MAILBOX_REG(ha, reg, 1, risc_address); +- WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); ++ wrt_reg_word(®->hccr, HCCR_SET_HOST_INT); + + for (timer = 6000000; timer != 0; timer--) { + /* Check for pending interrupts. */ +- if (RD_REG_WORD(®->istatus) & ISR_RISC_INT) { +- if (RD_REG_WORD(®->semaphore) & BIT_0) { ++ if (rd_reg_word(®->istatus) & ISR_RISC_INT) { ++ if (rd_reg_word(®->semaphore) & BIT_0) { + set_bit(MBX_INTERRUPT, + &ha->mbx_cmd_flags); + + mb0 = RD_MAILBOX_REG(ha, reg, 0); + mb2 = RD_MAILBOX_REG(ha, reg, 2); + +- WRT_REG_WORD(®->semaphore, 0); +- WRT_REG_WORD(®->hccr, ++ wrt_reg_word(®->semaphore, 0); ++ wrt_reg_word(®->hccr, + HCCR_CLR_RISC_INT); +- RD_REG_WORD(®->hccr); ++ rd_reg_word(®->hccr); + break; + } +- WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); +- RD_REG_WORD(®->hccr); ++ wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); ++ rd_reg_word(®->hccr); + } + udelay(5); + } +@@ -1107,7 +1107,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha) + fw = &ha->fw_dump->isp.isp24; + qla2xxx_prep_dump(ha, ha->fw_dump); + +- fw->host_status = htonl(RD_REG_DWORD(®->host_status)); ++ fw->host_status = htonl(rd_reg_dword(®->host_status)); + + /* + * Pause RISC. No need to track timeout, as resetting the chip +@@ -1118,40 +1118,40 @@ qla24xx_fw_dump(scsi_qla_host_t *vha) + /* Host interface registers. */ + dmp_reg = ®->flash_addr; + for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) +- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + + /* Disable interrupts. */ +- WRT_REG_DWORD(®->ictrl, 0); +- RD_REG_DWORD(®->ictrl); ++ wrt_reg_dword(®->ictrl, 0); ++ rd_reg_dword(®->ictrl); + + /* Shadow registers. 
*/ +- WRT_REG_DWORD(®->iobase_addr, 0x0F70); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_select, 0xB0000000); +- fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_addr, 0x0F70); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_select, 0xB0000000); ++ fw->shadow_reg[0] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0100000); +- fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0100000); ++ fw->shadow_reg[1] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0200000); +- fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0200000); ++ fw->shadow_reg[2] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0300000); +- fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0300000); ++ fw->shadow_reg[3] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0400000); +- fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0400000); ++ fw->shadow_reg[4] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0500000); +- fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0500000); ++ fw->shadow_reg[5] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0600000); +- fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0600000); ++ fw->shadow_reg[6] = htonl(rd_reg_dword(®->iobase_sdata)); + + /* Mailbox registers. */ + mbx_reg = ®->mailbox0; + for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) +- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); ++ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + + /* Transfer sequence registers. */ + iter_reg = fw->xseq_gp_reg; +@@ -1190,19 +1190,19 @@ qla24xx_fw_dump(scsi_qla_host_t *vha) + iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->resp0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->req1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + /* Transmit DMA registers. */ + iter_reg = fw->xmt0_dma_reg; +@@ -1350,7 +1350,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha) + qla2xxx_prep_dump(ha, ha->fw_dump); + ha->fw_dump->version = htonl(2); + +- fw->host_status = htonl(RD_REG_DWORD(®->host_status)); ++ fw->host_status = htonl(rd_reg_dword(®->host_status)); + + /* + * Pause RISC. No need to track timeout, as resetting the chip +@@ -1364,73 +1364,73 @@ qla25xx_fw_dump(scsi_qla_host_t *vha) + qla24xx_read_window(reg, 0x7010, 16, iter_reg); + + /* PCIe registers. 
*/ +- WRT_REG_DWORD(®->iobase_addr, 0x7C00); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_window, 0x01); ++ wrt_reg_dword(®->iobase_addr, 0x7C00); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_window, 0x01); + dmp_reg = ®->iobase_c4; +- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; +- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; +- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); +- fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); ++ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); ++ fw->pcie_regs[3] = htonl(rd_reg_dword(®->iobase_window)); + +- WRT_REG_DWORD(®->iobase_window, 0x00); +- RD_REG_DWORD(®->iobase_window); ++ wrt_reg_dword(®->iobase_window, 0x00); ++ rd_reg_dword(®->iobase_window); + + /* Host interface registers. */ + dmp_reg = ®->flash_addr; + for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) +- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + + /* Disable interrupts. */ +- WRT_REG_DWORD(®->ictrl, 0); +- RD_REG_DWORD(®->ictrl); ++ wrt_reg_dword(®->ictrl, 0); ++ rd_reg_dword(®->ictrl); + + /* Shadow registers. */ +- WRT_REG_DWORD(®->iobase_addr, 0x0F70); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_select, 0xB0000000); +- fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_addr, 0x0F70); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_select, 0xB0000000); ++ fw->shadow_reg[0] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0100000); +- fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0100000); ++ fw->shadow_reg[1] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0200000); +- fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0200000); ++ fw->shadow_reg[2] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0300000); +- fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0300000); ++ fw->shadow_reg[3] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0400000); +- fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0400000); ++ fw->shadow_reg[4] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0500000); +- fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0500000); ++ fw->shadow_reg[5] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0600000); +- fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0600000); ++ fw->shadow_reg[6] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0700000); +- fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0700000); ++ fw->shadow_reg[7] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0800000); +- fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0800000); ++ fw->shadow_reg[8] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0900000); +- fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ 
wrt_reg_dword(®->iobase_select, 0xB0900000); ++ fw->shadow_reg[9] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0A00000); +- fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0A00000); ++ fw->shadow_reg[10] = htonl(rd_reg_dword(®->iobase_sdata)); + + /* RISC I/O register. */ +- WRT_REG_DWORD(®->iobase_addr, 0x0010); +- fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); ++ wrt_reg_dword(®->iobase_addr, 0x0010); ++ fw->risc_io_reg = htonl(rd_reg_dword(®->iobase_window)); + + /* Mailbox registers. */ + mbx_reg = ®->mailbox0; + for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) +- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); ++ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + + /* Transfer sequence registers. */ + iter_reg = fw->xseq_gp_reg; +@@ -1494,19 +1494,19 @@ qla25xx_fw_dump(scsi_qla_host_t *vha) + iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->resp0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->req1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + /* Transmit DMA registers. */ + iter_reg = fw->xmt0_dma_reg; +@@ -1661,7 +1661,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha) + fw = &ha->fw_dump->isp.isp81; + qla2xxx_prep_dump(ha, ha->fw_dump); + +- fw->host_status = htonl(RD_REG_DWORD(®->host_status)); ++ fw->host_status = htonl(rd_reg_dword(®->host_status)); + + /* + * Pause RISC. No need to track timeout, as resetting the chip +@@ -1675,73 +1675,73 @@ qla81xx_fw_dump(scsi_qla_host_t *vha) + qla24xx_read_window(reg, 0x7010, 16, iter_reg); + + /* PCIe registers. */ +- WRT_REG_DWORD(®->iobase_addr, 0x7C00); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_window, 0x01); ++ wrt_reg_dword(®->iobase_addr, 0x7C00); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_window, 0x01); + dmp_reg = ®->iobase_c4; +- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; +- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; +- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); +- fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); ++ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); ++ fw->pcie_regs[3] = htonl(rd_reg_dword(®->iobase_window)); + +- WRT_REG_DWORD(®->iobase_window, 0x00); +- RD_REG_DWORD(®->iobase_window); ++ wrt_reg_dword(®->iobase_window, 0x00); ++ rd_reg_dword(®->iobase_window); + + /* Host interface registers. */ + dmp_reg = ®->flash_addr; + for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) +- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + + /* Disable interrupts. */ +- WRT_REG_DWORD(®->ictrl, 0); +- RD_REG_DWORD(®->ictrl); ++ wrt_reg_dword(®->ictrl, 0); ++ rd_reg_dword(®->ictrl); + + /* Shadow registers. 
*/ +- WRT_REG_DWORD(®->iobase_addr, 0x0F70); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_select, 0xB0000000); +- fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_addr, 0x0F70); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_select, 0xB0000000); ++ fw->shadow_reg[0] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0100000); +- fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0100000); ++ fw->shadow_reg[1] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0200000); +- fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0200000); ++ fw->shadow_reg[2] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0300000); +- fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0300000); ++ fw->shadow_reg[3] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0400000); +- fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0400000); ++ fw->shadow_reg[4] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0500000); +- fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0500000); ++ fw->shadow_reg[5] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0600000); +- fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0600000); ++ fw->shadow_reg[6] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0700000); +- fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0700000); ++ fw->shadow_reg[7] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0800000); +- fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0800000); ++ fw->shadow_reg[8] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0900000); +- fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0900000); ++ fw->shadow_reg[9] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0A00000); +- fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0A00000); ++ fw->shadow_reg[10] = htonl(rd_reg_dword(®->iobase_sdata)); + + /* RISC I/O register. */ +- WRT_REG_DWORD(®->iobase_addr, 0x0010); +- fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); ++ wrt_reg_dword(®->iobase_addr, 0x0010); ++ fw->risc_io_reg = htonl(rd_reg_dword(®->iobase_window)); + + /* Mailbox registers. */ + mbx_reg = ®->mailbox0; + for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) +- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); ++ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + + /* Transfer sequence registers. 
*/ + iter_reg = fw->xseq_gp_reg; +@@ -1805,19 +1805,19 @@ qla81xx_fw_dump(scsi_qla_host_t *vha) + iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->resp0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->req1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + /* Transmit DMA registers. */ + iter_reg = fw->xmt0_dma_reg; +@@ -1976,7 +1976,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) + fw = &ha->fw_dump->isp.isp83; + qla2xxx_prep_dump(ha, ha->fw_dump); + +- fw->host_status = htonl(RD_REG_DWORD(®->host_status)); ++ fw->host_status = htonl(rd_reg_dword(®->host_status)); + + /* + * Pause RISC. No need to track timeout, as resetting the chip +@@ -1984,24 +1984,24 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) + */ + qla24xx_pause_risc(reg, ha); + +- WRT_REG_DWORD(®->iobase_addr, 0x6000); ++ wrt_reg_dword(®->iobase_addr, 0x6000); + dmp_reg = ®->iobase_window; +- RD_REG_DWORD(dmp_reg); +- WRT_REG_DWORD(dmp_reg, 0); ++ rd_reg_dword(dmp_reg); ++ wrt_reg_dword(dmp_reg, 0); + + dmp_reg = ®->unused_4_1[0]; +- RD_REG_DWORD(dmp_reg); +- WRT_REG_DWORD(dmp_reg, 0); ++ rd_reg_dword(dmp_reg); ++ wrt_reg_dword(dmp_reg, 0); + +- WRT_REG_DWORD(®->iobase_addr, 0x6010); ++ wrt_reg_dword(®->iobase_addr, 0x6010); + dmp_reg = ®->unused_4_1[2]; +- RD_REG_DWORD(dmp_reg); +- WRT_REG_DWORD(dmp_reg, 0); ++ rd_reg_dword(dmp_reg); ++ wrt_reg_dword(dmp_reg, 0); + + /* select PCR and disable ecc checking and correction */ +- WRT_REG_DWORD(®->iobase_addr, 0x0F70); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_select, 0x60000000); /* write to F0h = PCR */ ++ wrt_reg_dword(®->iobase_addr, 0x0F70); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_select, 0x60000000); /* write to F0h = PCR */ + + /* Host/Risc registers. */ + iter_reg = fw->host_risc_reg; +@@ -2010,73 +2010,73 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) + qla24xx_read_window(reg, 0x7040, 16, iter_reg); + + /* PCIe registers. */ +- WRT_REG_DWORD(®->iobase_addr, 0x7C00); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_window, 0x01); ++ wrt_reg_dword(®->iobase_addr, 0x7C00); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_window, 0x01); + dmp_reg = ®->iobase_c4; +- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; +- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); + dmp_reg++; +- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); +- fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); ++ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); ++ fw->pcie_regs[3] = htonl(rd_reg_dword(®->iobase_window)); + +- WRT_REG_DWORD(®->iobase_window, 0x00); +- RD_REG_DWORD(®->iobase_window); ++ wrt_reg_dword(®->iobase_window, 0x00); ++ rd_reg_dword(®->iobase_window); + + /* Host interface registers. 
*/ + dmp_reg = ®->flash_addr; + for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) +- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); ++ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + + /* Disable interrupts. */ +- WRT_REG_DWORD(®->ictrl, 0); +- RD_REG_DWORD(®->ictrl); ++ wrt_reg_dword(®->ictrl, 0); ++ rd_reg_dword(®->ictrl); + + /* Shadow registers. */ +- WRT_REG_DWORD(®->iobase_addr, 0x0F70); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_select, 0xB0000000); +- fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_addr, 0x0F70); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_select, 0xB0000000); ++ fw->shadow_reg[0] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0100000); +- fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0100000); ++ fw->shadow_reg[1] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0200000); +- fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0200000); ++ fw->shadow_reg[2] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0300000); +- fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0300000); ++ fw->shadow_reg[3] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0400000); +- fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0400000); ++ fw->shadow_reg[4] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0500000); +- fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0500000); ++ fw->shadow_reg[5] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0600000); +- fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0600000); ++ fw->shadow_reg[6] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0700000); +- fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0700000); ++ fw->shadow_reg[7] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0800000); +- fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0800000); ++ fw->shadow_reg[8] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0900000); +- fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0900000); ++ fw->shadow_reg[9] = htonl(rd_reg_dword(®->iobase_sdata)); + +- WRT_REG_DWORD(®->iobase_select, 0xB0A00000); +- fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); ++ wrt_reg_dword(®->iobase_select, 0xB0A00000); ++ fw->shadow_reg[10] = htonl(rd_reg_dword(®->iobase_sdata)); + + /* RISC I/O register. */ +- WRT_REG_DWORD(®->iobase_addr, 0x0010); +- fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); ++ wrt_reg_dword(®->iobase_addr, 0x0010); ++ fw->risc_io_reg = htonl(rd_reg_dword(®->iobase_window)); + + /* Mailbox registers. */ + mbx_reg = ®->mailbox0; + for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) +- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); ++ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + + /* Transfer sequence registers. 
*/ + iter_reg = fw->xseq_gp_reg; +@@ -2172,19 +2172,19 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) + iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->resp0_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + iter_reg = fw->req1_dma_reg; + iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); + dmp_reg = ®->iobase_q; + for (cnt = 0; cnt < 7; cnt++, dmp_reg++) +- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); ++ *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + + /* Transmit DMA registers. */ + iter_reg = fw->xmt0_dma_reg; +@@ -2390,16 +2390,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) + + ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); + +- WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); ++ rd_reg_dword(®->hccr); + +- WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); ++ rd_reg_dword(®->hccr); + +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); ++ rd_reg_dword(®->hccr); + +- for (cnt = 30000; cnt && (RD_REG_WORD(®->mailbox0)); cnt--) ++ for (cnt = 30000; cnt && (rd_reg_word(®->mailbox0)); cnt--) + udelay(5); + + if (!cnt) { +@@ -2675,7 +2675,7 @@ ql_dump_regs(uint level, scsi_qla_host_t + ql_dbg(level, vha, id, "Mailbox registers:\n"); + for (i = 0; i < 6; i++, mbx_reg++) + ql_dbg(level, vha, id, +- "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg)); ++ "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg)); + } + + +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -128,47 +128,47 @@ static inline uint32_t make_handle(uint1 + * I/O register + */ + +-static inline u8 RD_REG_BYTE(const volatile u8 __iomem *addr) ++static inline u8 rd_reg_byte(const volatile u8 __iomem *addr) + { + return readb(addr); + } + +-static inline u16 RD_REG_WORD(const volatile __le16 __iomem *addr) ++static inline u16 rd_reg_word(const volatile __le16 __iomem *addr) + { + return readw(addr); + } + +-static inline u32 RD_REG_DWORD(const volatile __le32 __iomem *addr) ++static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr) + { + return readl(addr); + } + +-static inline u8 RD_REG_BYTE_RELAXED(const volatile u8 __iomem *addr) ++static inline u8 rd_reg_byte_relaxed(const volatile u8 __iomem *addr) + { + return readb_relaxed(addr); + } + +-static inline u16 RD_REG_WORD_RELAXED(const volatile __le16 __iomem *addr) ++static inline u16 rd_reg_word_relaxed(const volatile __le16 __iomem *addr) + { + return readw_relaxed(addr); + } + +-static inline u32 RD_REG_DWORD_RELAXED(const volatile __le32 __iomem *addr) ++static inline u32 rd_reg_dword_relaxed(const volatile __le32 __iomem *addr) + { + return readl_relaxed(addr); + } + +-static inline void WRT_REG_BYTE(volatile u8 __iomem *addr, u8 data) ++static inline void wrt_reg_byte(volatile u8 __iomem *addr, u8 data) + { + return writeb(data, addr); + } + +-static inline void WRT_REG_WORD(volatile __le16 __iomem *addr, u16 data) ++static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data) + { + return writew(data, addr); + } + +-static inline void 
WRT_REG_DWORD(volatile __le32 __iomem *addr, u32 data) ++static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data) + { + return writel(data, addr); + } +@@ -956,18 +956,18 @@ typedef union { + &(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \ + &(reg)->u.isp2300.mailbox0 + (num)) + #define RD_MAILBOX_REG(ha, reg, num) \ +- RD_REG_WORD(MAILBOX_REG(ha, reg, num)) ++ rd_reg_word(MAILBOX_REG(ha, reg, num)) + #define WRT_MAILBOX_REG(ha, reg, num, data) \ +- WRT_REG_WORD(MAILBOX_REG(ha, reg, num), data) ++ wrt_reg_word(MAILBOX_REG(ha, reg, num), data) + + #define FB_CMD_REG(ha, reg) \ + (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ + &(reg)->fb_cmd_2100 : \ + &(reg)->u.isp2300.fb_cmd) + #define RD_FB_CMD_REG(ha, reg) \ +- RD_REG_WORD(FB_CMD_REG(ha, reg)) ++ rd_reg_word(FB_CMD_REG(ha, reg)) + #define WRT_FB_CMD_REG(ha, reg, data) \ +- WRT_REG_WORD(FB_CMD_REG(ha, reg), data) ++ wrt_reg_word(FB_CMD_REG(ha, reg), data) + + typedef struct { + uint32_t out_mb; /* outbound from driver */ +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -2221,7 +2221,7 @@ qla2x00_initialize_adapter(scsi_qla_host + + /* Check for secure flash support */ + if (IS_QLA28XX(ha)) { +- if (RD_REG_WORD(®->mailbox12) & BIT_0) ++ if (rd_reg_word(®->mailbox12) & BIT_0) + ha->flags.secure_adapter = 1; + ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n", + (ha->flags.secure_adapter) ? "Yes" : "No"); +@@ -2359,7 +2359,7 @@ qla2100_pci_config(scsi_qla_host_t *vha) + + /* Get PCI bus information. */ + spin_lock_irqsave(&ha->hardware_lock, flags); +- ha->pci_attr = RD_REG_WORD(®->ctrl_status); ++ ha->pci_attr = rd_reg_word(®->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +@@ -2401,17 +2401,17 @@ qla2300_pci_config(scsi_qla_host_t *vha) + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Pause RISC. */ +- WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); ++ wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + for (cnt = 0; cnt < 30000; cnt++) { +- if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) != 0) ++ if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0) + break; + + udelay(10); + } + + /* Select FPM registers. */ +- WRT_REG_WORD(®->ctrl_status, 0x20); +- RD_REG_WORD(®->ctrl_status); ++ wrt_reg_word(®->ctrl_status, 0x20); ++ rd_reg_word(®->ctrl_status); + + /* Get the fb rev level */ + ha->fb_rev = RD_FB_CMD_REG(ha, reg); +@@ -2420,13 +2420,13 @@ qla2300_pci_config(scsi_qla_host_t *vha) + pci_clear_mwi(ha->pdev); + + /* Deselect FPM registers. */ +- WRT_REG_WORD(®->ctrl_status, 0x0); +- RD_REG_WORD(®->ctrl_status); ++ wrt_reg_word(®->ctrl_status, 0x0); ++ rd_reg_word(®->ctrl_status); + + /* Release RISC module. */ +- WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); ++ wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + for (cnt = 0; cnt < 30000; cnt++) { +- if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0) ++ if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0) + break; + + udelay(10); +@@ -2441,7 +2441,7 @@ qla2300_pci_config(scsi_qla_host_t *vha) + + /* Get PCI bus information. */ + spin_lock_irqsave(&ha->hardware_lock, flags); +- ha->pci_attr = RD_REG_WORD(®->ctrl_status); ++ ha->pci_attr = rd_reg_word(®->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +@@ -2485,7 +2485,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha) + + /* Get PCI bus information. 
*/ + spin_lock_irqsave(&ha->hardware_lock, flags); +- ha->pci_attr = RD_REG_DWORD(®->ctrl_status); ++ ha->pci_attr = rd_reg_dword(®->ctrl_status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +@@ -2589,36 +2589,36 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) + + if (!IS_QLA2100(ha)) { + /* Pause RISC. */ +- WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); ++ wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + if (IS_QLA2200(ha) || IS_QLA2300(ha)) { + for (cnt = 0; cnt < 30000; cnt++) { +- if ((RD_REG_WORD(®->hccr) & ++ if ((rd_reg_word(®->hccr) & + HCCR_RISC_PAUSE) != 0) + break; + udelay(100); + } + } else { +- RD_REG_WORD(®->hccr); /* PCI Posting. */ ++ rd_reg_word(®->hccr); /* PCI Posting. */ + udelay(10); + } + + /* Select FPM registers. */ +- WRT_REG_WORD(®->ctrl_status, 0x20); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, 0x20); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + + /* FPM Soft Reset. */ +- WRT_REG_WORD(®->fpm_diag_config, 0x100); +- RD_REG_WORD(®->fpm_diag_config); /* PCI Posting. */ ++ wrt_reg_word(®->fpm_diag_config, 0x100); ++ rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ + + /* Toggle Fpm Reset. */ + if (!IS_QLA2200(ha)) { +- WRT_REG_WORD(®->fpm_diag_config, 0x0); +- RD_REG_WORD(®->fpm_diag_config); /* PCI Posting. */ ++ wrt_reg_word(®->fpm_diag_config, 0x0); ++ rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ + } + + /* Select frame buffer registers. */ +- WRT_REG_WORD(®->ctrl_status, 0x10); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, 0x10); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + + /* Reset frame buffer FIFOs. */ + if (IS_QLA2200(ha)) { +@@ -2636,23 +2636,23 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) + } + + /* Select RISC module registers. */ +- WRT_REG_WORD(®->ctrl_status, 0); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, 0); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + + /* Reset RISC processor. */ +- WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); +- RD_REG_WORD(®->hccr); /* PCI Posting. */ ++ wrt_reg_word(®->hccr, HCCR_RESET_RISC); ++ rd_reg_word(®->hccr); /* PCI Posting. */ + + /* Release RISC processor. */ +- WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); +- RD_REG_WORD(®->hccr); /* PCI Posting. */ ++ wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); ++ rd_reg_word(®->hccr); /* PCI Posting. */ + } + +- WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); +- WRT_REG_WORD(®->hccr, HCCR_CLR_HOST_INT); ++ wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); ++ wrt_reg_word(®->hccr, HCCR_CLR_HOST_INT); + + /* Reset ISP chip. */ +- WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); ++ wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + + /* Wait for RISC to recover from reset. */ + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { +@@ -2663,7 +2663,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) + */ + udelay(20); + for (cnt = 30000; cnt; cnt--) { +- if ((RD_REG_WORD(®->ctrl_status) & ++ if ((rd_reg_word(®->ctrl_status) & + CSR_ISP_SOFT_RESET) == 0) + break; + udelay(100); +@@ -2672,13 +2672,13 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) + udelay(10); + + /* Reset RISC processor. */ +- WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); ++ wrt_reg_word(®->hccr, HCCR_RESET_RISC); + +- WRT_REG_WORD(®->semaphore, 0); ++ wrt_reg_word(®->semaphore, 0); + + /* Release RISC processor. */ +- WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); +- RD_REG_WORD(®->hccr); /* PCI Posting. */ ++ wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); ++ rd_reg_word(®->hccr); /* PCI Posting. 
*/ + + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { + for (cnt = 0; cnt < 30000; cnt++) { +@@ -2696,8 +2696,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) + + /* Disable RISC pause on FPM parity error. */ + if (!IS_QLA2100(ha)) { +- WRT_REG_WORD(®->hccr, HCCR_DISABLE_PARITY_PAUSE); +- RD_REG_WORD(®->hccr); /* PCI Posting. */ ++ wrt_reg_word(®->hccr, HCCR_DISABLE_PARITY_PAUSE); ++ rd_reg_word(®->hccr); /* PCI Posting. */ + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); +@@ -2742,32 +2742,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Reset RISC. */ +- WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); ++ wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + for (cnt = 0; cnt < 30000; cnt++) { +- if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) ++ if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) + break; + + udelay(10); + } + +- if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)) ++ if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)) + set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e, + "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n", +- RD_REG_DWORD(®->hccr), +- RD_REG_DWORD(®->ctrl_status), +- (RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)); ++ rd_reg_dword(®->hccr), ++ rd_reg_dword(®->ctrl_status), ++ (rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)); + +- WRT_REG_DWORD(®->ctrl_status, ++ wrt_reg_dword(®->ctrl_status, + CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); + + udelay(100); + + /* Wait for firmware to complete NVRAM accesses. */ +- RD_REG_WORD(®->mailbox0); +- for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 && ++ rd_reg_word(®->mailbox0); ++ for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 && + rval == QLA_SUCCESS; cnt--) { + barrier(); + if (cnt) +@@ -2781,26 +2781,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f, + "HCCR: 0x%x, MailBox0 Status 0x%x\n", +- RD_REG_DWORD(®->hccr), +- RD_REG_WORD(®->mailbox0)); ++ rd_reg_dword(®->hccr), ++ rd_reg_word(®->mailbox0)); + + /* Wait for soft-reset to complete. 
*/ +- RD_REG_DWORD(®->ctrl_status); ++ rd_reg_dword(®->ctrl_status); + for (cnt = 0; cnt < 60; cnt++) { + barrier(); +- if ((RD_REG_DWORD(®->ctrl_status) & ++ if ((rd_reg_dword(®->ctrl_status) & + CSRX_ISP_SOFT_RESET) == 0) + break; + + udelay(5); + } +- if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) ++ if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) + set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d, + "HCCR: 0x%x, Soft Reset status: 0x%x\n", +- RD_REG_DWORD(®->hccr), +- RD_REG_DWORD(®->ctrl_status)); ++ rd_reg_dword(®->hccr), ++ rd_reg_dword(®->ctrl_status)); + + /* If required, do an MPI FW reset now */ + if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { +@@ -2819,17 +2819,17 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) + } + } + +- WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); ++ rd_reg_dword(®->hccr); + +- WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); ++ rd_reg_dword(®->hccr); + +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); ++ rd_reg_dword(®->hccr); + +- RD_REG_WORD(®->mailbox0); +- for (cnt = 60; RD_REG_WORD(®->mailbox0) != 0 && ++ rd_reg_word(®->mailbox0); ++ for (cnt = 60; rd_reg_word(®->mailbox0) != 0 && + rval == QLA_SUCCESS; cnt--) { + barrier(); + if (cnt) +@@ -2842,8 +2842,8 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e, + "Host Risc 0x%x, mailbox0 0x%x\n", +- RD_REG_DWORD(®->hccr), +- RD_REG_WORD(®->mailbox0)); ++ rd_reg_dword(®->hccr), ++ rd_reg_word(®->mailbox0)); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + +@@ -2862,9 +2862,8 @@ qla25xx_read_risc_sema_reg(scsi_qla_host + { + struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; + +- WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); +- *data = RD_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET); +- ++ wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); ++ *data = rd_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET); + } + + static void +@@ -2872,8 +2871,8 @@ qla25xx_write_risc_sema_reg(scsi_qla_hos + { + struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; + +- WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); +- WRT_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data); ++ wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); ++ wrt_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data); + } + + static void +@@ -2889,7 +2888,7 @@ qla25xx_manipulate_risc_semaphore(scsi_q + vha->hw->pdev->subsystem_device != 0x0240) + return; + +- WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); ++ wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); + udelay(100); + + attempt: +@@ -2991,7 +2990,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Reset ISP chip. 
*/ +- WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); ++ wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + + /* + * We need to have a delay here since the card will not respond while +@@ -3001,7 +3000,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) + data = qla2x00_debounce_register(®->ctrl_status); + for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) { + udelay(5); +- data = RD_REG_WORD(®->ctrl_status); ++ data = rd_reg_word(®->ctrl_status); + barrier(); + } + +@@ -3012,8 +3011,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) + "Reset register cleared by chip reset.\n"); + + /* Reset RISC processor. */ +- WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); +- WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); ++ wrt_reg_word(®->hccr, HCCR_RESET_RISC); ++ wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + + /* Workaround for QLA2312 PCI parity error */ + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { +@@ -3652,8 +3651,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) + if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { + /* Disable SRAM, Instruction RAM and GP RAM parity. */ + spin_lock_irqsave(&ha->hardware_lock, flags); +- WRT_REG_WORD(®->hccr, (HCCR_ENABLE_PARITY + 0x0)); +- RD_REG_WORD(®->hccr); ++ wrt_reg_word(®->hccr, (HCCR_ENABLE_PARITY + 0x0)); ++ rd_reg_word(®->hccr); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + +@@ -3760,11 +3759,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) + spin_lock_irqsave(&ha->hardware_lock, flags); + if (IS_QLA2300(ha)) + /* SRAM parity */ +- WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x1); ++ wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x1); + else + /* SRAM, Instruction RAM and GP RAM parity */ +- WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x7); +- RD_REG_WORD(®->hccr); ++ wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x7); ++ rd_reg_word(®->hccr); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + +@@ -4008,11 +4007,11 @@ qla2x00_config_rings(struct scsi_qla_hos + put_unaligned_le64(req->dma, &ha->init_cb->request_q_address); + put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address); + +- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); +- WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); +- WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0); +- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0); +- RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ ++ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0); ++ wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0); ++ wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0); ++ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0); ++ rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. 
*/ + } + + void +@@ -4074,15 +4073,15 @@ qla24xx_config_rings(struct scsi_qla_hos + } + icb->firmware_options_2 |= cpu_to_le32(BIT_23); + +- WRT_REG_DWORD(®->isp25mq.req_q_in, 0); +- WRT_REG_DWORD(®->isp25mq.req_q_out, 0); +- WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0); +- WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0); ++ wrt_reg_dword(®->isp25mq.req_q_in, 0); ++ wrt_reg_dword(®->isp25mq.req_q_out, 0); ++ wrt_reg_dword(®->isp25mq.rsp_q_in, 0); ++ wrt_reg_dword(®->isp25mq.rsp_q_out, 0); + } else { +- WRT_REG_DWORD(®->isp24.req_q_in, 0); +- WRT_REG_DWORD(®->isp24.req_q_out, 0); +- WRT_REG_DWORD(®->isp24.rsp_q_in, 0); +- WRT_REG_DWORD(®->isp24.rsp_q_out, 0); ++ wrt_reg_dword(®->isp24.req_q_in, 0); ++ wrt_reg_dword(®->isp24.req_q_out, 0); ++ wrt_reg_dword(®->isp24.rsp_q_in, 0); ++ wrt_reg_dword(®->isp24.rsp_q_out, 0); + } + + qlt_24xx_config_rings(vha); +@@ -4096,7 +4095,7 @@ qla24xx_config_rings(struct scsi_qla_hos + } + + /* PCI posting */ +- RD_REG_WORD(&ioreg->hccr); ++ rd_reg_word(&ioreg->hccr); + } + + /** +@@ -4567,7 +4566,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vh + ha->nvram_size = sizeof(*nv); + ha->nvram_base = 0; + if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) +- if ((RD_REG_WORD(®->ctrl_status) >> 14) == 1) ++ if ((rd_reg_word(®->ctrl_status) >> 14) == 1) + ha->nvram_base = 0x80; + + /* Get NVRAM data and calculate checksum. */ +@@ -7088,10 +7087,10 @@ qla2x00_reset_adapter(scsi_qla_host_t *v + ha->isp_ops->disable_intrs(ha); + + spin_lock_irqsave(&ha->hardware_lock, flags); +- WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); +- RD_REG_WORD(®->hccr); /* PCI Posting. */ +- WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); +- RD_REG_WORD(®->hccr); /* PCI Posting. */ ++ wrt_reg_word(®->hccr, HCCR_RESET_RISC); ++ rd_reg_word(®->hccr); /* PCI Posting. */ ++ wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); ++ rd_reg_word(®->hccr); /* PCI Posting. */ + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; +@@ -7112,10 +7111,10 @@ qla24xx_reset_adapter(scsi_qla_host_t *v + ha->isp_ops->disable_intrs(ha); + + spin_lock_irqsave(&ha->hardware_lock, flags); +- WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); +- RD_REG_DWORD(®->hccr); +- WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); +- RD_REG_DWORD(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); ++ rd_reg_dword(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); ++ rd_reg_dword(®->hccr); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (IS_NOPOLLING_TYPE(ha)) +--- a/drivers/scsi/qla2xxx/qla_inline.h ++++ b/drivers/scsi/qla2xxx/qla_inline.h +@@ -46,10 +46,10 @@ qla2x00_debounce_register(volatile uint1 + volatile uint16_t second; + + do { +- first = RD_REG_WORD(addr); ++ first = rd_reg_word(addr); + barrier(); + cpu_relax(); +- second = RD_REG_WORD(addr); ++ second = rd_reg_word(addr); + } while (first != second); + + return (first); +@@ -329,7 +329,7 @@ qla_83xx_start_iocbs(struct qla_qpair *q + } else + req->ring_ptr++; + +- WRT_REG_DWORD(req->req_q_in, req->ring_index); ++ wrt_reg_dword(req->req_q_in, req->ring_index); + } + + static inline int +--- a/drivers/scsi/qla2xxx/qla_iocb.c ++++ b/drivers/scsi/qla2xxx/qla_iocb.c +@@ -376,7 +376,7 @@ qla2x00_start_scsi(srb_t *sp) + /* Calculate the number of request entries needed. 
*/ + req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); + if (req->cnt < (req_cnt + 2)) { +- cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); ++ cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg)); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else +@@ -428,8 +428,8 @@ qla2x00_start_scsi(srb_t *sp) + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ +- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index); +- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ ++ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index); ++ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && +@@ -472,21 +472,21 @@ qla2x00_start_iocbs(struct scsi_qla_host + + /* Set chip new ring index. */ + if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { +- WRT_REG_DWORD(req->req_q_in, req->ring_index); ++ wrt_reg_dword(req->req_q_in, req->ring_index); + } else if (IS_QLA83XX(ha)) { +- WRT_REG_DWORD(req->req_q_in, req->ring_index); +- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); ++ wrt_reg_dword(req->req_q_in, req->ring_index); ++ rd_reg_dword_relaxed(&ha->iobase->isp24.hccr); + } else if (IS_QLAFX00(ha)) { +- WRT_REG_DWORD(®->ispfx00.req_q_in, req->ring_index); +- RD_REG_DWORD_RELAXED(®->ispfx00.req_q_in); ++ wrt_reg_dword(®->ispfx00.req_q_in, req->ring_index); ++ rd_reg_dword_relaxed(®->ispfx00.req_q_in); + QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); + } else if (IS_FWI2_CAPABLE(ha)) { +- WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); +- RD_REG_DWORD_RELAXED(®->isp24.req_q_in); ++ wrt_reg_dword(®->isp24.req_q_in, req->ring_index); ++ rd_reg_dword_relaxed(®->isp24.req_q_in); + } else { +- WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), ++ wrt_reg_word(ISP_REQ_Q_IN(ha, ®->isp), + req->ring_index); +- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); ++ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, ®->isp)); + } + } + } +@@ -1637,7 +1637,7 @@ qla24xx_start_scsi(srb_t *sp) + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : +- RD_REG_DWORD_RELAXED(req->req_q_out); ++ rd_reg_dword_relaxed(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else +@@ -1698,7 +1698,7 @@ qla24xx_start_scsi(srb_t *sp) + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ +- WRT_REG_DWORD(req->req_q_in, req->ring_index); ++ wrt_reg_dword(req->req_q_in, req->ring_index); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_SUCCESS; +@@ -1822,7 +1822,7 @@ qla24xx_dif_start_scsi(srb_t *sp) + tot_dsds += nseg; + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : +- RD_REG_DWORD_RELAXED(req->req_q_out); ++ rd_reg_dword_relaxed(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else +@@ -1881,7 +1881,7 @@ qla24xx_dif_start_scsi(srb_t *sp) + req->ring_ptr++; + + /* Set chip new ring index. */ +- WRT_REG_DWORD(req->req_q_in, req->ring_index); ++ wrt_reg_dword(req->req_q_in, req->ring_index); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + +@@ -1957,7 +1957,7 @@ qla2xxx_start_scsi_mq(srb_t *sp) + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? 
*req->out_ptr : +- RD_REG_DWORD_RELAXED(req->req_q_out); ++ rd_reg_dword_relaxed(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else +@@ -2018,7 +2018,7 @@ qla2xxx_start_scsi_mq(srb_t *sp) + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ +- WRT_REG_DWORD(req->req_q_in, req->ring_index); ++ wrt_reg_dword(req->req_q_in, req->ring_index); + + spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_SUCCESS; +@@ -2157,7 +2157,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) + tot_dsds += nseg; + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : +- RD_REG_DWORD_RELAXED(req->req_q_out); ++ rd_reg_dword_relaxed(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else +@@ -2214,7 +2214,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) + req->ring_ptr++; + + /* Set chip new ring index. */ +- WRT_REG_DWORD(req->req_q_in, req->ring_index); ++ wrt_reg_dword(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && +@@ -2266,13 +2266,13 @@ void * + cnt = *req->out_ptr; + else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) +- cnt = RD_REG_DWORD(®->isp25mq.req_q_out); ++ cnt = rd_reg_dword(®->isp25mq.req_q_out); + else if (IS_P3P_TYPE(ha)) +- cnt = RD_REG_DWORD(reg->isp82.req_q_out); ++ cnt = rd_reg_dword(reg->isp82.req_q_out); + else if (IS_FWI2_CAPABLE(ha)) +- cnt = RD_REG_DWORD(®->isp24.req_q_out); ++ cnt = rd_reg_dword(®->isp24.req_q_out); + else if (IS_QLAFX00(ha)) +- cnt = RD_REG_DWORD(®->ispfx00.req_q_out); ++ cnt = rd_reg_dword(®->ispfx00.req_q_out); + else + cnt = qla2x00_debounce_register( + ISP_REQ_Q_OUT(ha, ®->isp)); +@@ -2305,8 +2305,8 @@ void * + pkt = req->ring_ptr; + memset(pkt, 0, REQUEST_ENTRY_SIZE); + if (IS_QLAFX00(ha)) { +- WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt); +- WRT_REG_WORD((void __iomem *)&pkt->handle, handle); ++ wrt_reg_byte((void __iomem *)&pkt->entry_count, req_cnt); ++ wrt_reg_word((void __iomem *)&pkt->handle, handle); + } else { + pkt->entry_count = req_cnt; + pkt->handle = handle; +@@ -3313,7 +3313,7 @@ qla82xx_start_scsi(srb_t *sp) + req_cnt = 1; + + if (req->cnt < (req_cnt + 2)) { +- cnt = (uint16_t)RD_REG_DWORD_RELAXED( ++ cnt = (uint16_t)rd_reg_dword_relaxed( + ®->req_q_out[0]); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; +@@ -3422,7 +3422,7 @@ qla82xx_start_scsi(srb_t *sp) + + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { +- cnt = (uint16_t)RD_REG_DWORD_RELAXED( ++ cnt = (uint16_t)rd_reg_dword_relaxed( + ®->req_q_out[0]); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; +@@ -3498,10 +3498,10 @@ qla82xx_start_scsi(srb_t *sp) + if (ql2xdbwr) + qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); + else { +- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); ++ wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + wmb(); +- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { +- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); ++ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { ++ wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + wmb(); + } + } +@@ -3897,7 +3897,7 @@ qla2x00_start_bidir(srb_t *sp, struct sc + /* Check for room on request queue. */ + if (req->cnt < req_cnt + 2) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? 
*req->out_ptr : +- RD_REG_DWORD_RELAXED(req->req_q_out); ++ rd_reg_dword_relaxed(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -204,7 +204,7 @@ qla2100_intr_handler(int irq, void *dev_ + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + for (iter = 50; iter--; ) { +- hccr = RD_REG_WORD(®->hccr); ++ hccr = rd_reg_word(®->hccr); + if (qla2x00_check_reg16_for_disconnect(vha, hccr)) + break; + if (hccr & HCCR_RISC_PAUSE) { +@@ -216,18 +216,18 @@ qla2100_intr_handler(int irq, void *dev_ + * bit to be cleared. Schedule a big hammer to get + * out of the RISC PAUSED state. + */ +- WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); +- RD_REG_WORD(®->hccr); ++ wrt_reg_word(®->hccr, HCCR_RESET_RISC); ++ rd_reg_word(®->hccr); + + ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; +- } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) ++ } else if ((rd_reg_word(®->istatus) & ISR_RISC_INT) == 0) + break; + +- if (RD_REG_WORD(®->semaphore) & BIT_0) { +- WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); +- RD_REG_WORD(®->hccr); ++ if (rd_reg_word(®->semaphore) & BIT_0) { ++ wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); ++ rd_reg_word(®->hccr); + + /* Get mailbox data. */ + mb[0] = RD_MAILBOX_REG(ha, reg, 0); +@@ -246,13 +246,13 @@ qla2100_intr_handler(int irq, void *dev_ + mb[0]); + } + /* Release mailbox registers. */ +- WRT_REG_WORD(®->semaphore, 0); +- RD_REG_WORD(®->semaphore); ++ wrt_reg_word(®->semaphore, 0); ++ rd_reg_word(®->semaphore); + } else { + qla2x00_process_response_queue(rsp); + +- WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); +- RD_REG_WORD(®->hccr); ++ wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); ++ rd_reg_word(®->hccr); + } + } + qla2x00_handle_mbx_completion(ha, status); +@@ -324,14 +324,14 @@ qla2300_intr_handler(int irq, void *dev_ + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + for (iter = 50; iter--; ) { +- stat = RD_REG_DWORD(®->u.isp2300.host_status); ++ stat = rd_reg_dword(®->u.isp2300.host_status); + if (qla2x00_check_reg32_for_disconnect(vha, stat)) + break; + if (stat & HSR_RISC_PAUSED) { + if (unlikely(pci_channel_offline(ha->pdev))) + break; + +- hccr = RD_REG_WORD(®->hccr); ++ hccr = rd_reg_word(®->hccr); + + if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) + ql_log(ql_log_warn, vha, 0x5026, +@@ -347,8 +347,8 @@ qla2300_intr_handler(int irq, void *dev_ + * interrupt bit to be cleared. Schedule a big + * hammer to get out of the RISC PAUSED state. + */ +- WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); +- RD_REG_WORD(®->hccr); ++ wrt_reg_word(®->hccr, HCCR_RESET_RISC); ++ rd_reg_word(®->hccr); + + ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); +@@ -365,7 +365,7 @@ qla2300_intr_handler(int irq, void *dev_ + status |= MBX_INTERRUPT; + + /* Release mailbox registers. 
*/ +- WRT_REG_WORD(®->semaphore, 0); ++ wrt_reg_word(®->semaphore, 0); + break; + case 0x12: + mb[0] = MSW(stat); +@@ -393,8 +393,8 @@ qla2300_intr_handler(int irq, void *dev_ + "Unrecognized interrupt type (%d).\n", stat & 0xff); + break; + } +- WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); +- RD_REG_WORD_RELAXED(®->hccr); ++ wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); ++ rd_reg_word_relaxed(®->hccr); + } + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +@@ -436,7 +436,7 @@ qla2x00_mbx_completion(scsi_qla_host_t * + if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) + ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); + else if (mboxes & BIT_0) +- ha->mailbox_out[cnt] = RD_REG_WORD(wptr); ++ ha->mailbox_out[cnt] = rd_reg_word(wptr); + + wptr++; + mboxes >>= 1; +@@ -463,7 +463,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, + return; + + for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) +- mb[cnt] = RD_REG_WORD(wptr); ++ mb[cnt] = rd_reg_word(wptr); + + ql_dbg(ql_dbg_async, vha, 0x5021, + "Inter-Driver Communication %s -- " +@@ -891,10 +891,10 @@ qla2x00_async_event(scsi_qla_host_t *vha + IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + u16 m[4]; + +- m[0] = RD_REG_WORD(®24->mailbox4); +- m[1] = RD_REG_WORD(®24->mailbox5); +- m[2] = RD_REG_WORD(®24->mailbox6); +- mbx = m[3] = RD_REG_WORD(®24->mailbox7); ++ m[0] = rd_reg_word(®24->mailbox4); ++ m[1] = rd_reg_word(®24->mailbox5); ++ m[2] = rd_reg_word(®24->mailbox6); ++ mbx = m[3] = rd_reg_word(®24->mailbox7); + + ql_log(ql_log_warn, vha, 0x5003, + "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", +@@ -905,7 +905,7 @@ qla2x00_async_event(scsi_qla_host_t *vha + mb[1], mb[2], mb[3]); + + if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && +- RD_REG_WORD(®24->mailbox7) & BIT_8) ++ rd_reg_word(®24->mailbox7) & BIT_8) + ha->isp_ops->mpi_fw_dump(vha, 1); + ha->isp_ops->fw_dump(vha); + ha->flags.fw_init_done = 0; +@@ -1012,8 +1012,8 @@ qla2x00_async_event(scsi_qla_host_t *vha + ha->current_topology = 0; + + mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) +- ? RD_REG_WORD(®24->mailbox4) : 0; +- mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) ++ ? rd_reg_word(®24->mailbox4) : 0; ++ mbx = (IS_P3P_TYPE(ha)) ? 
rd_reg_word(®82->mailbox_out[4]) + : mbx; + ql_log(ql_log_info, vha, 0x500b, + "LOOP DOWN detected (%x %x %x %x).\n", +@@ -1380,7 +1380,7 @@ qla2x00_async_event(scsi_qla_host_t *vha + break; + case MBA_IDC_NOTIFY: + if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { +- mb[4] = RD_REG_WORD(®24->mailbox4); ++ mb[4] = rd_reg_word(®24->mailbox4); + if (((mb[2] & 0x7fff) == MBC_PORT_RESET || + (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && + (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { +@@ -1409,10 +1409,10 @@ qla2x00_async_event(scsi_qla_host_t *vha + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + qla27xx_handle_8200_aen(vha, mb); + } else if (IS_QLA83XX(ha)) { +- mb[4] = RD_REG_WORD(®24->mailbox4); +- mb[5] = RD_REG_WORD(®24->mailbox5); +- mb[6] = RD_REG_WORD(®24->mailbox6); +- mb[7] = RD_REG_WORD(®24->mailbox7); ++ mb[4] = rd_reg_word(®24->mailbox4); ++ mb[5] = rd_reg_word(®24->mailbox5); ++ mb[6] = rd_reg_word(®24->mailbox6); ++ mb[7] = rd_reg_word(®24->mailbox7); + qla83xx_handle_8200_aen(vha, mb); + } else { + ql_dbg(ql_dbg_async, vha, 0x5052, +@@ -2320,7 +2320,7 @@ qla2x00_process_response_queue(struct rs + } + + /* Adjust ring index */ +- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); ++ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); + } + + static inline void +@@ -3183,7 +3183,7 @@ qla24xx_mbx_completion(scsi_qla_host_t * + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) +- ha->mailbox_out[cnt] = RD_REG_WORD(wptr); ++ ha->mailbox_out[cnt] = rd_reg_word(wptr); + + mboxes >>= 1; + wptr++; +@@ -3360,9 +3360,9 @@ void qla24xx_process_response_queue(stru + if (IS_P3P_TYPE(ha)) { + struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; + +- WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); ++ wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index); + } else { +- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); ++ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); + } + } + +@@ -3379,13 +3379,13 @@ qla2xxx_check_risc_status(scsi_qla_host_ + return; + + rval = QLA_SUCCESS; +- WRT_REG_DWORD(®->iobase_addr, 0x7C00); +- RD_REG_DWORD(®->iobase_addr); +- WRT_REG_DWORD(®->iobase_window, 0x0001); +- for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && ++ wrt_reg_dword(®->iobase_addr, 0x7C00); ++ rd_reg_dword(®->iobase_addr); ++ wrt_reg_dword(®->iobase_window, 0x0001); ++ for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) { +- WRT_REG_DWORD(®->iobase_window, 0x0001); ++ wrt_reg_dword(®->iobase_window, 0x0001); + udelay(10); + } else + rval = QLA_FUNCTION_TIMEOUT; +@@ -3394,11 +3394,11 @@ qla2xxx_check_risc_status(scsi_qla_host_ + goto next_test; + + rval = QLA_SUCCESS; +- WRT_REG_DWORD(®->iobase_window, 0x0003); +- for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && ++ wrt_reg_dword(®->iobase_window, 0x0003); ++ for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && + rval == QLA_SUCCESS; cnt--) { + if (cnt) { +- WRT_REG_DWORD(®->iobase_window, 0x0003); ++ wrt_reg_dword(®->iobase_window, 0x0003); + udelay(10); + } else + rval = QLA_FUNCTION_TIMEOUT; +@@ -3407,13 +3407,13 @@ qla2xxx_check_risc_status(scsi_qla_host_ + goto done; + + next_test: +- if (RD_REG_DWORD(®->iobase_c8) & BIT_3) ++ if (rd_reg_dword(®->iobase_c8) & BIT_3) + ql_log(ql_log_info, vha, 0x504c, + "Additional code -- 0x55AA.\n"); + + done: +- WRT_REG_DWORD(®->iobase_window, 0x0000); +- RD_REG_DWORD(®->iobase_window); ++ wrt_reg_dword(®->iobase_window, 0x0000); ++ rd_reg_dword(®->iobase_window); + } + + /** +@@ -3457,14 +3457,14 @@ 
qla24xx_intr_handler(int irq, void *dev_ + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + for (iter = 50; iter--; ) { +- stat = RD_REG_DWORD(®->host_status); ++ stat = rd_reg_dword(®->host_status); + if (qla2x00_check_reg32_for_disconnect(vha, stat)) + break; + if (stat & HSRX_RISC_PAUSED) { + if (unlikely(pci_channel_offline(ha->pdev))) + break; + +- hccr = RD_REG_DWORD(®->hccr); ++ hccr = rd_reg_dword(®->hccr); + + ql_log(ql_log_warn, vha, 0x504b, + "RISC paused -- HCCR=%x, Dumping firmware.\n", +@@ -3489,9 +3489,9 @@ qla24xx_intr_handler(int irq, void *dev_ + break; + case INTR_ASYNC_EVENT: + mb[0] = MSW(stat); +- mb[1] = RD_REG_WORD(®->mailbox1); +- mb[2] = RD_REG_WORD(®->mailbox2); +- mb[3] = RD_REG_WORD(®->mailbox3); ++ mb[1] = rd_reg_word(®->mailbox1); ++ mb[2] = rd_reg_word(®->mailbox2); ++ mb[3] = rd_reg_word(®->mailbox3); + qla2x00_async_event(vha, rsp, mb); + break; + case INTR_RSP_QUE_UPDATE: +@@ -3511,8 +3511,8 @@ qla24xx_intr_handler(int irq, void *dev_ + "Unrecognized interrupt type (%d).\n", stat * 0xff); + break; + } +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); +- RD_REG_DWORD_RELAXED(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); ++ rd_reg_dword_relaxed(®->hccr); + if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) + ndelay(3500); + } +@@ -3551,8 +3551,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id + vha = pci_get_drvdata(ha->pdev); + qla24xx_process_response_queue(vha, rsp); + if (!ha->flags.disable_msix_handshake) { +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); +- RD_REG_DWORD_RELAXED(®->hccr); ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); ++ rd_reg_dword_relaxed(®->hccr); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + +@@ -3586,14 +3586,14 @@ qla24xx_msix_default(int irq, void *dev_ + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + do { +- stat = RD_REG_DWORD(®->host_status); ++ stat = rd_reg_dword(®->host_status); + if (qla2x00_check_reg32_for_disconnect(vha, stat)) + break; + if (stat & HSRX_RISC_PAUSED) { + if (unlikely(pci_channel_offline(ha->pdev))) + break; + +- hccr = RD_REG_DWORD(®->hccr); ++ hccr = rd_reg_dword(®->hccr); + + ql_log(ql_log_info, vha, 0x5050, + "RISC paused -- HCCR=%x, Dumping firmware.\n", +@@ -3618,9 +3618,9 @@ qla24xx_msix_default(int irq, void *dev_ + break; + case INTR_ASYNC_EVENT: + mb[0] = MSW(stat); +- mb[1] = RD_REG_WORD(®->mailbox1); +- mb[2] = RD_REG_WORD(®->mailbox2); +- mb[3] = RD_REG_WORD(®->mailbox3); ++ mb[1] = rd_reg_word(®->mailbox1); ++ mb[2] = rd_reg_word(®->mailbox2); ++ mb[3] = rd_reg_word(®->mailbox3); + qla2x00_async_event(vha, rsp, mb); + break; + case INTR_RSP_QUE_UPDATE: +@@ -3640,7 +3640,7 @@ qla24xx_msix_default(int irq, void *dev_ + "Unrecognized interrupt type (%d).\n", stat & 0xff); + break; + } +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + } while (0); + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +@@ -3691,7 +3691,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev + + reg = &ha->iobase->isp24; + spin_lock_irqsave(&ha->hardware_lock, flags); +- WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); ++ wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + queue_work(ha->wq, &qpair->q_work); +@@ -3952,7 +3952,7 @@ qla2x00_request_irqs(struct qla_hw_data + goto fail; + + spin_lock_irq(&ha->hardware_lock); +- WRT_REG_WORD(®->isp.semaphore, 0); ++ wrt_reg_word(®->isp.semaphore, 0); + 
spin_unlock_irq(&ha->hardware_lock); + + fail: +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -227,7 +227,7 @@ qla2x00_mailbox_command(scsi_qla_host_t + if (mboxes & BIT_0) { + ql_dbg(ql_dbg_mbx, vha, 0x1112, + "mbox[%d]<-0x%04x\n", cnt, *iptr); +- WRT_REG_WORD(optr, *iptr); ++ wrt_reg_word(optr, *iptr); + } + + mboxes >>= 1; +@@ -253,11 +253,11 @@ qla2x00_mailbox_command(scsi_qla_host_t + set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + + if (IS_P3P_TYPE(ha)) +- WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING); ++ wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); + else if (IS_FWI2_CAPABLE(ha)) +- WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT); ++ wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); + else +- WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT); ++ wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + wait_time = jiffies; +@@ -300,7 +300,7 @@ qla2x00_mailbox_command(scsi_qla_host_t + "Cmd=%x Polling Mode.\n", command); + + if (IS_P3P_TYPE(ha)) { +- if (RD_REG_DWORD(®->isp82.hint) & ++ if (rd_reg_dword(®->isp82.hint) & + HINT_MBX_INT_PENDING) { + ha->flags.mbox_busy = 0; + spin_unlock_irqrestore(&ha->hardware_lock, +@@ -311,11 +311,11 @@ qla2x00_mailbox_command(scsi_qla_host_t + rval = QLA_FUNCTION_TIMEOUT; + goto premature_exit; + } +- WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING); ++ wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); + } else if (IS_FWI2_CAPABLE(ha)) +- WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT); ++ wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); + else +- WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT); ++ wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ +@@ -413,14 +413,14 @@ qla2x00_mailbox_command(scsi_qla_host_t + uint16_t w; + + if (IS_FWI2_CAPABLE(ha)) { +- mb[0] = RD_REG_WORD(®->isp24.mailbox0); +- mb[1] = RD_REG_WORD(®->isp24.mailbox1); +- mb[2] = RD_REG_WORD(®->isp24.mailbox2); +- mb[3] = RD_REG_WORD(®->isp24.mailbox3); +- mb[7] = RD_REG_WORD(®->isp24.mailbox7); +- ictrl = RD_REG_DWORD(®->isp24.ictrl); +- host_status = RD_REG_DWORD(®->isp24.host_status); +- hccr = RD_REG_DWORD(®->isp24.hccr); ++ mb[0] = rd_reg_word(®->isp24.mailbox0); ++ mb[1] = rd_reg_word(®->isp24.mailbox1); ++ mb[2] = rd_reg_word(®->isp24.mailbox2); ++ mb[3] = rd_reg_word(®->isp24.mailbox3); ++ mb[7] = rd_reg_word(®->isp24.mailbox7); ++ ictrl = rd_reg_dword(®->isp24.ictrl); ++ host_status = rd_reg_dword(®->isp24.host_status); ++ hccr = rd_reg_dword(®->isp24.hccr); + + ql_log(ql_log_warn, vha, 0xd04c, + "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " +@@ -430,7 +430,7 @@ qla2x00_mailbox_command(scsi_qla_host_t + + } else { + mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0); +- ictrl = RD_REG_WORD(®->isp.ictrl); ++ ictrl = rd_reg_word(®->isp.ictrl); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, + "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " + "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); +@@ -573,15 +573,15 @@ qla2x00_mailbox_command(scsi_qla_host_t + if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) { + ql_dbg(ql_dbg_mbx, vha, 0x1198, + "host_status=%#x intr_ctrl=%#x intr_status=%#x\n", +- RD_REG_DWORD(®->isp24.host_status), +- RD_REG_DWORD(®->isp24.ictrl), +- RD_REG_DWORD(®->isp24.istatus)); ++ rd_reg_dword(®->isp24.host_status), ++ rd_reg_dword(®->isp24.ictrl), ++ rd_reg_dword(®->isp24.istatus)); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1206, + 
"ctrl_status=%#x ictrl=%#x istatus=%#x\n", +- RD_REG_WORD(®->isp.ctrl_status), +- RD_REG_WORD(®->isp.ictrl), +- RD_REG_WORD(®->isp.istatus)); ++ rd_reg_word(®->isp.ctrl_status), ++ rd_reg_word(®->isp.ictrl), ++ rd_reg_word(®->isp.istatus)); + } + } else { + ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); +@@ -4427,9 +4427,9 @@ qla25xx_init_req_que(struct scsi_qla_hos + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (!(req->options & BIT_0)) { +- WRT_REG_DWORD(req->req_q_in, 0); ++ wrt_reg_dword(req->req_q_in, 0); + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) +- WRT_REG_DWORD(req->req_q_out, 0); ++ wrt_reg_dword(req->req_q_out, 0); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + +@@ -4498,9 +4498,9 @@ qla25xx_init_rsp_que(struct scsi_qla_hos + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (!(rsp->options & BIT_0)) { +- WRT_REG_DWORD(rsp->rsp_q_out, 0); ++ wrt_reg_dword(rsp->rsp_q_out, 0); + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) +- WRT_REG_DWORD(rsp->rsp_q_in, 0); ++ wrt_reg_dword(rsp->rsp_q_in, 0); + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); +@@ -5411,18 +5411,18 @@ qla81xx_write_mpi_register(scsi_qla_host + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + /* Write the MBC data to the registers */ +- WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER); +- WRT_REG_WORD(®->mailbox1, mb[0]); +- WRT_REG_WORD(®->mailbox2, mb[1]); +- WRT_REG_WORD(®->mailbox3, mb[2]); +- WRT_REG_WORD(®->mailbox4, mb[3]); ++ wrt_reg_word(®->mailbox0, MBC_WRITE_MPI_REGISTER); ++ wrt_reg_word(®->mailbox1, mb[0]); ++ wrt_reg_word(®->mailbox2, mb[1]); ++ wrt_reg_word(®->mailbox3, mb[2]); ++ wrt_reg_word(®->mailbox4, mb[3]); + +- WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); ++ wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); + + /* Poll for MBC interrupt */ + for (timer = 6000000; timer; timer--) { + /* Check for pending interrupts. 
*/ +- stat = RD_REG_DWORD(®->host_status); ++ stat = rd_reg_dword(®->host_status); + if (stat & HSRX_RISC_INT) { + stat &= 0xff; + +@@ -5430,10 +5430,10 @@ qla81xx_write_mpi_register(scsi_qla_host + stat == 0x10 || stat == 0x11) { + set_bit(MBX_INTERRUPT, + &ha->mbx_cmd_flags); +- mb0 = RD_REG_WORD(®->mailbox0); +- WRT_REG_DWORD(®->hccr, ++ mb0 = rd_reg_word(®->mailbox0); ++ wrt_reg_dword(®->hccr, + HCCRX_CLR_RISC_INT); +- RD_REG_DWORD(®->hccr); ++ rd_reg_dword(®->hccr); + break; + } + } +--- a/drivers/scsi/qla2xxx/qla_mr.c ++++ b/drivers/scsi/qla2xxx/qla_mr.c +@@ -117,7 +117,7 @@ qlafx00_mailbox_command(scsi_qla_host_t + + for (cnt = 0; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) +- WRT_REG_DWORD(optr, *iptr); ++ wrt_reg_dword(optr, *iptr); + + mboxes >>= 1; + optr++; +@@ -676,14 +676,14 @@ qlafx00_config_rings(struct scsi_qla_hos + struct qla_hw_data *ha = vha->hw; + struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; + +- WRT_REG_DWORD(®->req_q_in, 0); +- WRT_REG_DWORD(®->req_q_out, 0); ++ wrt_reg_dword(®->req_q_in, 0); ++ wrt_reg_dword(®->req_q_out, 0); + +- WRT_REG_DWORD(®->rsp_q_in, 0); +- WRT_REG_DWORD(®->rsp_q_out, 0); ++ wrt_reg_dword(®->rsp_q_in, 0); ++ wrt_reg_dword(®->rsp_q_out, 0); + + /* PCI posting */ +- RD_REG_DWORD(®->rsp_q_out); ++ rd_reg_dword(®->rsp_q_out); + } + + char * +@@ -912,9 +912,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *v + /* 30 seconds wait - Adjust if required */ + wait_time = 30; + +- pseudo_aen = RD_REG_DWORD(®->pseudoaen); ++ pseudo_aen = rd_reg_dword(®->pseudoaen); + if (pseudo_aen == 1) { +- aenmbx7 = RD_REG_DWORD(®->initval7); ++ aenmbx7 = rd_reg_dword(®->initval7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); + rval = qlafx00_driver_shutdown(vha, 10); +@@ -925,7 +925,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *v + /* wait time before firmware ready */ + wtime = jiffies + (wait_time * HZ); + do { +- aenmbx = RD_REG_DWORD(®->aenmailbox0); ++ aenmbx = rd_reg_dword(®->aenmailbox0); + barrier(); + ql_dbg(ql_dbg_mbx, vha, 0x0133, + "aenmbx: 0x%x\n", aenmbx); +@@ -944,15 +944,15 @@ qlafx00_init_fw_ready(scsi_qla_host_t *v + + case MBA_FW_RESTART_CMPLT: + /* Set the mbx and rqstq intr code */ +- aenmbx7 = RD_REG_DWORD(®->aenmailbox7); ++ aenmbx7 = rd_reg_dword(®->aenmailbox7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); +- ha->req_que_off = RD_REG_DWORD(®->aenmailbox1); +- ha->rsp_que_off = RD_REG_DWORD(®->aenmailbox3); +- ha->req_que_len = RD_REG_DWORD(®->aenmailbox5); +- ha->rsp_que_len = RD_REG_DWORD(®->aenmailbox6); +- WRT_REG_DWORD(®->aenmailbox0, 0); +- RD_REG_DWORD_RELAXED(®->aenmailbox0); ++ ha->req_que_off = rd_reg_dword(®->aenmailbox1); ++ ha->rsp_que_off = rd_reg_dword(®->aenmailbox3); ++ ha->req_que_len = rd_reg_dword(®->aenmailbox5); ++ ha->rsp_que_len = rd_reg_dword(®->aenmailbox6); ++ wrt_reg_dword(®->aenmailbox0, 0); ++ rd_reg_dword_relaxed(®->aenmailbox0); + ql_dbg(ql_dbg_init, vha, 0x0134, + "f/w returned mbx_intr_code: 0x%x, " + "rqstq_intr_code: 0x%x\n", +@@ -982,13 +982,13 @@ qlafx00_init_fw_ready(scsi_qla_host_t *v + * 3. 
issue Get FW State Mbox cmd to determine fw state + * Set the mbx and rqstq intr code from Shadow Regs + */ +- aenmbx7 = RD_REG_DWORD(®->initval7); ++ aenmbx7 = rd_reg_dword(®->initval7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); +- ha->req_que_off = RD_REG_DWORD(®->initval1); +- ha->rsp_que_off = RD_REG_DWORD(®->initval3); +- ha->req_que_len = RD_REG_DWORD(®->initval5); +- ha->rsp_que_len = RD_REG_DWORD(®->initval6); ++ ha->req_que_off = rd_reg_dword(®->initval1); ++ ha->rsp_que_off = rd_reg_dword(®->initval3); ++ ha->req_que_len = rd_reg_dword(®->initval5); ++ ha->rsp_que_len = rd_reg_dword(®->initval6); + ql_dbg(ql_dbg_init, vha, 0x0135, + "f/w returned mbx_intr_code: 0x%x, " + "rqstq_intr_code: 0x%x\n", +@@ -1034,7 +1034,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *v + if (time_after_eq(jiffies, wtime)) { + ql_dbg(ql_dbg_init, vha, 0x0137, + "Init f/w failed: aen[7]: 0x%x\n", +- RD_REG_DWORD(®->aenmailbox7)); ++ rd_reg_dword(®->aenmailbox7)); + rval = QLA_FUNCTION_FAILED; + done = true; + break; +@@ -1428,7 +1428,7 @@ qlafx00_init_response_q_entries(struct r + pkt = rsp->ring_ptr; + for (cnt = 0; cnt < rsp->length; cnt++) { + pkt->signature = RESPONSE_PROCESSED; +- WRT_REG_DWORD((void __force __iomem *)&pkt->signature, ++ wrt_reg_dword((void __force __iomem *)&pkt->signature, + RESPONSE_PROCESSED); + pkt++; + } +@@ -1444,13 +1444,13 @@ qlafx00_rescan_isp(scsi_qla_host_t *vha) + + qla2x00_request_irqs(ha, ha->rsp_q_map[0]); + +- aenmbx7 = RD_REG_DWORD(®->aenmailbox7); ++ aenmbx7 = rd_reg_dword(®->aenmailbox7); + ha->mbx_intr_code = MSW(aenmbx7); + ha->rqstq_intr_code = LSW(aenmbx7); +- ha->req_que_off = RD_REG_DWORD(®->aenmailbox1); +- ha->rsp_que_off = RD_REG_DWORD(®->aenmailbox3); +- ha->req_que_len = RD_REG_DWORD(®->aenmailbox5); +- ha->rsp_que_len = RD_REG_DWORD(®->aenmailbox6); ++ ha->req_que_off = rd_reg_dword(®->aenmailbox1); ++ ha->rsp_que_off = rd_reg_dword(®->aenmailbox3); ++ ha->req_que_len = rd_reg_dword(®->aenmailbox5); ++ ha->rsp_que_len = rd_reg_dword(®->aenmailbox6); + + ql_dbg(ql_dbg_disc, vha, 0x2094, + "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x " +@@ -1495,7 +1495,7 @@ qlafx00_timer_routine(scsi_qla_host_t *v + (!test_bit(UNLOADING, &vha->dpc_flags)) && + (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && + (ha->mr.fw_hbt_en)) { +- fw_heart_beat = RD_REG_DWORD(®->fwheartbeat); ++ fw_heart_beat = rd_reg_dword(®->fwheartbeat); + if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) { + ha->mr.old_fw_hbt_cnt = fw_heart_beat; + ha->mr.fw_hbt_miss_cnt = 0; +@@ -1515,7 +1515,7 @@ qlafx00_timer_routine(scsi_qla_host_t *v + + if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) { + /* Reset recovery to be performed in timer routine */ +- aenmbx0 = RD_REG_DWORD(®->aenmailbox0); ++ aenmbx0 = rd_reg_dword(®->aenmailbox0); + if (ha->mr.fw_reset_timer_exp) { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); +@@ -2718,7 +2718,7 @@ qlafx00_process_response_queue(struct sc + uint16_t lreq_q_in = 0; + uint16_t lreq_q_out = 0; + +- lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in); ++ lreq_q_in = rd_reg_dword(rsp->rsp_q_in); + lreq_q_out = rsp->ring_index; + + while (lreq_q_in != lreq_q_out) { +@@ -2780,7 +2780,7 @@ qlafx00_process_response_queue(struct sc + } + + /* Adjust ring index */ +- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); ++ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); + } + + /** +@@ -2811,9 +2811,9 @@ qlafx00_async_event(scsi_qla_host_t *vha + break; + + case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ +- 
ha->aenmb[1] = RD_REG_DWORD(®->aenmailbox1); +- ha->aenmb[2] = RD_REG_DWORD(®->aenmailbox2); +- ha->aenmb[3] = RD_REG_DWORD(®->aenmailbox3); ++ ha->aenmb[1] = rd_reg_dword(®->aenmailbox1); ++ ha->aenmb[2] = rd_reg_dword(®->aenmailbox2); ++ ha->aenmb[3] = rd_reg_dword(®->aenmailbox3); + ql_dbg(ql_dbg_async, vha, 0x5077, + "Asynchronous port Update received " + "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", +@@ -2843,13 +2843,13 @@ qlafx00_async_event(scsi_qla_host_t *vha + break; + + default: +- ha->aenmb[1] = RD_REG_DWORD(®->aenmailbox1); +- ha->aenmb[2] = RD_REG_DWORD(®->aenmailbox2); +- ha->aenmb[3] = RD_REG_DWORD(®->aenmailbox3); +- ha->aenmb[4] = RD_REG_DWORD(®->aenmailbox4); +- ha->aenmb[5] = RD_REG_DWORD(®->aenmailbox5); +- ha->aenmb[6] = RD_REG_DWORD(®->aenmailbox6); +- ha->aenmb[7] = RD_REG_DWORD(®->aenmailbox7); ++ ha->aenmb[1] = rd_reg_dword(®->aenmailbox1); ++ ha->aenmb[2] = rd_reg_dword(®->aenmailbox2); ++ ha->aenmb[3] = rd_reg_dword(®->aenmailbox3); ++ ha->aenmb[4] = rd_reg_dword(®->aenmailbox4); ++ ha->aenmb[5] = rd_reg_dword(®->aenmailbox5); ++ ha->aenmb[6] = rd_reg_dword(®->aenmailbox6); ++ ha->aenmb[7] = rd_reg_dword(®->aenmailbox7); + ql_dbg(ql_dbg_async, vha, 0x5078, + "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n", + ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], +@@ -2882,7 +2882,7 @@ qlafx00_mbx_completion(scsi_qla_host_t * + wptr = ®->mailbox17; + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { +- ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr); ++ ha->mailbox_out32[cnt] = rd_reg_dword(wptr); + wptr++; + } + } +@@ -2936,13 +2936,13 @@ qlafx00_intr_handler(int irq, void *dev_ + break; + + if (stat & QLAFX00_INTR_MB_CMPLT) { +- mb[0] = RD_REG_DWORD(®->mailbox16); ++ mb[0] = rd_reg_dword(®->mailbox16); + qlafx00_mbx_completion(vha, mb[0]); + status |= MBX_INTERRUPT; + clr_intr |= QLAFX00_INTR_MB_CMPLT; + } + if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) { +- ha->aenmb[0] = RD_REG_DWORD(®->aenmailbox0); ++ ha->aenmb[0] = rd_reg_dword(®->aenmailbox0); + qlafx00_async_event(vha); + clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; + } +@@ -3110,7 +3110,7 @@ qlafx00_start_scsi(srb_t *sp) + tot_dsds = nseg; + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { +- cnt = RD_REG_DWORD_RELAXED(req->req_q_out); ++ cnt = rd_reg_dword_relaxed(req->req_q_out); + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; +@@ -3175,7 +3175,7 @@ qlafx00_start_scsi(srb_t *sp) + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ +- WRT_REG_DWORD(req->req_q_in, req->ring_index); ++ wrt_reg_dword(req->req_q_in, req->ring_index); + QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); +--- a/drivers/scsi/qla2xxx/qla_mr.h ++++ b/drivers/scsi/qla2xxx/qla_mr.h +@@ -359,47 +359,47 @@ struct config_info_data { + #define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. 
*/ + + #define QLAFX00_SET_HST_INTR(ha, value) \ +- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \ ++ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \ + value) + + #define QLAFX00_CLR_HST_INTR(ha, value) \ +- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ ++ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ + ~value) + + #define QLAFX00_RD_INTR_REG(ha) \ +- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG) ++ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG) + + #define QLAFX00_CLR_INTR_REG(ha, value) \ +- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ ++ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ + ~value) + + #define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\ +- WRT_REG_DWORD((ha)->cregbase + off, val) ++ wrt_reg_dword((ha)->cregbase + off, val) + + #define QLAFX00_GET_HBA_SOC_REG(ha, off)\ +- RD_REG_DWORD((ha)->cregbase + off) ++ rd_reg_dword((ha)->cregbase + off) + + #define QLAFX00_HBA_RST_REG(ha, val)\ +- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val) ++ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_RST_REG, val) + + #define QLAFX00_RD_ICNTRL_REG(ha) \ +- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG) ++ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG) + + #define QLAFX00_ENABLE_ICNTRL_REG(ha) \ +- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ ++ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ + (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \ + QLAFX00_ICR_ENB_MASK)) + + #define QLAFX00_DISABLE_ICNTRL_REG(ha) \ +- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ ++ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ + (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \ + QLAFX00_ICR_DIS_MASK)) + + #define QLAFX00_RD_REG(ha, off) \ +- RD_REG_DWORD((ha)->cregbase + off) ++ rd_reg_dword((ha)->cregbase + off) + + #define QLAFX00_WR_REG(ha, off, val) \ +- WRT_REG_DWORD((ha)->cregbase + off, val) ++ wrt_reg_dword((ha)->cregbase + off, val) + + struct qla_mt_iocb_rqst_fx00 { + __le32 reserved_0; +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -395,7 +395,7 @@ static inline int qla2x00_start_nvme_mq( + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : +- RD_REG_DWORD_RELAXED(req->req_q_out); ++ rd_reg_dword_relaxed(req->req_q_out); + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; +@@ -525,7 +525,7 @@ static inline int qla2x00_start_nvme_mq( + } + + /* Set chip new ring index. */ +- WRT_REG_DWORD(req->req_q_in, req->ring_index); ++ wrt_reg_dword(req->req_q_in, req->ring_index); + + queuing_error: + spin_unlock_irqrestore(&qpair->qp_lock, flags); +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -370,7 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_ + /* Read back value to make sure write has gone through before trying + * to use it. 
+ */ +- win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase); ++ win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase); + if (win_read != ha->crb_win) { + ql_dbg(ql_dbg_p3p, vha, 0xb000, + "%s: Written crbwin (0x%x) " +@@ -520,7 +520,7 @@ qla82xx_rd_32(struct qla_hw_data *ha, ul + qla82xx_crb_win_lock(ha); + qla82xx_pci_set_crbwindow_2M(ha, off_in, &off); + } +- data = RD_REG_DWORD(off); ++ data = rd_reg_dword(off); + + if (rv == 1) { + qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); +@@ -937,17 +937,17 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, + { + uint32_t off_value, rval = 0; + +- WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000); ++ wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000); + + /* Read back value to make sure write has gone through */ +- RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase); ++ rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase); + off_value = (off & 0x0000FFFF); + + if (flag) +- WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase, ++ wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase, + data); + else +- rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M + ++ rval = rd_reg_dword(off_value + CRB_INDIRECT_2M + + ha->nx_pcibase); + + return rval; +@@ -1790,9 +1790,9 @@ void qla82xx_config_rings(struct scsi_ql + put_unaligned_le64(req->dma, &icb->request_q_address); + put_unaligned_le64(rsp->dma, &icb->response_q_address); + +- WRT_REG_DWORD(®->req_q_out[0], 0); +- WRT_REG_DWORD(®->rsp_q_in[0], 0); +- WRT_REG_DWORD(®->rsp_q_out[0], 0); ++ wrt_reg_dword(®->req_q_out[0], 0); ++ wrt_reg_dword(®->rsp_q_in[0], 0); ++ wrt_reg_dword(®->rsp_q_out[0], 0); + } + + static int +@@ -2007,7 +2007,7 @@ qla82xx_mbx_completion(scsi_qla_host_t * + ha->mailbox_out[0] = mb0; + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { +- ha->mailbox_out[cnt] = RD_REG_WORD(wptr); ++ ha->mailbox_out[cnt] = rd_reg_word(wptr); + wptr++; + } + +@@ -2069,8 +2069,8 @@ qla82xx_intr_handler(int irq, void *dev_ + vha = pci_get_drvdata(ha->pdev); + for (iter = 1; iter--; ) { + +- if (RD_REG_DWORD(®->host_int)) { +- stat = RD_REG_DWORD(®->host_status); ++ if (rd_reg_dword(®->host_int)) { ++ stat = rd_reg_dword(®->host_status); + + switch (stat & 0xff) { + case 0x1: +@@ -2082,9 +2082,9 @@ qla82xx_intr_handler(int irq, void *dev_ + break; + case 0x12: + mb[0] = MSW(stat); +- mb[1] = RD_REG_WORD(®->mailbox_out[1]); +- mb[2] = RD_REG_WORD(®->mailbox_out[2]); +- mb[3] = RD_REG_WORD(®->mailbox_out[3]); ++ mb[1] = rd_reg_word(®->mailbox_out[1]); ++ mb[2] = rd_reg_word(®->mailbox_out[2]); ++ mb[3] = rd_reg_word(®->mailbox_out[3]); + qla2x00_async_event(vha, rsp, mb); + break; + case 0x13: +@@ -2097,7 +2097,7 @@ qla82xx_intr_handler(int irq, void *dev_ + break; + } + } +- WRT_REG_DWORD(®->host_int, 0); ++ wrt_reg_dword(®->host_int, 0); + } + + qla2x00_handle_mbx_completion(ha, status); +@@ -2135,11 +2135,11 @@ qla82xx_msix_default(int irq, void *dev_ + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + do { +- host_int = RD_REG_DWORD(®->host_int); ++ host_int = rd_reg_dword(®->host_int); + if (qla2x00_check_reg32_for_disconnect(vha, host_int)) + break; + if (host_int) { +- stat = RD_REG_DWORD(®->host_status); ++ stat = rd_reg_dword(®->host_status); + + switch (stat & 0xff) { + case 0x1: +@@ -2151,9 +2151,9 @@ qla82xx_msix_default(int irq, void *dev_ + break; + case 0x12: + mb[0] = MSW(stat); +- mb[1] = RD_REG_WORD(®->mailbox_out[1]); +- mb[2] = RD_REG_WORD(®->mailbox_out[2]); +- mb[3] = RD_REG_WORD(®->mailbox_out[3]); ++ mb[1] = 
rd_reg_word(®->mailbox_out[1]); ++ mb[2] = rd_reg_word(®->mailbox_out[2]); ++ mb[3] = rd_reg_word(®->mailbox_out[3]); + qla2x00_async_event(vha, rsp, mb); + break; + case 0x13: +@@ -2166,7 +2166,7 @@ qla82xx_msix_default(int irq, void *dev_ + break; + } + } +- WRT_REG_DWORD(®->host_int, 0); ++ wrt_reg_dword(®->host_int, 0); + } while (0); + + qla2x00_handle_mbx_completion(ha, status); +@@ -2196,11 +2196,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id + reg = &ha->iobase->isp82; + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); +- host_int = RD_REG_DWORD(®->host_int); ++ host_int = rd_reg_dword(®->host_int); + if (qla2x00_check_reg32_for_disconnect(vha, host_int)) + goto out; + qla24xx_process_response_queue(vha, rsp); +- WRT_REG_DWORD(®->host_int, 0); ++ wrt_reg_dword(®->host_int, 0); + out: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return IRQ_HANDLED; +@@ -2231,11 +2231,11 @@ qla82xx_poll(int irq, void *dev_id) + spin_lock_irqsave(&ha->hardware_lock, flags); + vha = pci_get_drvdata(ha->pdev); + +- host_int = RD_REG_DWORD(®->host_int); ++ host_int = rd_reg_dword(®->host_int); + if (qla2x00_check_reg32_for_disconnect(vha, host_int)) + goto out; + if (host_int) { +- stat = RD_REG_DWORD(®->host_status); ++ stat = rd_reg_dword(®->host_status); + switch (stat & 0xff) { + case 0x1: + case 0x2: +@@ -2246,9 +2246,9 @@ qla82xx_poll(int irq, void *dev_id) + break; + case 0x12: + mb[0] = MSW(stat); +- mb[1] = RD_REG_WORD(®->mailbox_out[1]); +- mb[2] = RD_REG_WORD(®->mailbox_out[2]); +- mb[3] = RD_REG_WORD(®->mailbox_out[3]); ++ mb[1] = rd_reg_word(®->mailbox_out[1]); ++ mb[2] = rd_reg_word(®->mailbox_out[2]); ++ mb[3] = rd_reg_word(®->mailbox_out[3]); + qla2x00_async_event(vha, rsp, mb); + break; + case 0x13: +@@ -2260,7 +2260,7 @@ qla82xx_poll(int irq, void *dev_id) + stat * 0xff); + break; + } +- WRT_REG_DWORD(®->host_int, 0); ++ wrt_reg_dword(®->host_int, 0); + } + out: + spin_unlock_irqrestore(&ha->hardware_lock, flags); +@@ -2818,10 +2818,10 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha + if (ql2xdbwr) + qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval); + else { +- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); ++ wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + wmb(); +- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { +- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); ++ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { ++ wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + wmb(); + } + } +@@ -3854,7 +3854,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_ + loop_cnt = ocm_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { +- r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase); ++ r_value = rd_reg_dword(r_addr + ha->nx_pcibase); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } +--- a/drivers/scsi/qla2xxx/qla_nx2.c ++++ b/drivers/scsi/qla2xxx/qla_nx2.c +@@ -3946,8 +3946,8 @@ qla8044_intr_handler(int irq, void *dev_ + spin_lock_irqsave(&ha->hardware_lock, flags); + for (iter = 1; iter--; ) { + +- if (RD_REG_DWORD(®->host_int)) { +- stat = RD_REG_DWORD(®->host_status); ++ if (rd_reg_dword(®->host_int)) { ++ stat = rd_reg_dword(®->host_status); + if ((stat & HSRX_RISC_INT) == 0) + break; + +@@ -3961,9 +3961,9 @@ qla8044_intr_handler(int irq, void *dev_ + break; + case 0x12: + mb[0] = MSW(stat); +- mb[1] = RD_REG_WORD(®->mailbox_out[1]); +- mb[2] = RD_REG_WORD(®->mailbox_out[2]); +- mb[3] = RD_REG_WORD(®->mailbox_out[3]); ++ mb[1] = rd_reg_word(®->mailbox_out[1]); ++ mb[2] = rd_reg_word(®->mailbox_out[2]); ++ mb[3] = rd_reg_word(®->mailbox_out[3]); + qla2x00_async_event(vha, 
rsp, mb); + break; + case 0x13: +@@ -3976,7 +3976,7 @@ qla8044_intr_handler(int irq, void *dev_ + break; + } + } +- WRT_REG_DWORD(®->host_int, 0); ++ wrt_reg_dword(®->host_int, 0); + } + + qla2x00_handle_mbx_completion(ha, status); +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -1227,9 +1227,9 @@ uint32_t qla2x00_isp_reg_stat(struct qla + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; + + if (IS_P3P_TYPE(ha)) +- return ((RD_REG_DWORD(®82->host_int)) == ISP_REG_DISCONNECT); ++ return ((rd_reg_dword(®82->host_int)) == ISP_REG_DISCONNECT); + else +- return ((RD_REG_DWORD(®->host_status)) == ++ return ((rd_reg_dword(®->host_status)) == + ISP_REG_DISCONNECT); + } + +@@ -1913,8 +1913,8 @@ qla2x00_enable_intrs(struct qla_hw_data + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 1; + /* enable risc and host interrupts */ +- WRT_REG_WORD(®->ictrl, ICR_EN_INT | ICR_EN_RISC); +- RD_REG_WORD(®->ictrl); ++ wrt_reg_word(®->ictrl, ICR_EN_INT | ICR_EN_RISC); ++ rd_reg_word(®->ictrl); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + } +@@ -1928,8 +1928,8 @@ qla2x00_disable_intrs(struct qla_hw_data + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 0; + /* disable risc and host interrupts */ +- WRT_REG_WORD(®->ictrl, 0); +- RD_REG_WORD(®->ictrl); ++ wrt_reg_word(®->ictrl, 0); ++ rd_reg_word(®->ictrl); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + +@@ -1941,8 +1941,8 @@ qla24xx_enable_intrs(struct qla_hw_data + + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 1; +- WRT_REG_DWORD(®->ictrl, ICRX_EN_RISC_INT); +- RD_REG_DWORD(®->ictrl); ++ wrt_reg_dword(®->ictrl, ICRX_EN_RISC_INT); ++ rd_reg_dword(®->ictrl); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + +@@ -1956,8 +1956,8 @@ qla24xx_disable_intrs(struct qla_hw_data + return; + spin_lock_irqsave(&ha->hardware_lock, flags); + ha->interrupts_on = 0; +- WRT_REG_DWORD(®->ictrl, 0); +- RD_REG_DWORD(®->ictrl); ++ wrt_reg_dword(®->ictrl, 0); ++ rd_reg_dword(®->ictrl); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + +@@ -7569,15 +7569,15 @@ qla2xxx_pci_mmio_enabled(struct pci_dev + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (IS_QLA2100(ha) || IS_QLA2200(ha)){ +- stat = RD_REG_WORD(®->hccr); ++ stat = rd_reg_word(®->hccr); + if (stat & HCCR_RISC_PAUSE) + risc_paused = 1; + } else if (IS_QLA23XX(ha)) { +- stat = RD_REG_DWORD(®->u.isp2300.host_status); ++ stat = rd_reg_dword(®->u.isp2300.host_status); + if (stat & HSR_RISC_PAUSED) + risc_paused = 1; + } else if (IS_FWI2_CAPABLE(ha)) { +- stat = RD_REG_DWORD(®24->host_status); ++ stat = rd_reg_dword(®24->host_status); + if (stat & HSRX_RISC_PAUSED) + risc_paused = 1; + } +--- a/drivers/scsi/qla2xxx/qla_sup.c ++++ b/drivers/scsi/qla2xxx/qla_sup.c +@@ -26,24 +26,24 @@ qla2x00_lock_nvram_access(struct qla_hw_ + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { +- data = RD_REG_WORD(®->nvram); ++ data = rd_reg_word(®->nvram); + while (data & NVR_BUSY) { + udelay(100); +- data = RD_REG_WORD(®->nvram); ++ data = rd_reg_word(®->nvram); + } + + /* Lock resource */ +- WRT_REG_WORD(®->u.isp2300.host_semaphore, 0x1); +- RD_REG_WORD(®->u.isp2300.host_semaphore); ++ wrt_reg_word(®->u.isp2300.host_semaphore, 0x1); ++ rd_reg_word(®->u.isp2300.host_semaphore); + udelay(5); +- data = RD_REG_WORD(®->u.isp2300.host_semaphore); ++ data = rd_reg_word(®->u.isp2300.host_semaphore); + while ((data & BIT_0) == 0) { + /* 
Lock failed */ + udelay(100); +- WRT_REG_WORD(®->u.isp2300.host_semaphore, 0x1); +- RD_REG_WORD(®->u.isp2300.host_semaphore); ++ wrt_reg_word(®->u.isp2300.host_semaphore, 0x1); ++ rd_reg_word(®->u.isp2300.host_semaphore); + udelay(5); +- data = RD_REG_WORD(®->u.isp2300.host_semaphore); ++ data = rd_reg_word(®->u.isp2300.host_semaphore); + } + } + } +@@ -58,8 +58,8 @@ qla2x00_unlock_nvram_access(struct qla_h + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + + if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { +- WRT_REG_WORD(®->u.isp2300.host_semaphore, 0); +- RD_REG_WORD(®->u.isp2300.host_semaphore); ++ wrt_reg_word(®->u.isp2300.host_semaphore, 0); ++ rd_reg_word(®->u.isp2300.host_semaphore); + } + } + +@@ -73,15 +73,15 @@ qla2x00_nv_write(struct qla_hw_data *ha, + { + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + +- WRT_REG_WORD(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); +- WRT_REG_WORD(®->nvram, data | NVR_SELECT | NVR_CLOCK | ++ wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_CLOCK | + NVR_WRT_ENABLE); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); +- WRT_REG_WORD(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + } + +@@ -120,21 +120,21 @@ qla2x00_nvram_request(struct qla_hw_data + + /* Read data from NVRAM. */ + for (cnt = 0; cnt < 16; cnt++) { +- WRT_REG_WORD(®->nvram, NVR_SELECT | NVR_CLOCK); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_SELECT | NVR_CLOCK); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + data <<= 1; +- reg_data = RD_REG_WORD(®->nvram); ++ reg_data = rd_reg_word(®->nvram); + if (reg_data & NVR_DATA_IN) + data |= BIT_0; +- WRT_REG_WORD(®->nvram, NVR_SELECT); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_SELECT); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + } + + /* Deselect chip. */ +- WRT_REG_WORD(®->nvram, NVR_DESELECT); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_DESELECT); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + + return data; +@@ -171,8 +171,8 @@ qla2x00_nv_deselect(struct qla_hw_data * + { + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + +- WRT_REG_WORD(®->nvram, NVR_DESELECT); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_DESELECT); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + NVRAM_DELAY(); + } + +@@ -216,8 +216,8 @@ qla2x00_write_nvram_word(struct qla_hw_d + qla2x00_nv_deselect(ha); + + /* Wait for NVRAM to become ready */ +- WRT_REG_WORD(®->nvram, NVR_SELECT); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_SELECT); ++ rd_reg_word(®->nvram); /* PCI Posting. 
*/ + wait_cnt = NVR_WAIT_CNT; + do { + if (!--wait_cnt) { +@@ -226,7 +226,7 @@ qla2x00_write_nvram_word(struct qla_hw_d + break; + } + NVRAM_DELAY(); +- word = RD_REG_WORD(®->nvram); ++ word = rd_reg_word(®->nvram); + } while ((word & NVR_DATA_IN) == 0); + + qla2x00_nv_deselect(ha); +@@ -275,11 +275,11 @@ qla2x00_write_nvram_word_tmo(struct qla_ + qla2x00_nv_deselect(ha); + + /* Wait for NVRAM to become ready */ +- WRT_REG_WORD(®->nvram, NVR_SELECT); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_SELECT); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + do { + NVRAM_DELAY(); +- word = RD_REG_WORD(®->nvram); ++ word = rd_reg_word(®->nvram); + if (!--tmo) { + ret = QLA_FUNCTION_FAILED; + break; +@@ -347,8 +347,8 @@ qla2x00_clear_nvram_protection(struct ql + qla2x00_nv_deselect(ha); + + /* Wait for NVRAM to become ready. */ +- WRT_REG_WORD(®->nvram, NVR_SELECT); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_SELECT); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + wait_cnt = NVR_WAIT_CNT; + do { + if (!--wait_cnt) { +@@ -357,7 +357,7 @@ qla2x00_clear_nvram_protection(struct ql + break; + } + NVRAM_DELAY(); +- word = RD_REG_WORD(®->nvram); ++ word = rd_reg_word(®->nvram); + } while ((word & NVR_DATA_IN) == 0); + + if (wait_cnt) +@@ -407,8 +407,8 @@ qla2x00_set_nvram_protection(struct qla_ + qla2x00_nv_deselect(ha); + + /* Wait for NVRAM to become ready. */ +- WRT_REG_WORD(®->nvram, NVR_SELECT); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_SELECT); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + wait_cnt = NVR_WAIT_CNT; + do { + if (!--wait_cnt) { +@@ -417,7 +417,7 @@ qla2x00_set_nvram_protection(struct qla_ + break; + } + NVRAM_DELAY(); +- word = RD_REG_WORD(®->nvram); ++ word = rd_reg_word(®->nvram); + } while ((word & NVR_DATA_IN) == 0); + } + +@@ -456,11 +456,11 @@ qla24xx_read_flash_dword(struct qla_hw_d + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + ulong cnt = 30000; + +- WRT_REG_DWORD(®->flash_addr, addr & ~FARX_DATA_FLAG); ++ wrt_reg_dword(®->flash_addr, addr & ~FARX_DATA_FLAG); + + while (cnt--) { +- if (RD_REG_DWORD(®->flash_addr) & FARX_DATA_FLAG) { +- *data = RD_REG_DWORD(®->flash_data); ++ if (rd_reg_dword(®->flash_addr) & FARX_DATA_FLAG) { ++ *data = rd_reg_dword(®->flash_data); + return QLA_SUCCESS; + } + udelay(10); +@@ -499,11 +499,11 @@ qla24xx_write_flash_dword(struct qla_hw_ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + ulong cnt = 500000; + +- WRT_REG_DWORD(®->flash_data, data); +- WRT_REG_DWORD(®->flash_addr, addr | FARX_DATA_FLAG); ++ wrt_reg_dword(®->flash_data, data); ++ wrt_reg_dword(®->flash_addr, addr | FARX_DATA_FLAG); + + while (cnt--) { +- if (!(RD_REG_DWORD(®->flash_addr) & FARX_DATA_FLAG)) ++ if (!(rd_reg_dword(®->flash_addr) & FARX_DATA_FLAG)) + return QLA_SUCCESS; + udelay(10); + cond_resched(); +@@ -1197,9 +1197,9 @@ qla24xx_unprotect_flash(scsi_qla_host_t + return qla81xx_fac_do_write_enable(vha, 1); + + /* Enable flash write. */ +- WRT_REG_DWORD(®->ctrl_status, +- RD_REG_DWORD(®->ctrl_status) | CSRX_FLASH_ENABLE); +- RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_dword(®->ctrl_status, ++ rd_reg_dword(®->ctrl_status) | CSRX_FLASH_ENABLE); ++ rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + + if (!ha->fdt_wrt_disable) + goto done; +@@ -1240,8 +1240,8 @@ qla24xx_protect_flash(scsi_qla_host_t *v + + skip_wrt_protect: + /* Disable flash write. 
*/ +- WRT_REG_DWORD(®->ctrl_status, +- RD_REG_DWORD(®->ctrl_status) & ~CSRX_FLASH_ENABLE); ++ wrt_reg_dword(®->ctrl_status, ++ rd_reg_dword(®->ctrl_status) & ~CSRX_FLASH_ENABLE); + + return QLA_SUCCESS; + } +@@ -1466,9 +1466,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t + return ret; + + /* Enable flash write. */ +- WRT_REG_DWORD(®->ctrl_status, +- RD_REG_DWORD(®->ctrl_status) | CSRX_FLASH_ENABLE); +- RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_dword(®->ctrl_status, ++ rd_reg_dword(®->ctrl_status) | CSRX_FLASH_ENABLE); ++ rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + + /* Disable NVRAM write-protection. */ + qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0); +@@ -1490,9 +1490,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t + qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c); + + /* Disable flash write. */ +- WRT_REG_DWORD(®->ctrl_status, +- RD_REG_DWORD(®->ctrl_status) & ~CSRX_FLASH_ENABLE); +- RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_dword(®->ctrl_status, ++ rd_reg_dword(®->ctrl_status) & ~CSRX_FLASH_ENABLE); ++ rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + + return ret; + } +@@ -1588,8 +1588,8 @@ qla2x00_beacon_blink(struct scsi_qla_hos + gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); + gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); + } else { +- gpio_enable = RD_REG_WORD(®->gpioe); +- gpio_data = RD_REG_WORD(®->gpiod); ++ gpio_enable = rd_reg_word(®->gpioe); ++ gpio_data = rd_reg_word(®->gpiod); + } + + /* Set the modified gpio_enable values */ +@@ -1598,8 +1598,8 @@ qla2x00_beacon_blink(struct scsi_qla_hos + if (ha->pio_address) { + WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); + } else { +- WRT_REG_WORD(®->gpioe, gpio_enable); +- RD_REG_WORD(®->gpioe); ++ wrt_reg_word(®->gpioe, gpio_enable); ++ rd_reg_word(®->gpioe); + } + + qla2x00_flip_colors(ha, &led_color); +@@ -1614,8 +1614,8 @@ qla2x00_beacon_blink(struct scsi_qla_hos + if (ha->pio_address) { + WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); + } else { +- WRT_REG_WORD(®->gpiod, gpio_data); +- RD_REG_WORD(®->gpiod); ++ wrt_reg_word(®->gpiod, gpio_data); ++ rd_reg_word(®->gpiod); + } + + spin_unlock_irqrestore(&ha->hardware_lock, flags); +@@ -1645,8 +1645,8 @@ qla2x00_beacon_on(struct scsi_qla_host * + gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); + gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); + } else { +- gpio_enable = RD_REG_WORD(®->gpioe); +- gpio_data = RD_REG_WORD(®->gpiod); ++ gpio_enable = rd_reg_word(®->gpioe); ++ gpio_data = rd_reg_word(®->gpiod); + } + gpio_enable |= GPIO_LED_MASK; + +@@ -1654,8 +1654,8 @@ qla2x00_beacon_on(struct scsi_qla_host * + if (ha->pio_address) { + WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); + } else { +- WRT_REG_WORD(®->gpioe, gpio_enable); +- RD_REG_WORD(®->gpioe); ++ wrt_reg_word(®->gpioe, gpio_enable); ++ rd_reg_word(®->gpioe); + } + + /* Clear out previously set LED colour. */ +@@ -1663,8 +1663,8 @@ qla2x00_beacon_on(struct scsi_qla_host * + if (ha->pio_address) { + WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); + } else { +- WRT_REG_WORD(®->gpiod, gpio_data); +- RD_REG_WORD(®->gpiod); ++ wrt_reg_word(®->gpiod, gpio_data); ++ rd_reg_word(®->gpiod); + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + +@@ -1731,13 +1731,13 @@ qla24xx_beacon_blink(struct scsi_qla_hos + + /* Save the Original GPIOD. */ + spin_lock_irqsave(&ha->hardware_lock, flags); +- gpio_data = RD_REG_DWORD(®->gpiod); ++ gpio_data = rd_reg_dword(®->gpiod); + + /* Enable the gpio_data reg for update. 
*/ + gpio_data |= GPDX_LED_UPDATE_MASK; + +- WRT_REG_DWORD(®->gpiod, gpio_data); +- gpio_data = RD_REG_DWORD(®->gpiod); ++ wrt_reg_dword(®->gpiod, gpio_data); ++ gpio_data = rd_reg_dword(®->gpiod); + + /* Set the color bits. */ + qla24xx_flip_colors(ha, &led_color); +@@ -1749,8 +1749,8 @@ qla24xx_beacon_blink(struct scsi_qla_hos + gpio_data |= led_color; + + /* Set the modified gpio_data values. */ +- WRT_REG_DWORD(®->gpiod, gpio_data); +- gpio_data = RD_REG_DWORD(®->gpiod); ++ wrt_reg_dword(®->gpiod, gpio_data); ++ gpio_data = rd_reg_dword(®->gpiod); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + +@@ -1881,12 +1881,12 @@ qla24xx_beacon_on(struct scsi_qla_host * + goto skip_gpio; + + spin_lock_irqsave(&ha->hardware_lock, flags); +- gpio_data = RD_REG_DWORD(®->gpiod); ++ gpio_data = rd_reg_dword(®->gpiod); + + /* Enable the gpio_data reg for update. */ + gpio_data |= GPDX_LED_UPDATE_MASK; +- WRT_REG_DWORD(®->gpiod, gpio_data); +- RD_REG_DWORD(®->gpiod); ++ wrt_reg_dword(®->gpiod, gpio_data); ++ rd_reg_dword(®->gpiod); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } +@@ -1929,12 +1929,12 @@ qla24xx_beacon_off(struct scsi_qla_host + + /* Give control back to firmware. */ + spin_lock_irqsave(&ha->hardware_lock, flags); +- gpio_data = RD_REG_DWORD(®->gpiod); ++ gpio_data = rd_reg_dword(®->gpiod); + + /* Disable the gpio_data reg for update. */ + gpio_data &= ~GPDX_LED_UPDATE_MASK; +- WRT_REG_DWORD(®->gpiod, gpio_data); +- RD_REG_DWORD(®->gpiod); ++ wrt_reg_dword(®->gpiod, gpio_data); ++ rd_reg_dword(®->gpiod); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + set_fw_options: +@@ -1970,10 +1970,10 @@ qla2x00_flash_enable(struct qla_hw_data + uint16_t data; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + +- data = RD_REG_WORD(®->ctrl_status); ++ data = rd_reg_word(®->ctrl_status); + data |= CSR_FLASH_ENABLE; +- WRT_REG_WORD(®->ctrl_status, data); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, data); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + } + + /** +@@ -1986,10 +1986,10 @@ qla2x00_flash_disable(struct qla_hw_data + uint16_t data; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + +- data = RD_REG_WORD(®->ctrl_status); ++ data = rd_reg_word(®->ctrl_status); + data &= ~(CSR_FLASH_ENABLE); +- WRT_REG_WORD(®->ctrl_status, data); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, data); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + } + + /** +@@ -2008,7 +2008,7 @@ qla2x00_read_flash_byte(struct qla_hw_da + uint16_t bank_select; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + +- bank_select = RD_REG_WORD(®->ctrl_status); ++ bank_select = rd_reg_word(®->ctrl_status); + + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { + /* Specify 64K address range: */ +@@ -2016,11 +2016,11 @@ qla2x00_read_flash_byte(struct qla_hw_da + bank_select &= ~0xf8; + bank_select |= addr >> 12 & 0xf0; + bank_select |= CSR_FLASH_64K_BANK; +- WRT_REG_WORD(®->ctrl_status, bank_select); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, bank_select); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + +- WRT_REG_WORD(®->flash_address, (uint16_t)addr); +- data = RD_REG_WORD(®->flash_data); ++ wrt_reg_word(®->flash_address, (uint16_t)addr); ++ data = rd_reg_word(®->flash_data); + + return (uint8_t)data; + } +@@ -2028,13 +2028,13 @@ qla2x00_read_flash_byte(struct qla_hw_da + /* Setup bit 16 of flash address. 
*/ + if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { + bank_select |= CSR_FLASH_64K_BANK; +- WRT_REG_WORD(®->ctrl_status, bank_select); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, bank_select); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + } else if (((addr & BIT_16) == 0) && + (bank_select & CSR_FLASH_64K_BANK)) { + bank_select &= ~(CSR_FLASH_64K_BANK); +- WRT_REG_WORD(®->ctrl_status, bank_select); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, bank_select); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + } + + /* Always perform IO mapped accesses to the FLASH registers. */ +@@ -2049,7 +2049,7 @@ qla2x00_read_flash_byte(struct qla_hw_da + data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data)); + } while (data != data2); + } else { +- WRT_REG_WORD(®->flash_address, (uint16_t)addr); ++ wrt_reg_word(®->flash_address, (uint16_t)addr); + data = qla2x00_debounce_register(®->flash_data); + } + +@@ -2068,20 +2068,20 @@ qla2x00_write_flash_byte(struct qla_hw_d + uint16_t bank_select; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + +- bank_select = RD_REG_WORD(®->ctrl_status); ++ bank_select = rd_reg_word(®->ctrl_status); + if (IS_QLA2322(ha) || IS_QLA6322(ha)) { + /* Specify 64K address range: */ + /* clear out Module Select and Flash Address bits [19:16]. */ + bank_select &= ~0xf8; + bank_select |= addr >> 12 & 0xf0; + bank_select |= CSR_FLASH_64K_BANK; +- WRT_REG_WORD(®->ctrl_status, bank_select); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, bank_select); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + +- WRT_REG_WORD(®->flash_address, (uint16_t)addr); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ +- WRT_REG_WORD(®->flash_data, (uint16_t)data); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->flash_address, (uint16_t)addr); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->flash_data, (uint16_t)data); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + + return; + } +@@ -2089,13 +2089,13 @@ qla2x00_write_flash_byte(struct qla_hw_d + /* Setup bit 16 of flash address. */ + if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { + bank_select |= CSR_FLASH_64K_BANK; +- WRT_REG_WORD(®->ctrl_status, bank_select); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, bank_select); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + } else if (((addr & BIT_16) == 0) && + (bank_select & CSR_FLASH_64K_BANK)) { + bank_select &= ~(CSR_FLASH_64K_BANK); +- WRT_REG_WORD(®->ctrl_status, bank_select); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->ctrl_status, bank_select); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ + } + + /* Always perform IO mapped accesses to the FLASH registers. */ +@@ -2103,10 +2103,10 @@ qla2x00_write_flash_byte(struct qla_hw_d + WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr); + WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data); + } else { +- WRT_REG_WORD(®->flash_address, (uint16_t)addr); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ +- WRT_REG_WORD(®->flash_data, (uint16_t)data); +- RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->flash_address, (uint16_t)addr); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. */ ++ wrt_reg_word(®->flash_data, (uint16_t)data); ++ rd_reg_word(®->ctrl_status); /* PCI Posting. 
*/ + } + } + +@@ -2289,12 +2289,12 @@ qla2x00_read_flash_data(struct qla_hw_da + + midpoint = length / 2; + +- WRT_REG_WORD(®->nvram, 0); +- RD_REG_WORD(®->nvram); ++ wrt_reg_word(®->nvram, 0); ++ rd_reg_word(®->nvram); + for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) { + if (ilength == midpoint) { +- WRT_REG_WORD(®->nvram, NVR_SELECT); +- RD_REG_WORD(®->nvram); ++ wrt_reg_word(®->nvram, NVR_SELECT); ++ rd_reg_word(®->nvram); + } + data = qla2x00_read_flash_byte(ha, saddr); + if (saddr % 100) +@@ -2319,11 +2319,11 @@ qla2x00_suspend_hba(struct scsi_qla_host + + /* Pause RISC. */ + spin_lock_irqsave(&ha->hardware_lock, flags); +- WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); +- RD_REG_WORD(®->hccr); ++ wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); ++ rd_reg_word(®->hccr); + if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { + for (cnt = 0; cnt < 30000; cnt++) { +- if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) != 0) ++ if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0) + break; + udelay(100); + } +@@ -2362,12 +2362,12 @@ qla2x00_read_optrom_data(struct scsi_qla + midpoint = ha->optrom_size / 2; + + qla2x00_flash_enable(ha); +- WRT_REG_WORD(®->nvram, 0); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, 0); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + for (addr = offset, data = buf; addr < length; addr++, data++) { + if (addr == midpoint) { +- WRT_REG_WORD(®->nvram, NVR_SELECT); +- RD_REG_WORD(®->nvram); /* PCI Posting. */ ++ wrt_reg_word(®->nvram, NVR_SELECT); ++ rd_reg_word(®->nvram); /* PCI Posting. */ + } + + *data = qla2x00_read_flash_byte(ha, addr); +@@ -2399,7 +2399,7 @@ qla2x00_write_optrom_data(struct scsi_ql + sec_number = 0; + + /* Reset ISP chip. */ +- WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); ++ wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); + + /* Go with write. */ +@@ -2548,8 +2548,8 @@ qla2x00_write_optrom_data(struct scsi_ql + } + } + } else if (addr == ha->optrom_size / 2) { +- WRT_REG_WORD(®->nvram, NVR_SELECT); +- RD_REG_WORD(®->nvram); ++ wrt_reg_word(®->nvram, NVR_SELECT); ++ rd_reg_word(®->nvram); + } + + if (flash_id == 0xda && man_id == 0xc1) { +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -2493,7 +2493,7 @@ static int qlt_check_reserve_free_req(st + + if (req->cnt < (req_cnt + 2)) { + cnt = (uint16_t)(qpair->use_shadow_reg ? 
*req->out_ptr : +- RD_REG_DWORD_RELAXED(req->req_q_out)); ++ rd_reg_dword_relaxed(req->req_q_out)); + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; +@@ -6802,7 +6802,7 @@ qlt_24xx_process_atio_queue(struct scsi_ + } + + /* Adjust ring index */ +- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); ++ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); + } + + void +@@ -6815,9 +6815,9 @@ qlt_24xx_config_rings(struct scsi_qla_ho + if (!QLA_TGT_MODE_ENABLED()) + return; + +- WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); +- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); +- RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); ++ wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0); ++ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0); ++ rd_reg_dword(ISP_ATIO_Q_OUT(vha)); + + if (ha->flags.msix_enabled) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -22,9 +22,9 @@ qla27xx_write_remote_reg(struct scsi_qla + ql_dbg(ql_dbg_misc, vha, 0xd300, + "%s: addr/data = %xh/%xh\n", __func__, addr, data); + +- WRT_REG_DWORD(®->iobase_addr, 0x40); +- WRT_REG_DWORD(®->iobase_c4, data); +- WRT_REG_DWORD(®->iobase_window, addr); ++ wrt_reg_dword(®->iobase_addr, 0x40); ++ wrt_reg_dword(®->iobase_c4, data); ++ wrt_reg_dword(®->iobase_window, addr); + } + + void +@@ -75,7 +75,7 @@ qla27xx_read8(void __iomem *window, void + uint8_t value = ~0; + + if (buf) { +- value = RD_REG_BYTE(window); ++ value = rd_reg_byte(window); + } + qla27xx_insert32(value, buf, len); + } +@@ -86,7 +86,7 @@ qla27xx_read16(void __iomem *window, voi + uint16_t value = ~0; + + if (buf) { +- value = RD_REG_WORD(window); ++ value = rd_reg_word(window); + } + qla27xx_insert32(value, buf, len); + } +@@ -97,7 +97,7 @@ qla27xx_read32(void __iomem *window, voi + uint32_t value = ~0; + + if (buf) { +- value = RD_REG_DWORD(window); ++ value = rd_reg_dword(window); + } + qla27xx_insert32(value, buf, len); + } +@@ -126,7 +126,7 @@ qla27xx_write_reg(__iomem struct device_ + if (buf) { + void __iomem *window = (void __iomem *)reg + offset; + +- WRT_REG_DWORD(window, data); ++ wrt_reg_dword(window, data); + } + } + diff --git a/patches.suse/scsi-qla2xxx-Change-in-PUREX-to-handle-FPIN-ELS-requ.patch b/patches.suse/scsi-qla2xxx-Change-in-PUREX-to-handle-FPIN-ELS-requ.patch new file mode 100644 index 0000000..c343d45 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Change-in-PUREX-to-handle-FPIN-ELS-requ.patch @@ -0,0 +1,363 @@ +From: Shyam Sundar +Date: Tue, 30 Jun 2020 03:22:28 -0700 +Subject: scsi: qla2xxx: Change in PUREX to handle FPIN ELS requests +Patch-mainline: v5.9-rc1 +Git-commit: 62e9dd177732843ae6c5b9d2ed61e7c9538fa276 +References: bsc#1171688 bsc#1174003 + +SAN Congestion Management generates ELS pkts whose size can vary and be > +64 bytes. Change the PUREX handling code to support non-standard ELS pkt +size. + +Link: https://lore.kernel.org/r/20200630102229.29660-2-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Shyam Sundar +Signed-off-by: Arun Easi +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_def.h | 15 ++++- + drivers/scsi/qla2xxx/qla_gbl.h | 3 - + drivers/scsi/qla2xxx/qla_isr.c | 116 ++++++++++++++++++++++++++++++----------- + drivers/scsi/qla2xxx/qla_mbx.c | 22 ++++++- + drivers/scsi/qla2xxx/qla_os.c | 19 +++++- + include/uapi/scsi/fc/fc_els.h | 2 + 6 files changed, 134 insertions(+), 43 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -34,6 +34,8 @@ + #include + #include + ++#include ++ + /* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */ + typedef struct { + uint8_t domain; +@@ -1304,7 +1306,6 @@ static inline bool qla2xxx_is_valid_mbs( + #define RNID_TYPE_ASIC_TEMP 0xC + + #define ELS_CMD_MAP_SIZE 32 +-#define ELS_COMMAND_RDP 0x18 + + /* + * Firmware state codes from get firmware state mailbox command +@@ -4522,10 +4523,19 @@ struct active_regions { + #define QLA_SET_DATA_RATE_NOLR 1 + #define QLA_SET_DATA_RATE_LR 2 /* Set speed and initiate LR */ + ++#define QLA_DEFAULT_PAYLOAD_SIZE 64 ++/* ++ * This item might be allocated with a size > sizeof(struct purex_item). ++ * The "size" variable gives the size of the payload (which ++ * is variable) starting at "iocb". ++ */ + struct purex_item { + struct list_head list; + struct scsi_qla_host *vha; +- void (*process_item)(struct scsi_qla_host *vha, void *pkt); ++ void (*process_item)(struct scsi_qla_host *vha, ++ struct purex_item *pkt); ++ atomic_t in_use; ++ uint16_t size; + struct { + uint8_t iocb[64]; + } iocb; +@@ -4725,6 +4735,7 @@ typedef struct scsi_qla_host { + struct list_head head; + spinlock_t lock; + } purex_list; ++ struct purex_item default_item; + + struct name_list_extended gnl; + /* Count of active session/fcport */ +--- a/drivers/scsi/qla2xxx/qla_gbl.h ++++ b/drivers/scsi/qla2xxx/qla_gbl.h +@@ -229,7 +229,8 @@ void qla2x00_handle_login_done_event(str + int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); + int qla24xx_post_relogin_work(struct scsi_qla_host *vha); + void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); +-void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt); ++void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, ++ struct purex_item *pkt); + + /* + * Global Functions in qla_mid.c source file. 
+--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -31,35 +31,11 @@ const char *const port_state_str[] = { + "ONLINE" + }; + +-static void qla24xx_purex_iocb(scsi_qla_host_t *vha, void *pkt, +- void (*process_item)(struct scsi_qla_host *vha, void *pkt)) +-{ +- struct purex_list *list = &vha->purex_list; +- struct purex_item *item; +- ulong flags; +- +- item = kzalloc(sizeof(*item), GFP_KERNEL); +- if (!item) { +- ql_log(ql_log_warn, vha, 0x5092, +- ">> Failed allocate purex list item.\n"); +- return; +- } +- +- item->vha = vha; +- item->process_item = process_item; +- memcpy(&item->iocb, pkt, sizeof(item->iocb)); +- +- spin_lock_irqsave(&list->lock, flags); +- list_add_tail(&item->list, &list->head); +- spin_unlock_irqrestore(&list->lock, flags); +- +- set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); +-} +- + static void +-qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt) ++qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) + { +- struct abts_entry_24xx *abts = pkt; ++ struct abts_entry_24xx *abts = ++ (struct abts_entry_24xx *)&pkt->iocb; + struct qla_hw_data *ha = vha->hw; + struct els_entry_24xx *rsp_els; + struct abts_entry_24xx *abts_rsp; +@@ -789,6 +765,74 @@ qla27xx_handle_8200_aen(scsi_qla_host_t + } + } + ++struct purex_item * ++qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) ++{ ++ struct purex_item *item = NULL; ++ uint8_t item_hdr_size = sizeof(*item); ++ ++ if (size > QLA_DEFAULT_PAYLOAD_SIZE) { ++ item = kzalloc(item_hdr_size + ++ (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); ++ } else { ++ if (atomic_inc_return(&vha->default_item.in_use) == 1) { ++ item = &vha->default_item; ++ goto initialize_purex_header; ++ } else { ++ item = kzalloc(item_hdr_size, GFP_ATOMIC); ++ } ++ } ++ if (!item) { ++ ql_log(ql_log_warn, vha, 0x5092, ++ ">> Failed allocate purex list item.\n"); ++ ++ return NULL; ++ } ++ ++initialize_purex_header: ++ item->vha = vha; ++ item->size = size; ++ return item; ++} ++ ++static void ++qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, ++ void (*process_item)(struct scsi_qla_host *vha, ++ struct purex_item *pkt)) ++{ ++ struct purex_list *list = &vha->purex_list; ++ ulong flags; ++ ++ pkt->process_item = process_item; ++ ++ spin_lock_irqsave(&list->lock, flags); ++ list_add_tail(&pkt->list, &list->head); ++ spin_unlock_irqrestore(&list->lock, flags); ++ ++ set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); ++} ++ ++/** ++ * qla24xx_copy_std_pkt() - Copy over purex ELS which is ++ * contained in a single IOCB. ++ * purex packet. ++ * @vha: SCSI driver HA context ++ * @pkt: ELS packet ++ */ ++struct purex_item ++*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) ++{ ++ struct purex_item *item; ++ ++ item = qla24xx_alloc_purex_item(vha, ++ QLA_DEFAULT_PAYLOAD_SIZE); ++ if (!item) ++ return item; ++ ++ memcpy(&item->iocb, pkt, sizeof(item->iocb)); ++ return item; ++} ++ + /** + * qla2x00_async_event() - Process aynchronous events. 
+ * @vha: SCSI driver HA context +@@ -3229,6 +3273,7 @@ void qla24xx_process_response_queue(stru + { + struct sts_entry_24xx *pkt; + struct qla_hw_data *ha = vha->hw; ++ struct purex_item *pure_item; + + if (!ha->flags.fw_started) + return; +@@ -3280,8 +3325,12 @@ void qla24xx_process_response_queue(stru + break; + case ABTS_RECV_24XX: + if (qla_ini_mode_enabled(vha)) { +- qla24xx_purex_iocb(vha, pkt, +- qla24xx_process_abts); ++ pure_item = qla24xx_copy_std_pkt(vha, pkt); ++ if (!pure_item) ++ break; ++ ++ qla24xx_queue_purex_item(vha, pure_item, ++ qla24xx_process_abts); + break; + } + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || +@@ -3332,13 +3381,18 @@ void qla24xx_process_response_queue(stru + { + struct purex_entry_24xx *purex = (void *)pkt; + +- if (purex->els_frame_payload[3] != ELS_COMMAND_RDP) { ++ if (purex->els_frame_payload[3] != ELS_RDP) { + ql_dbg(ql_dbg_init, vha, 0x5091, + "Discarding ELS Request opcode %#x...\n", + purex->els_frame_payload[3]); + break; + } +- qla24xx_purex_iocb(vha, pkt, qla24xx_process_purex_rdp); ++ pure_item = qla24xx_copy_std_pkt(vha, pkt); ++ if (!pure_item) ++ break; ++ ++ qla24xx_queue_purex_item(vha, pure_item, ++ qla24xx_process_purex_rdp); + break; + } + default: +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -59,6 +59,7 @@ static struct rom_cmd { + { MBC_IOCB_COMMAND_A64 }, + { MBC_GET_ADAPTER_LOOP_ID }, + { MBC_READ_SFP }, ++ { MBC_SET_RNID_PARAMS }, + { MBC_GET_RNID_PARAMS }, + { MBC_GET_SET_ZIO_THRESHOLD }, + }; +@@ -4866,6 +4867,7 @@ qla24xx_get_port_login_templ(scsi_qla_ho + return rval; + } + ++#define PUREX_CMD_COUNT 2 + int + qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) + { +@@ -4874,12 +4876,12 @@ qla25xx_set_els_cmds_supported(scsi_qla_ + mbx_cmd_t *mcp = &mc; + uint8_t *els_cmd_map; + dma_addr_t els_cmd_map_dma; +- uint cmd_opcode = ELS_COMMAND_RDP; +- uint index = cmd_opcode / 8; +- uint bit = cmd_opcode % 8; ++ uint8_t cmd_opcode[PUREX_CMD_COUNT]; ++ uint8_t i, index, purex_bit; + struct qla_hw_data *ha = vha->hw; + +- if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha)) ++ if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && ++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_SUCCESS; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, +@@ -4893,7 +4895,17 @@ qla25xx_set_els_cmds_supported(scsi_qla_ + return QLA_MEMORY_ALLOC_FAILED; + } + +- els_cmd_map[index] |= 1 << bit; ++ memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE); ++ ++ /* List of Purex ELS */ ++ cmd_opcode[0] = ELS_FPIN; ++ cmd_opcode[1] = ELS_RDP; ++ ++ for (i = 0; i < PUREX_CMD_COUNT; i++) { ++ index = cmd_opcode[i] / 8; ++ purex_bit = cmd_opcode[i] % 8; ++ els_cmd_map[index] |= 1 << purex_bit; ++ } + + mcp->mb[0] = MBC_SET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -5906,10 +5906,12 @@ qla25xx_rdp_port_speed_currently(struct + * vha: SCSI qla host + * purex: RDP request received by HBA + */ +-void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt) ++void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, ++ struct purex_item *item) + { + struct qla_hw_data *ha = vha->hw; +- struct purex_entry_24xx *purex = pkt; ++ struct purex_entry_24xx *purex = ++ (struct purex_entry_24xx *)&item->iocb; + dma_addr_t rsp_els_dma; + dma_addr_t rsp_payload_dma; + dma_addr_t stat_dma; +@@ -6319,6 +6321,15 @@ void qla24xx_process_purex_rdp(struct sc + rsp_els, rsp_els_dma); + } + ++void ++qla24xx_free_purex_item(struct purex_item *item) ++{ ++ if (item == 
&item->vha->default_item) ++ memset(&item->vha->default_item, 0, sizeof(struct purex_item)); ++ else ++ kfree(item); ++} ++ + void qla24xx_process_purex_list(struct purex_list *list) + { + struct list_head head = LIST_HEAD_INIT(head); +@@ -6331,8 +6342,8 @@ void qla24xx_process_purex_list(struct p + + list_for_each_entry_safe(item, next, &head, list) { + list_del(&item->list); +- item->process_item(item->vha, &item->iocb); +- kfree(item); ++ item->process_item(item->vha, item); ++ qla24xx_free_purex_item(item); + } + } + +--- a/include/uapi/scsi/fc/fc_els.h ++++ b/include/uapi/scsi/fc/fc_els.h +@@ -53,6 +53,7 @@ enum fc_els_cmd { + ELS_REC = 0x13, /* read exchange concise */ + ELS_SRR = 0x14, /* sequence retransmission request */ + ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */ ++ ELS_RDP = 0x18, /* Read Diagnostic Parameters */ + ELS_RDF = 0x19, /* Register Diagnostic Functions */ + ELS_PRLI = 0x20, /* process login */ + ELS_PRLO = 0x21, /* process logout */ +@@ -122,6 +123,7 @@ enum fc_els_cmd { + [ELS_REC] = "REC", \ + [ELS_SRR] = "SRR", \ + [ELS_FPIN] = "FPIN", \ ++ [ELS_RDP] = "RDP", \ + [ELS_RDF] = "RDF", \ + [ELS_PRLI] = "PRLI", \ + [ELS_PRLO] = "PRLO", \ diff --git a/patches.suse/scsi-qla2xxx-Change-two-hardcoded-constants-into-off.patch b/patches.suse/scsi-qla2xxx-Change-two-hardcoded-constants-into-off.patch new file mode 100644 index 0000000..17548cc --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Change-two-hardcoded-constants-into-off.patch @@ -0,0 +1,50 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:05 -0700 +Subject: scsi: qla2xxx: Change two hardcoded constants into offsetof() / + sizeof() expressions +Patch-mainline: v5.8-rc1 +Git-commit: e544b720ef3191cdb6d3ea2915f82973d6372bca +References: bsc#1171688 bsc#1174003 + +This patch does not change any functionality. + +Link: https://lore.kernel.org/r/20200518211712.11395-9-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Reviewed-by: Hannes Reinecke +Reviewed-by: Arun Easi +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_fw.h | 3 +-- + drivers/scsi/qla2xxx/qla_sup.c | 2 +- + 2 files changed, 2 insertions(+), 3 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_fw.h ++++ b/drivers/scsi/qla2xxx/qla_fw.h +@@ -2216,9 +2216,8 @@ struct qla_fcp_prio_cfg { + #define FCP_PRIO_ATTR_ENABLE 0x1 + #define FCP_PRIO_ATTR_PERSIST 0x2 + uint8_t reserved; /* Reserved for future use */ +-#define FCP_PRIO_CFG_HDR_SIZE 0x10 ++#define FCP_PRIO_CFG_HDR_SIZE offsetof(struct qla_fcp_prio_cfg, entry) + struct qla_fcp_prio_entry entry[1023]; /* fcp priority entries */ +-#define FCP_PRIO_CFG_ENTRY_SIZE 0x20 + uint8_t reserved2[16]; + }; + +--- a/drivers/scsi/qla2xxx/qla_sup.c ++++ b/drivers/scsi/qla2xxx/qla_sup.c +@@ -3617,7 +3617,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_ + + /* read remaining FCP CMD config data from flash */ + fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2); +- len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE; ++ len = ha->fcp_prio_cfg->num_entries * sizeof(struct qla_fcp_prio_entry); + max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE; + + ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0], diff --git a/patches.suse/scsi-qla2xxx-Check-if-FW-supports-MQ-before-enabling.patch b/patches.suse/scsi-qla2xxx-Check-if-FW-supports-MQ-before-enabling.patch new file mode 100644 index 0000000..21dfd85 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Check-if-FW-supports-MQ-before-enabling.patch @@ -0,0 +1,35 @@ +From: Saurav Kashyap +Date: Thu, 6 Aug 2020 04:10:11 -0700 +Subject: scsi: qla2xxx: Check if FW supports MQ before enabling +Patch-mainline: v5.9-rc2 +Git-commit: dffa11453313a115157b19021cc2e27ea98e624c +References: bsc#1171688 bsc#1174003 + +OS boot during Boot from SAN was stuck at dracut emergency shell after +enabling NVMe driver parameter. For non-MQ support the driver was enabling +MQ. Add a check to confirm if FW supports MQ. + +Link: https://lore.kernel.org/r/20200806111014.28434-9-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Saurav Kashyap +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_os.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -2023,6 +2023,11 @@ qla2x00_iospace_config(struct qla_hw_dat + /* Determine queue resources */ + ha->max_req_queues = ha->max_rsp_queues = 1; + ha->msix_count = QLA_BASE_VECTORS; ++ ++ /* Check if FW supports MQ or not */ ++ if (!(ha->fw_attributes & BIT_6)) ++ goto mqiobase_exit; ++ + if (!ql2xmqsupport || !ql2xnvmeenable || + (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) + goto mqiobase_exit; diff --git a/patches.suse/scsi-qla2xxx-Check-the-size-of-struct-fcp_hdr-at-com.patch b/patches.suse/scsi-qla2xxx-Check-the-size-of-struct-fcp_hdr-at-com.patch new file mode 100644 index 0000000..41eb047 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Check-the-size-of-struct-fcp_hdr-at-com.patch @@ -0,0 +1,34 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:46 -0700 +Subject: scsi: qla2xxx: Check the size of struct fcp_hdr at compile time +Patch-mainline: v5.9-rc1 +Git-commit: a7f474542ea36e32407dadd3849225d21c315bd4 +References: bsc#1171688 bsc#1174003 + +Since struct fcp_hdr is used to exchange data with the firmware, check its +size at compile time. 
+ +Link: https://lore.kernel.org/r/20200629225454.22863-2-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/tcm_qla2xxx.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -2026,6 +2026,7 @@ static int __init tcm_qla2xxx_init(void) + BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); + BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64); + BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); ++ BUILD_BUG_ON(sizeof(struct fcp_hdr) != 24); + BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24); + BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64); + diff --git a/patches.suse/scsi-qla2xxx-Fix-MPI-failure-AEN-8200-handling.patch b/patches.suse/scsi-qla2xxx-Fix-MPI-failure-AEN-8200-handling.patch new file mode 100644 index 0000000..b154672 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-MPI-failure-AEN-8200-handling.patch @@ -0,0 +1,457 @@ +From: Arun Easi +Date: Tue, 31 Mar 2020 03:40:13 -0700 +Subject: scsi: qla2xxx: Fix MPI failure AEN (8200) handling +Patch-mainline: v5.8-rc1 +Git-commit: cbb01c2f2f630f1497f703c51ff21538ae2d86b8 +References: bsc#1171688 bsc#1174003 + +Today, upon an MPI failure AEN, on top of collecting an MPI dump, a regular +firmware dump is also taken and then chip reset. This is disruptive to IOs +and not required. Make the firmware dump collection, followed by chip +reset, optional (not done by default). + +Firmware dump buffer and MPI dump buffer are independent of each +other with this change and each can have dump that was taken at two +different times for two different issues. The MPI dump is saved in a +separate buffer and is retrieved differently from firmware dump. + +To collect full dump on MPI failure AEN, a module parameter is +introduced: + ql2xfulldump_on_mpifail (default: 0) + +Link: https://lore.kernel.org/r/20200331104015.24868-2-njavali@marvell.com +Reported-by: kbuild test robot +Reviewed-by: Himanshu Madhani +Signed-off-by: Arun Easi +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_attr.c | 30 +++++++++- + drivers/scsi/qla2xxx/qla_def.h | 13 ++++ + drivers/scsi/qla2xxx/qla_gbl.h | 3 + + drivers/scsi/qla2xxx/qla_init.c | 2 + drivers/scsi/qla2xxx/qla_isr.c | 54 ++++++++++++------ + drivers/scsi/qla2xxx/qla_os.c | 6 ++ + drivers/scsi/qla2xxx/qla_tmpl.c | 119 ++++++++++++++++++++++++++++++++-------- + 7 files changed, 185 insertions(+), 42 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -26,7 +26,8 @@ qla2x00_sysfs_read_fw_dump(struct file * + struct qla_hw_data *ha = vha->hw; + int rval = 0; + +- if (!(ha->fw_dump_reading || ha->mctp_dump_reading)) ++ if (!(ha->fw_dump_reading || ha->mctp_dump_reading || ++ ha->mpi_fw_dump_reading)) + return 0; + + mutex_lock(&ha->optrom_mutex); +@@ -42,6 +43,10 @@ qla2x00_sysfs_read_fw_dump(struct file * + } else if (ha->mctp_dumped && ha->mctp_dump_reading) { + rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump, + MCTP_DUMP_SIZE); ++ } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) { ++ rval = memory_read_from_buffer(buf, count, &off, ++ ha->mpi_fw_dump, ++ ha->mpi_fw_dump_len); + } else if (ha->fw_dump_reading) { + rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump, + ha->fw_dump_len); +@@ -103,7 +108,6 @@ qla2x00_sysfs_write_fw_dump(struct file + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); + } else { +- ha->fw_dump_mpi = 1; + qla2x00_system_error(vha); + } + break; +@@ -137,6 +141,22 @@ qla2x00_sysfs_write_fw_dump(struct file + vha->host_no); + } + break; ++ case 8: ++ if (!ha->mpi_fw_dump_reading) ++ break; ++ ql_log(ql_log_info, vha, 0x70e7, ++ "MPI firmware dump cleared on (%ld).\n", vha->host_no); ++ ha->mpi_fw_dump_reading = 0; ++ ha->mpi_fw_dumped = 0; ++ break; ++ case 9: ++ if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) { ++ ha->mpi_fw_dump_reading = 1; ++ ql_log(ql_log_info, vha, 0x70e8, ++ "Raw MPI firmware dump ready for read on (%ld).\n", ++ vha->host_no); ++ } ++ break; + } + return count; + } +@@ -706,7 +726,8 @@ qla2x00_sysfs_write_reset(struct file *f + scsi_unblock_requests(vha->host); + break; + case 0x2025d: +- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) ++ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && ++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + ql_log(ql_log_info, vha, 0x706f, +@@ -724,6 +745,8 @@ qla2x00_sysfs_write_reset(struct file *f + qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP); + qla83xx_idc_unlock(vha, 0); + break; ++ } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ++ qla27xx_reset_mpi(vha); + } else { + /* Make sure FC side is not in reset */ + WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) != +@@ -737,6 +760,7 @@ qla2x00_sysfs_write_reset(struct file *f + scsi_unblock_requests(vha->host); + break; + } ++ break; + case 0x2025e: + if (!IS_P3P_TYPE(ha) || vha != base_vha) { + ql_log(ql_log_info, vha, 0x7071, +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -3223,6 +3223,7 @@ struct isp_operations { + uint32_t); + + void (*fw_dump) (struct scsi_qla_host *, int); ++ void (*mpi_fw_dump)(struct scsi_qla_host *, int); + + int (*beacon_on) (struct scsi_qla_host *); + int (*beacon_off) (struct scsi_qla_host *); +@@ -3748,6 +3749,11 @@ struct qlt_hw_data { + + #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ + ++struct qla_hw_data_stat { ++ u32 num_fw_dump; ++ u32 num_mpi_reset; ++}; ++ + /* + * Qlogic host adapter specific data structure. 
+ */ +@@ -4230,7 +4236,6 @@ struct qla_hw_data { + uint32_t fw_dump_len; + u32 fw_dump_alloc_len; + bool fw_dumped; +- bool fw_dump_mpi; + unsigned long fw_dump_cap_flags; + #define RISC_PAUSE_CMPL 0 + #define DMA_SHUTDOWN_CMPL 1 +@@ -4241,6 +4246,10 @@ struct qla_hw_data { + #define ISP_MBX_RDY 6 + #define ISP_SOFT_RESET_CMPL 7 + int fw_dump_reading; ++ void *mpi_fw_dump; ++ u32 mpi_fw_dump_len; ++ int mpi_fw_dump_reading:1; ++ int mpi_fw_dumped:1; + int prev_minidump_failed; + dma_addr_t eft_dma; + void *eft; +@@ -4454,6 +4463,8 @@ struct qla_hw_data { + uint16_t last_zio_threshold; + + #define DEFAULT_ZIO_THRESHOLD 5 ++ ++ struct qla_hw_data_stat stat; + }; + + struct active_regions { +--- a/drivers/scsi/qla2xxx/qla_gbl.h ++++ b/drivers/scsi/qla2xxx/qla_gbl.h +@@ -173,6 +173,7 @@ extern int ql2xenablemsix; + extern int qla2xuseresexchforels; + extern int ql2xexlogins; + extern int ql2xdifbundlinginternalbuffers; ++extern int ql2xfulldump_on_mpifail; + + extern int qla2x00_loop_reset(scsi_qla_host_t *); + extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); +@@ -645,6 +646,7 @@ extern void qla82xx_fw_dump(scsi_qla_hos + extern void qla8044_fw_dump(scsi_qla_host_t *, int); + + extern void qla27xx_fwdump(scsi_qla_host_t *, int); ++extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int); + extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *); + extern int qla27xx_fwdt_template_valid(void *); + extern ulong qla27xx_fwdt_template_size(void *); +@@ -933,5 +935,6 @@ extern void qla24xx_process_purex_list(s + + /* nvme.c */ + void qla_nvme_unregister_remote_port(struct fc_port *fcport); ++void qla27xx_reset_mpi(scsi_qla_host_t *vha); + void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea); + #endif /* _QLA_GBL_H */ +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -3341,6 +3341,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *v + dump_size / 1024); + + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ++ ha->mpi_fw_dump = (char *)fw_dump + ++ ha->fwdt[1].dump_size; + mutex_unlock(&ha->optrom_mutex); + return; + } +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -756,6 +756,39 @@ qla2x00_find_fcport_by_nportid(scsi_qla_ + return NULL; + } + ++/* Shall be called only on supported adapters. */ ++static void ++qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) ++{ ++ struct qla_hw_data *ha = vha->hw; ++ bool reset_isp_needed = 0; ++ ++ ql_log(ql_log_warn, vha, 0x02f0, ++ "MPI Heartbeat stop. MPI reset is%s needed. " ++ "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", ++ mb[0] & BIT_8 ? "" : " not", ++ mb[0], mb[1], mb[2], mb[3]); ++ ++ if ((mb[1] & BIT_8) == 0) ++ return; ++ ++ ql_log(ql_log_warn, vha, 0x02f1, ++ "MPI Heartbeat stop. FW dump needed\n"); ++ ++ if (ql2xfulldump_on_mpifail) { ++ ha->isp_ops->fw_dump(vha, 1); ++ reset_isp_needed = 1; ++ } ++ ++ ha->isp_ops->mpi_fw_dump(vha, 1); ++ ++ if (reset_isp_needed) { ++ vha->hw->flags.fw_init_done = 0; ++ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); ++ qla2xxx_wake_dpc(vha); ++ } ++} ++ + /** + * qla2x00_async_event() - Process aynchronous events. 
+ * @vha: SCSI driver HA context +@@ -871,9 +904,9 @@ qla2x00_async_event(scsi_qla_host_t *vha + "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", + mb[1], mb[2], mb[3]); + +- ha->fw_dump_mpi = +- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) && +- RD_REG_WORD(®24->mailbox7) & BIT_8; ++ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && ++ RD_REG_WORD(®24->mailbox7) & BIT_8) ++ ha->isp_ops->mpi_fw_dump(vha, 1); + ha->isp_ops->fw_dump(vha, 1); + ha->flags.fw_init_done = 0; + QLA_FW_STOPPED(ha); +@@ -1374,20 +1407,7 @@ qla2x00_async_event(scsi_qla_host_t *vha + + case MBA_IDC_AEN: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { +- ha->flags.fw_init_done = 0; +- ql_log(ql_log_warn, vha, 0xffff, +- "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", +- mb[0], mb[1], mb[2], mb[3]); +- +- if ((mb[1] & BIT_8) || +- (mb[2] & BIT_8)) { +- ql_log(ql_log_warn, vha, 0xd013, +- "MPI Heartbeat stop. FW dump needed\n"); +- ha->fw_dump_mpi = 1; +- ha->isp_ops->fw_dump(vha, 1); +- } +- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); +- qla2xxx_wake_dpc(vha); ++ qla27xx_handle_8200_aen(vha, mb); + } else if (IS_QLA83XX(ha)) { + mb[4] = RD_REG_WORD(®24->mailbox4); + mb[5] = RD_REG_WORD(®24->mailbox5); +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -35,6 +35,11 @@ static int apidev_major; + */ + struct kmem_cache *srb_cachep; + ++int ql2xfulldump_on_mpifail; ++module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR); ++MODULE_PARM_DESC(ql2xfulldump_on_mpifail, ++ "Set this to take full dump on MPI hang."); ++ + /* + * CT6 CTX allocation cache + */ +@@ -2524,6 +2529,7 @@ static struct isp_operations qla27xx_isp + .read_nvram = NULL, + .write_nvram = NULL, + .fw_dump = qla27xx_fwdump, ++ .mpi_fw_dump = qla27xx_mpi_fwdump, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = qla83xx_beacon_blink, +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -12,6 +12,33 @@ + #define IOBASE(vha) IOBAR(ISPREG(vha)) + #define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL) + ++/* hardware_lock assumed held. 
*/ ++static void ++qla27xx_write_remote_reg(struct scsi_qla_host *vha, ++ u32 addr, u32 data) ++{ ++ char *reg = (char *)ISPREG(vha); ++ ++ ql_dbg(ql_dbg_misc, vha, 0xd300, ++ "%s: addr/data = %xh/%xh\n", __func__, addr, data); ++ ++ WRT_REG_DWORD(reg + IOBASE(vha), 0x40); ++ WRT_REG_DWORD(reg + 0xc4, data); ++ WRT_REG_DWORD(reg + 0xc0, addr); ++} ++ ++void ++qla27xx_reset_mpi(scsi_qla_host_t *vha) ++{ ++ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301, ++ "Entered %s.\n", __func__); ++ ++ qla27xx_write_remote_reg(vha, 0x104050, 0x40004); ++ qla27xx_write_remote_reg(vha, 0x10405c, 0x4); ++ ++ vha->hw->stat.num_mpi_reset++; ++} ++ + static inline void + qla27xx_insert16(uint16_t value, void *buf, ulong *len) + { +@@ -998,6 +1025,62 @@ qla27xx_fwdt_template_valid(void *p) + } + + void ++qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked) ++{ ++ ulong flags = 0; ++ bool need_mpi_reset = 1; ++ ++#ifndef __CHECKER__ ++ if (!hardware_locked) ++ spin_lock_irqsave(&vha->hw->hardware_lock, flags); ++#endif ++ if (!vha->hw->mpi_fw_dump) { ++ ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n"); ++ } else if (vha->hw->mpi_fw_dumped) { ++ ql_log(ql_log_warn, vha, 0x02f4, ++ "-> MPI firmware already dumped (%p) -- ignoring request\n", ++ vha->hw->mpi_fw_dump); ++ } else { ++ struct fwdt *fwdt = &vha->hw->fwdt[1]; ++ ulong len; ++ void *buf = vha->hw->mpi_fw_dump; ++ ++ ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n"); ++ if (!fwdt->template) { ++ ql_log(ql_log_warn, vha, 0x02f6, ++ "-> fwdt1 no template\n"); ++ goto bailout; ++ } ++ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); ++ if (len == 0) { ++ goto bailout; ++ } else if (len != fwdt->dump_size) { ++ ql_log(ql_log_warn, vha, 0x02f7, ++ "-> fwdt1 fwdump residual=%+ld\n", ++ fwdt->dump_size - len); ++ } else { ++ need_mpi_reset = 0; ++ } ++ ++ vha->hw->mpi_fw_dump_len = len; ++ vha->hw->mpi_fw_dumped = 1; ++ ++ ql_log(ql_log_warn, vha, 0x02f8, ++ "-> MPI firmware dump saved to buffer (%lu/%p)\n", ++ vha->host_no, vha->hw->mpi_fw_dump); ++ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); ++ } ++ ++bailout: ++ if (need_mpi_reset) ++ qla27xx_reset_mpi(vha); ++#ifndef __CHECKER__ ++ if (!hardware_locked) ++ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); ++#endif ++} ++ ++void + qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) + { + ulong flags = 0; +@@ -1015,30 +1098,25 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int + vha->hw->fw_dump); + } else { + struct fwdt *fwdt = vha->hw->fwdt; +- uint j; + ulong len; + void *buf = vha->hw->fw_dump; +- uint count = vha->hw->fw_dump_mpi ? 
2 : 1; + +- for (j = 0; j < count; j++, fwdt++, buf += len) { +- ql_log(ql_log_warn, vha, 0xd011, +- "-> fwdt%u running...\n", j); +- if (!fwdt->template) { +- ql_log(ql_log_warn, vha, 0xd012, +- "-> fwdt%u no template\n", j); +- break; +- } +- len = qla27xx_execute_fwdt_template(vha, +- fwdt->template, buf); +- if (len == 0) { +- goto bailout; +- } else if (len != fwdt->dump_size) { +- ql_log(ql_log_warn, vha, 0xd013, +- "-> fwdt%u fwdump residual=%+ld\n", +- j, fwdt->dump_size - len); +- } ++ ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n"); ++ if (!fwdt->template) { ++ ql_log(ql_log_warn, vha, 0xd012, ++ "-> fwdt0 no template\n"); ++ goto bailout; + } +- vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump; ++ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); ++ if (len == 0) { ++ goto bailout; ++ } else if (len != fwdt->dump_size) { ++ ql_log(ql_log_warn, vha, 0xd013, ++ "-> fwdt0 fwdump residual=%+ld\n", ++ fwdt->dump_size - len); ++ } ++ ++ vha->hw->fw_dump_len = len; + vha->hw->fw_dumped = 1; + + ql_log(ql_log_warn, vha, 0xd015, +@@ -1048,7 +1126,6 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int + } + + bailout: +- vha->hw->fw_dump_mpi = 0; + #ifndef __CHECKER__ + if (!hardware_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); diff --git a/patches.suse/scsi-qla2xxx-Fix-WARN_ON-in-qla_nvme_register_hba.patch b/patches.suse/scsi-qla2xxx-Fix-WARN_ON-in-qla_nvme_register_hba.patch new file mode 100644 index 0000000..9c95508 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-WARN_ON-in-qla_nvme_register_hba.patch @@ -0,0 +1,51 @@ +From: Arun Easi +Date: Thu, 6 Aug 2020 04:10:10 -0700 +Subject: scsi: qla2xxx: Fix WARN_ON in qla_nvme_register_hba +Patch-mainline: v5.9-rc2 +Git-commit: 897d68eb816bfae5ad9e870f68350dbb599d6e0e +References: bsc#1171688 bsc#1174003 + +qla_nvme_register_hba() puts out a warning when there are not enough queue +pairs available for FC-NVME. Just fail the NVME registration rather than a +WARNING + call Trace. + +Link: https://lore.kernel.org/r/20200806111014.28434-8-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Arun Easi +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_def.h | 1 + + drivers/scsi/qla2xxx/qla_nvme.c | 10 +++++++++- + 2 files changed, 10 insertions(+), 1 deletion(-) + +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -3880,6 +3880,7 @@ struct qla_hw_data { + uint32_t scm_supported_f:1; + /* Enabled in Driver */ + uint32_t scm_enabled:1; ++ uint32_t max_req_queue_warned:1; + } flags; + + uint16_t max_exchg; +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -699,7 +699,15 @@ int qla_nvme_register_hba(struct scsi_ql + tmpl = &qla_nvme_fc_transport; + + WARN_ON(vha->nvme_local_port); +- WARN_ON(ha->max_req_queues < 3); ++ ++ if (ha->max_req_queues < 3) { ++ if (!ha->flags.max_req_queue_warned) ++ ql_log(ql_log_info, vha, 0x2120, ++ "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n", ++ __func__, ha->max_req_queues); ++ ha->flags.max_req_queue_warned = 1; ++ return ret; ++ } + + qla_nvme_fc_transport.max_hw_queues = + min((uint8_t)(qla_nvme_fc_transport.max_hw_queues), diff --git a/patches.suse/scsi-qla2xxx-Fix-a-Coverity-complaint-in-qla2100_fw_.patch b/patches.suse/scsi-qla2xxx-Fix-a-Coverity-complaint-in-qla2100_fw_.patch new file mode 100644 index 0000000..6285e72 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-a-Coverity-complaint-in-qla2100_fw_.patch @@ -0,0 +1,51 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:52 -0700 +Subject: scsi: qla2xxx: Fix a Coverity complaint in qla2100_fw_dump() +Patch-mainline: v5.9-rc1 +Git-commit: 57fec9f24e580d8fe4219ee89572f49758e62c75 +References: bsc#1171688 bsc#1174003 + +'cnt' can exceed the size of the risc_ram[] array. Prevent that Coverity +complains by rewriting an address calculation expression. This patch fixes +the following Coverity complaint: + +CID 337803 (#1 of 1): Out-of-bounds read (OVERRUN) +109. overrun-local: Overrunning array of 122880 bytes at byte offset 122880 +by dereferencing pointer &fw->risc_ram[cnt]. + +Link: https://lore.kernel.org/r/20200629225454.22863-8-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_dbg.c | 2 +- + drivers/scsi/qla2xxx/qla_dbg.h | 1 + + 2 files changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -1063,7 +1063,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha) + } + + if (rval == QLA_SUCCESS) +- qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); ++ qla2xxx_copy_queues(ha, &fw->queue_dump[0]); + + qla2xxx_dump_post_process(base_vha, rval); + } +--- a/drivers/scsi/qla2xxx/qla_dbg.h ++++ b/drivers/scsi/qla2xxx/qla_dbg.h +@@ -53,6 +53,7 @@ struct qla2100_fw_dump { + __be16 fpm_b0_reg[64]; + __be16 fpm_b1_reg[64]; + __be16 risc_ram[0xf000]; ++ u8 queue_dump[]; + }; + + struct qla24xx_fw_dump { diff --git a/patches.suse/scsi-qla2xxx-Fix-endianness-annotations-in-header-fi.patch b/patches.suse/scsi-qla2xxx-Fix-endianness-annotations-in-header-fi.patch new file mode 100644 index 0000000..b0e0efd --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-endianness-annotations-in-header-fi.patch @@ -0,0 +1,3858 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:11 -0700 +Subject: scsi: qla2xxx: Fix endianness annotations in header files +Patch-mainline: v5.8-rc1 +Git-commit: 21038b0900d1b8728ec77d9286d7b0b57ca7b585 +References: bsc#1171688 bsc#1174003 + +Annotate members of FC protocol and firmware dump data structures as big +endian. Annotate members of RISC control structures as little endian. +Annotate mailbox registers as little endian. Annotate the mb[] arrays as +CPU-endian because communication of the mb[] values with the hardware +happens through the readw() and writew() functions. readw() converts from +__le16 to u16 and writew() converts from u16 to __le16. Annotate 'handles' +as CPU-endian because for the firmware these are opaque values. + +Link: https://lore.kernel.org/r/20200518211712.11395-15-bvanassche@acm.org +CC: Hannes Reinecke +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_dbg.h | 450 +++++++++++----------- + drivers/scsi/qla2xxx/qla_def.h | 652 ++++++++++++++++---------------- + drivers/scsi/qla2xxx/qla_fw.h | 762 +++++++++++++++++++------------------- + drivers/scsi/qla2xxx/qla_inline.h | 2 + drivers/scsi/qla2xxx/qla_mr.h | 8 + drivers/scsi/qla2xxx/qla_nvme.h | 64 +-- + drivers/scsi/qla2xxx/qla_nx.h | 38 - + drivers/scsi/qla2xxx/qla_target.h | 232 +++++------ + drivers/scsi/qla2xxx/qla_tmpl.h | 2 + 9 files changed, 1105 insertions(+), 1105 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_dbg.h ++++ b/drivers/scsi/qla2xxx/qla_dbg.h +@@ -12,205 +12,205 @@ + */ + + struct qla2300_fw_dump { +- uint16_t hccr; +- uint16_t pbiu_reg[8]; +- uint16_t risc_host_reg[8]; +- uint16_t mailbox_reg[32]; +- uint16_t resp_dma_reg[32]; +- uint16_t dma_reg[48]; +- uint16_t risc_hdw_reg[16]; +- uint16_t risc_gp0_reg[16]; +- uint16_t risc_gp1_reg[16]; +- uint16_t risc_gp2_reg[16]; +- uint16_t risc_gp3_reg[16]; +- uint16_t risc_gp4_reg[16]; +- uint16_t risc_gp5_reg[16]; +- uint16_t risc_gp6_reg[16]; +- uint16_t risc_gp7_reg[16]; +- uint16_t frame_buf_hdw_reg[64]; +- uint16_t fpm_b0_reg[64]; +- uint16_t fpm_b1_reg[64]; +- uint16_t risc_ram[0xf800]; +- uint16_t stack_ram[0x1000]; +- uint16_t data_ram[1]; ++ __be16 hccr; ++ __be16 pbiu_reg[8]; ++ __be16 risc_host_reg[8]; ++ __be16 mailbox_reg[32]; ++ __be16 resp_dma_reg[32]; ++ __be16 dma_reg[48]; ++ __be16 risc_hdw_reg[16]; ++ __be16 risc_gp0_reg[16]; ++ __be16 risc_gp1_reg[16]; ++ __be16 risc_gp2_reg[16]; ++ __be16 risc_gp3_reg[16]; ++ __be16 risc_gp4_reg[16]; ++ __be16 risc_gp5_reg[16]; ++ __be16 risc_gp6_reg[16]; ++ __be16 risc_gp7_reg[16]; ++ __be16 frame_buf_hdw_reg[64]; ++ __be16 fpm_b0_reg[64]; ++ __be16 fpm_b1_reg[64]; ++ __be16 risc_ram[0xf800]; ++ __be16 stack_ram[0x1000]; ++ __be16 data_ram[1]; + }; + + struct qla2100_fw_dump { +- uint16_t hccr; +- uint16_t pbiu_reg[8]; +- uint16_t mailbox_reg[32]; +- uint16_t dma_reg[48]; +- uint16_t risc_hdw_reg[16]; +- uint16_t risc_gp0_reg[16]; +- uint16_t risc_gp1_reg[16]; +- uint16_t risc_gp2_reg[16]; +- uint16_t risc_gp3_reg[16]; +- uint16_t risc_gp4_reg[16]; +- uint16_t risc_gp5_reg[16]; +- uint16_t risc_gp6_reg[16]; +- uint16_t risc_gp7_reg[16]; +- uint16_t frame_buf_hdw_reg[16]; +- uint16_t fpm_b0_reg[64]; +- uint16_t fpm_b1_reg[64]; +- uint16_t risc_ram[0xf000]; ++ __be16 hccr; ++ __be16 pbiu_reg[8]; ++ __be16 mailbox_reg[32]; ++ __be16 dma_reg[48]; ++ __be16 risc_hdw_reg[16]; ++ __be16 risc_gp0_reg[16]; ++ __be16 risc_gp1_reg[16]; ++ __be16 risc_gp2_reg[16]; ++ __be16 risc_gp3_reg[16]; ++ __be16 risc_gp4_reg[16]; ++ __be16 risc_gp5_reg[16]; ++ __be16 risc_gp6_reg[16]; ++ __be16 risc_gp7_reg[16]; ++ __be16 frame_buf_hdw_reg[16]; ++ __be16 fpm_b0_reg[64]; ++ __be16 fpm_b1_reg[64]; ++ __be16 risc_ram[0xf000]; + }; + + struct qla24xx_fw_dump { +- uint32_t host_status; +- uint32_t host_reg[32]; +- uint32_t shadow_reg[7]; +- uint16_t mailbox_reg[32]; +- uint32_t xseq_gp_reg[128]; +- uint32_t xseq_0_reg[16]; +- uint32_t xseq_1_reg[16]; +- uint32_t rseq_gp_reg[128]; +- uint32_t rseq_0_reg[16]; +- uint32_t rseq_1_reg[16]; +- uint32_t rseq_2_reg[16]; +- uint32_t cmd_dma_reg[16]; +- uint32_t req0_dma_reg[15]; +- uint32_t resp0_dma_reg[15]; +- uint32_t req1_dma_reg[15]; +- uint32_t xmt0_dma_reg[32]; +- uint32_t xmt1_dma_reg[32]; +- uint32_t xmt2_dma_reg[32]; +- uint32_t xmt3_dma_reg[32]; +- uint32_t xmt4_dma_reg[32]; +- uint32_t xmt_data_dma_reg[16]; +- uint32_t rcvt0_data_dma_reg[32]; +- uint32_t rcvt1_data_dma_reg[32]; +- uint32_t 
risc_gp_reg[128]; +- uint32_t lmc_reg[112]; +- uint32_t fpm_hdw_reg[192]; +- uint32_t fb_hdw_reg[176]; +- uint32_t code_ram[0x2000]; +- uint32_t ext_mem[1]; ++ __be32 host_status; ++ __be32 host_reg[32]; ++ __be32 shadow_reg[7]; ++ __be16 mailbox_reg[32]; ++ __be32 xseq_gp_reg[128]; ++ __be32 xseq_0_reg[16]; ++ __be32 xseq_1_reg[16]; ++ __be32 rseq_gp_reg[128]; ++ __be32 rseq_0_reg[16]; ++ __be32 rseq_1_reg[16]; ++ __be32 rseq_2_reg[16]; ++ __be32 cmd_dma_reg[16]; ++ __be32 req0_dma_reg[15]; ++ __be32 resp0_dma_reg[15]; ++ __be32 req1_dma_reg[15]; ++ __be32 xmt0_dma_reg[32]; ++ __be32 xmt1_dma_reg[32]; ++ __be32 xmt2_dma_reg[32]; ++ __be32 xmt3_dma_reg[32]; ++ __be32 xmt4_dma_reg[32]; ++ __be32 xmt_data_dma_reg[16]; ++ __be32 rcvt0_data_dma_reg[32]; ++ __be32 rcvt1_data_dma_reg[32]; ++ __be32 risc_gp_reg[128]; ++ __be32 lmc_reg[112]; ++ __be32 fpm_hdw_reg[192]; ++ __be32 fb_hdw_reg[176]; ++ __be32 code_ram[0x2000]; ++ __be32 ext_mem[1]; + }; + + struct qla25xx_fw_dump { +- uint32_t host_status; +- uint32_t host_risc_reg[32]; +- uint32_t pcie_regs[4]; +- uint32_t host_reg[32]; +- uint32_t shadow_reg[11]; +- uint32_t risc_io_reg; +- uint16_t mailbox_reg[32]; +- uint32_t xseq_gp_reg[128]; +- uint32_t xseq_0_reg[48]; +- uint32_t xseq_1_reg[16]; +- uint32_t rseq_gp_reg[128]; +- uint32_t rseq_0_reg[32]; +- uint32_t rseq_1_reg[16]; +- uint32_t rseq_2_reg[16]; +- uint32_t aseq_gp_reg[128]; +- uint32_t aseq_0_reg[32]; +- uint32_t aseq_1_reg[16]; +- uint32_t aseq_2_reg[16]; +- uint32_t cmd_dma_reg[16]; +- uint32_t req0_dma_reg[15]; +- uint32_t resp0_dma_reg[15]; +- uint32_t req1_dma_reg[15]; +- uint32_t xmt0_dma_reg[32]; +- uint32_t xmt1_dma_reg[32]; +- uint32_t xmt2_dma_reg[32]; +- uint32_t xmt3_dma_reg[32]; +- uint32_t xmt4_dma_reg[32]; +- uint32_t xmt_data_dma_reg[16]; +- uint32_t rcvt0_data_dma_reg[32]; +- uint32_t rcvt1_data_dma_reg[32]; +- uint32_t risc_gp_reg[128]; +- uint32_t lmc_reg[128]; +- uint32_t fpm_hdw_reg[192]; +- uint32_t fb_hdw_reg[192]; +- uint32_t code_ram[0x2000]; +- uint32_t ext_mem[1]; ++ __be32 host_status; ++ __be32 host_risc_reg[32]; ++ __be32 pcie_regs[4]; ++ __be32 host_reg[32]; ++ __be32 shadow_reg[11]; ++ __be32 risc_io_reg; ++ __be16 mailbox_reg[32]; ++ __be32 xseq_gp_reg[128]; ++ __be32 xseq_0_reg[48]; ++ __be32 xseq_1_reg[16]; ++ __be32 rseq_gp_reg[128]; ++ __be32 rseq_0_reg[32]; ++ __be32 rseq_1_reg[16]; ++ __be32 rseq_2_reg[16]; ++ __be32 aseq_gp_reg[128]; ++ __be32 aseq_0_reg[32]; ++ __be32 aseq_1_reg[16]; ++ __be32 aseq_2_reg[16]; ++ __be32 cmd_dma_reg[16]; ++ __be32 req0_dma_reg[15]; ++ __be32 resp0_dma_reg[15]; ++ __be32 req1_dma_reg[15]; ++ __be32 xmt0_dma_reg[32]; ++ __be32 xmt1_dma_reg[32]; ++ __be32 xmt2_dma_reg[32]; ++ __be32 xmt3_dma_reg[32]; ++ __be32 xmt4_dma_reg[32]; ++ __be32 xmt_data_dma_reg[16]; ++ __be32 rcvt0_data_dma_reg[32]; ++ __be32 rcvt1_data_dma_reg[32]; ++ __be32 risc_gp_reg[128]; ++ __be32 lmc_reg[128]; ++ __be32 fpm_hdw_reg[192]; ++ __be32 fb_hdw_reg[192]; ++ __be32 code_ram[0x2000]; ++ __be32 ext_mem[1]; + }; + + struct qla81xx_fw_dump { +- uint32_t host_status; +- uint32_t host_risc_reg[32]; +- uint32_t pcie_regs[4]; +- uint32_t host_reg[32]; +- uint32_t shadow_reg[11]; +- uint32_t risc_io_reg; +- uint16_t mailbox_reg[32]; +- uint32_t xseq_gp_reg[128]; +- uint32_t xseq_0_reg[48]; +- uint32_t xseq_1_reg[16]; +- uint32_t rseq_gp_reg[128]; +- uint32_t rseq_0_reg[32]; +- uint32_t rseq_1_reg[16]; +- uint32_t rseq_2_reg[16]; +- uint32_t aseq_gp_reg[128]; +- uint32_t aseq_0_reg[32]; +- uint32_t aseq_1_reg[16]; +- uint32_t aseq_2_reg[16]; +- 
uint32_t cmd_dma_reg[16]; +- uint32_t req0_dma_reg[15]; +- uint32_t resp0_dma_reg[15]; +- uint32_t req1_dma_reg[15]; +- uint32_t xmt0_dma_reg[32]; +- uint32_t xmt1_dma_reg[32]; +- uint32_t xmt2_dma_reg[32]; +- uint32_t xmt3_dma_reg[32]; +- uint32_t xmt4_dma_reg[32]; +- uint32_t xmt_data_dma_reg[16]; +- uint32_t rcvt0_data_dma_reg[32]; +- uint32_t rcvt1_data_dma_reg[32]; +- uint32_t risc_gp_reg[128]; +- uint32_t lmc_reg[128]; +- uint32_t fpm_hdw_reg[224]; +- uint32_t fb_hdw_reg[208]; +- uint32_t code_ram[0x2000]; +- uint32_t ext_mem[1]; ++ __be32 host_status; ++ __be32 host_risc_reg[32]; ++ __be32 pcie_regs[4]; ++ __be32 host_reg[32]; ++ __be32 shadow_reg[11]; ++ __be32 risc_io_reg; ++ __be16 mailbox_reg[32]; ++ __be32 xseq_gp_reg[128]; ++ __be32 xseq_0_reg[48]; ++ __be32 xseq_1_reg[16]; ++ __be32 rseq_gp_reg[128]; ++ __be32 rseq_0_reg[32]; ++ __be32 rseq_1_reg[16]; ++ __be32 rseq_2_reg[16]; ++ __be32 aseq_gp_reg[128]; ++ __be32 aseq_0_reg[32]; ++ __be32 aseq_1_reg[16]; ++ __be32 aseq_2_reg[16]; ++ __be32 cmd_dma_reg[16]; ++ __be32 req0_dma_reg[15]; ++ __be32 resp0_dma_reg[15]; ++ __be32 req1_dma_reg[15]; ++ __be32 xmt0_dma_reg[32]; ++ __be32 xmt1_dma_reg[32]; ++ __be32 xmt2_dma_reg[32]; ++ __be32 xmt3_dma_reg[32]; ++ __be32 xmt4_dma_reg[32]; ++ __be32 xmt_data_dma_reg[16]; ++ __be32 rcvt0_data_dma_reg[32]; ++ __be32 rcvt1_data_dma_reg[32]; ++ __be32 risc_gp_reg[128]; ++ __be32 lmc_reg[128]; ++ __be32 fpm_hdw_reg[224]; ++ __be32 fb_hdw_reg[208]; ++ __be32 code_ram[0x2000]; ++ __be32 ext_mem[1]; + }; + + struct qla83xx_fw_dump { +- uint32_t host_status; +- uint32_t host_risc_reg[48]; +- uint32_t pcie_regs[4]; +- uint32_t host_reg[32]; +- uint32_t shadow_reg[11]; +- uint32_t risc_io_reg; +- uint16_t mailbox_reg[32]; +- uint32_t xseq_gp_reg[256]; +- uint32_t xseq_0_reg[48]; +- uint32_t xseq_1_reg[16]; +- uint32_t xseq_2_reg[16]; +- uint32_t rseq_gp_reg[256]; +- uint32_t rseq_0_reg[32]; +- uint32_t rseq_1_reg[16]; +- uint32_t rseq_2_reg[16]; +- uint32_t rseq_3_reg[16]; +- uint32_t aseq_gp_reg[256]; +- uint32_t aseq_0_reg[32]; +- uint32_t aseq_1_reg[16]; +- uint32_t aseq_2_reg[16]; +- uint32_t aseq_3_reg[16]; +- uint32_t cmd_dma_reg[64]; +- uint32_t req0_dma_reg[15]; +- uint32_t resp0_dma_reg[15]; +- uint32_t req1_dma_reg[15]; +- uint32_t xmt0_dma_reg[32]; +- uint32_t xmt1_dma_reg[32]; +- uint32_t xmt2_dma_reg[32]; +- uint32_t xmt3_dma_reg[32]; +- uint32_t xmt4_dma_reg[32]; +- uint32_t xmt_data_dma_reg[16]; +- uint32_t rcvt0_data_dma_reg[32]; +- uint32_t rcvt1_data_dma_reg[32]; +- uint32_t risc_gp_reg[128]; +- uint32_t lmc_reg[128]; +- uint32_t fpm_hdw_reg[256]; +- uint32_t rq0_array_reg[256]; +- uint32_t rq1_array_reg[256]; +- uint32_t rp0_array_reg[256]; +- uint32_t rp1_array_reg[256]; +- uint32_t queue_control_reg[16]; +- uint32_t fb_hdw_reg[432]; +- uint32_t at0_array_reg[128]; +- uint32_t code_ram[0x2400]; +- uint32_t ext_mem[1]; ++ __be32 host_status; ++ __be32 host_risc_reg[48]; ++ __be32 pcie_regs[4]; ++ __be32 host_reg[32]; ++ __be32 shadow_reg[11]; ++ __be32 risc_io_reg; ++ __be16 mailbox_reg[32]; ++ __be32 xseq_gp_reg[256]; ++ __be32 xseq_0_reg[48]; ++ __be32 xseq_1_reg[16]; ++ __be32 xseq_2_reg[16]; ++ __be32 rseq_gp_reg[256]; ++ __be32 rseq_0_reg[32]; ++ __be32 rseq_1_reg[16]; ++ __be32 rseq_2_reg[16]; ++ __be32 rseq_3_reg[16]; ++ __be32 aseq_gp_reg[256]; ++ __be32 aseq_0_reg[32]; ++ __be32 aseq_1_reg[16]; ++ __be32 aseq_2_reg[16]; ++ __be32 aseq_3_reg[16]; ++ __be32 cmd_dma_reg[64]; ++ __be32 req0_dma_reg[15]; ++ __be32 resp0_dma_reg[15]; ++ __be32 req1_dma_reg[15]; ++ __be32 
xmt0_dma_reg[32]; ++ __be32 xmt1_dma_reg[32]; ++ __be32 xmt2_dma_reg[32]; ++ __be32 xmt3_dma_reg[32]; ++ __be32 xmt4_dma_reg[32]; ++ __be32 xmt_data_dma_reg[16]; ++ __be32 rcvt0_data_dma_reg[32]; ++ __be32 rcvt1_data_dma_reg[32]; ++ __be32 risc_gp_reg[128]; ++ __be32 lmc_reg[128]; ++ __be32 fpm_hdw_reg[256]; ++ __be32 rq0_array_reg[256]; ++ __be32 rq1_array_reg[256]; ++ __be32 rp0_array_reg[256]; ++ __be32 rp1_array_reg[256]; ++ __be32 queue_control_reg[16]; ++ __be32 fb_hdw_reg[432]; ++ __be32 at0_array_reg[128]; ++ __be32 code_ram[0x2400]; ++ __be32 ext_mem[1]; + }; + + #define EFT_NUM_BUFFERS 4 +@@ -223,45 +223,45 @@ struct qla83xx_fw_dump { + #define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b)) + + struct qla2xxx_fce_chain { +- uint32_t type; +- uint32_t chain_size; ++ __be32 type; ++ __be32 chain_size; + +- uint32_t size; +- uint32_t addr_l; +- uint32_t addr_h; +- uint32_t eregs[8]; ++ __be32 size; ++ __be32 addr_l; ++ __be32 addr_h; ++ __be32 eregs[8]; + }; + + /* used by exchange off load and extended login offload */ + struct qla2xxx_offld_chain { +- uint32_t type; +- uint32_t chain_size; ++ __be32 type; ++ __be32 chain_size; + +- uint32_t size; +- uint32_t reserved; +- u64 addr; ++ __be32 size; ++ __be32 reserved; ++ __be64 addr; + }; + + struct qla2xxx_mq_chain { +- uint32_t type; +- uint32_t chain_size; ++ __be32 type; ++ __be32 chain_size; + +- uint32_t count; +- uint32_t qregs[4 * QLA_MQ_SIZE]; ++ __be32 count; ++ __be32 qregs[4 * QLA_MQ_SIZE]; + }; + + struct qla2xxx_mqueue_header { +- uint32_t queue; ++ __be32 queue; + #define TYPE_REQUEST_QUEUE 0x1 + #define TYPE_RESPONSE_QUEUE 0x2 + #define TYPE_ATIO_QUEUE 0x3 +- uint32_t number; +- uint32_t size; ++ __be32 number; ++ __be32 size; + }; + + struct qla2xxx_mqueue_chain { +- uint32_t type; +- uint32_t chain_size; ++ __be32 type; ++ __be32 chain_size; + }; + + #define DUMP_CHAIN_VARIANT 0x80000000 +@@ -274,28 +274,28 @@ struct qla2xxx_mqueue_chain { + + struct qla2xxx_fw_dump { + uint8_t signature[4]; +- uint32_t version; ++ __be32 version; + +- uint32_t fw_major_version; +- uint32_t fw_minor_version; +- uint32_t fw_subminor_version; +- uint32_t fw_attributes; +- +- uint32_t vendor; +- uint32_t device; +- uint32_t subsystem_vendor; +- uint32_t subsystem_device; +- +- uint32_t fixed_size; +- uint32_t mem_size; +- uint32_t req_q_size; +- uint32_t rsp_q_size; +- +- uint32_t eft_size; +- uint32_t eft_addr_l; +- uint32_t eft_addr_h; ++ __be32 fw_major_version; ++ __be32 fw_minor_version; ++ __be32 fw_subminor_version; ++ __be32 fw_attributes; ++ ++ __be32 vendor; ++ __be32 device; ++ __be32 subsystem_vendor; ++ __be32 subsystem_device; ++ ++ __be32 fixed_size; ++ __be32 mem_size; ++ __be32 req_q_size; ++ __be32 rsp_q_size; ++ ++ __be32 eft_size; ++ __be32 eft_addr_l; ++ __be32 eft_addr_h; + +- uint32_t header_size; ++ __be32 header_size; + + union { + struct qla2100_fw_dump isp21; +@@ -370,7 +370,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, + + extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, + uint32_t, void **); +-extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, ++extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, __be32 *, + uint32_t, void **); + extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, + struct qla_hw_data *); +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -504,7 +504,7 @@ struct srb_iocb { + u32 rx_size; + dma_addr_t els_plogi_pyld_dma; + dma_addr_t els_resp_pyld_dma; +- uint32_t fw_status[3]; ++ __le32 
fw_status[3]; + __le16 comp_status; + __le16 len; + } els_plogi; +@@ -555,8 +555,8 @@ struct srb_iocb { + #define MAX_IOCB_MB_REG 28 + #define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t)) + struct { +- __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ +- __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ ++ u16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ ++ u16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ + void *out, *in; + dma_addr_t out_dma, in_dma; + struct completion comp; +@@ -567,7 +567,7 @@ struct srb_iocb { + } nack; + struct { + __le16 comp_status; +- uint16_t rsp_pyld_len; ++ __le16 rsp_pyld_len; + uint8_t aen_op; + void *desc; + +@@ -698,23 +698,23 @@ struct msg_echo_lb { + * ISP I/O Register Set structure definitions. + */ + struct device_reg_2xxx { +- uint16_t flash_address; /* Flash BIOS address */ +- uint16_t flash_data; /* Flash BIOS data */ +- uint16_t unused_1[1]; /* Gap */ +- uint16_t ctrl_status; /* Control/Status */ ++ __le16 flash_address; /* Flash BIOS address */ ++ __le16 flash_data; /* Flash BIOS data */ ++ __le16 unused_1[1]; /* Gap */ ++ __le16 ctrl_status; /* Control/Status */ + #define CSR_FLASH_64K_BANK BIT_3 /* Flash upper 64K bank select */ + #define CSR_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable */ + #define CSR_ISP_SOFT_RESET BIT_0 /* ISP soft reset */ + +- uint16_t ictrl; /* Interrupt control */ ++ __le16 ictrl; /* Interrupt control */ + #define ICR_EN_INT BIT_15 /* ISP enable interrupts. */ + #define ICR_EN_RISC BIT_3 /* ISP enable RISC interrupts. */ + +- uint16_t istatus; /* Interrupt status */ ++ __le16 istatus; /* Interrupt status */ + #define ISR_RISC_INT BIT_3 /* RISC interrupt */ + +- uint16_t semaphore; /* Semaphore */ +- uint16_t nvram; /* NVRAM register. */ ++ __le16 semaphore; /* Semaphore */ ++ __le16 nvram; /* NVRAM register. 
*/ + #define NVR_DESELECT 0 + #define NVR_BUSY BIT_15 + #define NVR_WRT_ENABLE BIT_14 /* Write enable */ +@@ -728,80 +728,80 @@ struct device_reg_2xxx { + + union { + struct { +- uint16_t mailbox0; +- uint16_t mailbox1; +- uint16_t mailbox2; +- uint16_t mailbox3; +- uint16_t mailbox4; +- uint16_t mailbox5; +- uint16_t mailbox6; +- uint16_t mailbox7; +- uint16_t unused_2[59]; /* Gap */ ++ __le16 mailbox0; ++ __le16 mailbox1; ++ __le16 mailbox2; ++ __le16 mailbox3; ++ __le16 mailbox4; ++ __le16 mailbox5; ++ __le16 mailbox6; ++ __le16 mailbox7; ++ __le16 unused_2[59]; /* Gap */ + } __attribute__((packed)) isp2100; + struct { + /* Request Queue */ +- uint16_t req_q_in; /* In-Pointer */ +- uint16_t req_q_out; /* Out-Pointer */ ++ __le16 req_q_in; /* In-Pointer */ ++ __le16 req_q_out; /* Out-Pointer */ + /* Response Queue */ +- uint16_t rsp_q_in; /* In-Pointer */ +- uint16_t rsp_q_out; /* Out-Pointer */ ++ __le16 rsp_q_in; /* In-Pointer */ ++ __le16 rsp_q_out; /* Out-Pointer */ + + /* RISC to Host Status */ +- uint32_t host_status; ++ __le32 host_status; + #define HSR_RISC_INT BIT_15 /* RISC interrupt */ + #define HSR_RISC_PAUSED BIT_8 /* RISC Paused */ + + /* Host to Host Semaphore */ +- uint16_t host_semaphore; +- uint16_t unused_3[17]; /* Gap */ +- uint16_t mailbox0; +- uint16_t mailbox1; +- uint16_t mailbox2; +- uint16_t mailbox3; +- uint16_t mailbox4; +- uint16_t mailbox5; +- uint16_t mailbox6; +- uint16_t mailbox7; +- uint16_t mailbox8; +- uint16_t mailbox9; +- uint16_t mailbox10; +- uint16_t mailbox11; +- uint16_t mailbox12; +- uint16_t mailbox13; +- uint16_t mailbox14; +- uint16_t mailbox15; +- uint16_t mailbox16; +- uint16_t mailbox17; +- uint16_t mailbox18; +- uint16_t mailbox19; +- uint16_t mailbox20; +- uint16_t mailbox21; +- uint16_t mailbox22; +- uint16_t mailbox23; +- uint16_t mailbox24; +- uint16_t mailbox25; +- uint16_t mailbox26; +- uint16_t mailbox27; +- uint16_t mailbox28; +- uint16_t mailbox29; +- uint16_t mailbox30; +- uint16_t mailbox31; +- uint16_t fb_cmd; +- uint16_t unused_4[10]; /* Gap */ ++ __le16 host_semaphore; ++ __le16 unused_3[17]; /* Gap */ ++ __le16 mailbox0; ++ __le16 mailbox1; ++ __le16 mailbox2; ++ __le16 mailbox3; ++ __le16 mailbox4; ++ __le16 mailbox5; ++ __le16 mailbox6; ++ __le16 mailbox7; ++ __le16 mailbox8; ++ __le16 mailbox9; ++ __le16 mailbox10; ++ __le16 mailbox11; ++ __le16 mailbox12; ++ __le16 mailbox13; ++ __le16 mailbox14; ++ __le16 mailbox15; ++ __le16 mailbox16; ++ __le16 mailbox17; ++ __le16 mailbox18; ++ __le16 mailbox19; ++ __le16 mailbox20; ++ __le16 mailbox21; ++ __le16 mailbox22; ++ __le16 mailbox23; ++ __le16 mailbox24; ++ __le16 mailbox25; ++ __le16 mailbox26; ++ __le16 mailbox27; ++ __le16 mailbox28; ++ __le16 mailbox29; ++ __le16 mailbox30; ++ __le16 mailbox31; ++ __le16 fb_cmd; ++ __le16 unused_4[10]; /* Gap */ + } __attribute__((packed)) isp2300; + } u; + +- uint16_t fpm_diag_config; +- uint16_t unused_5[0x4]; /* Gap */ +- uint16_t risc_hw; +- uint16_t unused_5_1; /* Gap */ +- uint16_t pcr; /* Processor Control Register. */ +- uint16_t unused_6[0x5]; /* Gap */ +- uint16_t mctr; /* Memory Configuration and Timing. */ +- uint16_t unused_7[0x3]; /* Gap */ +- uint16_t fb_cmd_2100; /* Unused on 23XX */ +- uint16_t unused_8[0x3]; /* Gap */ +- uint16_t hccr; /* Host command & control register. */ ++ __le16 fpm_diag_config; ++ __le16 unused_5[0x4]; /* Gap */ ++ __le16 risc_hw; ++ __le16 unused_5_1; /* Gap */ ++ __le16 pcr; /* Processor Control Register. 
*/ ++ __le16 unused_6[0x5]; /* Gap */ ++ __le16 mctr; /* Memory Configuration and Timing. */ ++ __le16 unused_7[0x3]; /* Gap */ ++ __le16 fb_cmd_2100; /* Unused on 23XX */ ++ __le16 unused_8[0x3]; /* Gap */ ++ __le16 hccr; /* Host command & control register. */ + #define HCCR_HOST_INT BIT_7 /* Host interrupt bit */ + #define HCCR_RISC_PAUSE BIT_5 /* Pause mode bit */ + /* HCCR commands */ +@@ -814,9 +814,9 @@ struct device_reg_2xxx { + #define HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */ + #define HCCR_ENABLE_PARITY 0xA000 /* Enable PARITY interrupt */ + +- uint16_t unused_9[5]; /* Gap */ +- uint16_t gpiod; /* GPIO Data register. */ +- uint16_t gpioe; /* GPIO Enable register. */ ++ __le16 unused_9[5]; /* Gap */ ++ __le16 gpiod; /* GPIO Data register. */ ++ __le16 gpioe; /* GPIO Enable register. */ + #define GPIO_LED_MASK 0x00C0 + #define GPIO_LED_GREEN_OFF_AMBER_OFF 0x0000 + #define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 +@@ -828,95 +828,95 @@ struct device_reg_2xxx { + + union { + struct { +- uint16_t unused_10[8]; /* Gap */ +- uint16_t mailbox8; +- uint16_t mailbox9; +- uint16_t mailbox10; +- uint16_t mailbox11; +- uint16_t mailbox12; +- uint16_t mailbox13; +- uint16_t mailbox14; +- uint16_t mailbox15; +- uint16_t mailbox16; +- uint16_t mailbox17; +- uint16_t mailbox18; +- uint16_t mailbox19; +- uint16_t mailbox20; +- uint16_t mailbox21; +- uint16_t mailbox22; +- uint16_t mailbox23; /* Also probe reg. */ ++ __le16 unused_10[8]; /* Gap */ ++ __le16 mailbox8; ++ __le16 mailbox9; ++ __le16 mailbox10; ++ __le16 mailbox11; ++ __le16 mailbox12; ++ __le16 mailbox13; ++ __le16 mailbox14; ++ __le16 mailbox15; ++ __le16 mailbox16; ++ __le16 mailbox17; ++ __le16 mailbox18; ++ __le16 mailbox19; ++ __le16 mailbox20; ++ __le16 mailbox21; ++ __le16 mailbox22; ++ __le16 mailbox23; /* Also probe reg. 
*/ + } __attribute__((packed)) isp2200; + } u_end; + }; + + struct device_reg_25xxmq { +- uint32_t req_q_in; +- uint32_t req_q_out; +- uint32_t rsp_q_in; +- uint32_t rsp_q_out; +- uint32_t atio_q_in; +- uint32_t atio_q_out; ++ __le32 req_q_in; ++ __le32 req_q_out; ++ __le32 rsp_q_in; ++ __le32 rsp_q_out; ++ __le32 atio_q_in; ++ __le32 atio_q_out; + }; + + + struct device_reg_fx00 { +- uint32_t mailbox0; /* 00 */ +- uint32_t mailbox1; /* 04 */ +- uint32_t mailbox2; /* 08 */ +- uint32_t mailbox3; /* 0C */ +- uint32_t mailbox4; /* 10 */ +- uint32_t mailbox5; /* 14 */ +- uint32_t mailbox6; /* 18 */ +- uint32_t mailbox7; /* 1C */ +- uint32_t mailbox8; /* 20 */ +- uint32_t mailbox9; /* 24 */ +- uint32_t mailbox10; /* 28 */ +- uint32_t mailbox11; +- uint32_t mailbox12; +- uint32_t mailbox13; +- uint32_t mailbox14; +- uint32_t mailbox15; +- uint32_t mailbox16; +- uint32_t mailbox17; +- uint32_t mailbox18; +- uint32_t mailbox19; +- uint32_t mailbox20; +- uint32_t mailbox21; +- uint32_t mailbox22; +- uint32_t mailbox23; +- uint32_t mailbox24; +- uint32_t mailbox25; +- uint32_t mailbox26; +- uint32_t mailbox27; +- uint32_t mailbox28; +- uint32_t mailbox29; +- uint32_t mailbox30; +- uint32_t mailbox31; +- uint32_t aenmailbox0; +- uint32_t aenmailbox1; +- uint32_t aenmailbox2; +- uint32_t aenmailbox3; +- uint32_t aenmailbox4; +- uint32_t aenmailbox5; +- uint32_t aenmailbox6; +- uint32_t aenmailbox7; ++ __le32 mailbox0; /* 00 */ ++ __le32 mailbox1; /* 04 */ ++ __le32 mailbox2; /* 08 */ ++ __le32 mailbox3; /* 0C */ ++ __le32 mailbox4; /* 10 */ ++ __le32 mailbox5; /* 14 */ ++ __le32 mailbox6; /* 18 */ ++ __le32 mailbox7; /* 1C */ ++ __le32 mailbox8; /* 20 */ ++ __le32 mailbox9; /* 24 */ ++ __le32 mailbox10; /* 28 */ ++ __le32 mailbox11; ++ __le32 mailbox12; ++ __le32 mailbox13; ++ __le32 mailbox14; ++ __le32 mailbox15; ++ __le32 mailbox16; ++ __le32 mailbox17; ++ __le32 mailbox18; ++ __le32 mailbox19; ++ __le32 mailbox20; ++ __le32 mailbox21; ++ __le32 mailbox22; ++ __le32 mailbox23; ++ __le32 mailbox24; ++ __le32 mailbox25; ++ __le32 mailbox26; ++ __le32 mailbox27; ++ __le32 mailbox28; ++ __le32 mailbox29; ++ __le32 mailbox30; ++ __le32 mailbox31; ++ __le32 aenmailbox0; ++ __le32 aenmailbox1; ++ __le32 aenmailbox2; ++ __le32 aenmailbox3; ++ __le32 aenmailbox4; ++ __le32 aenmailbox5; ++ __le32 aenmailbox6; ++ __le32 aenmailbox7; + /* Request Queue. */ +- uint32_t req_q_in; /* A0 - Request Queue In-Pointer */ +- uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */ ++ __le32 req_q_in; /* A0 - Request Queue In-Pointer */ ++ __le32 req_q_out; /* A4 - Request Queue Out-Pointer */ + /* Response Queue. 
*/ +- uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */ +- uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */ ++ __le32 rsp_q_in; /* A8 - Response Queue In-Pointer */ ++ __le32 rsp_q_out; /* AC - Response Queue Out-Pointer */ + /* Init values shadowed on FW Up Event */ +- uint32_t initval0; /* B0 */ +- uint32_t initval1; /* B4 */ +- uint32_t initval2; /* B8 */ +- uint32_t initval3; /* BC */ +- uint32_t initval4; /* C0 */ +- uint32_t initval5; /* C4 */ +- uint32_t initval6; /* C8 */ +- uint32_t initval7; /* CC */ +- uint32_t fwheartbeat; /* D0 */ +- uint32_t pseudoaen; /* D4 */ ++ __le32 initval0; /* B0 */ ++ __le32 initval1; /* B4 */ ++ __le32 initval2; /* B8 */ ++ __le32 initval3; /* BC */ ++ __le32 initval4; /* C0 */ ++ __le32 initval5; /* C4 */ ++ __le32 initval6; /* C8 */ ++ __le32 initval7; /* CC */ ++ __le32 fwheartbeat; /* D0 */ ++ __le32 pseudoaen; /* D4 */ + }; + + +@@ -1351,7 +1351,7 @@ typedef struct { + uint8_t port_id[4]; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; +- uint16_t execution_throttle; ++ __le16 execution_throttle; + uint16_t execution_count; + uint8_t reset_count; + uint8_t reserved_2; +@@ -1437,9 +1437,9 @@ typedef struct { + */ + uint8_t firmware_options[2]; + +- uint16_t frame_payload_size; +- uint16_t max_iocb_allocation; +- uint16_t execution_throttle; ++ __le16 frame_payload_size; ++ __le16 max_iocb_allocation; ++ __le16 execution_throttle; + uint8_t retry_count; + uint8_t retry_delay; /* unused */ + uint8_t port_name[WWN_SIZE]; /* Big endian. */ +@@ -1448,17 +1448,17 @@ typedef struct { + uint8_t login_timeout; + uint8_t node_name[WWN_SIZE]; /* Big endian. */ + +- uint16_t request_q_outpointer; +- uint16_t response_q_inpointer; +- uint16_t request_q_length; +- uint16_t response_q_length; +- __le64 request_q_address __packed; +- __le64 response_q_address __packed; ++ __le16 request_q_outpointer; ++ __le16 response_q_inpointer; ++ __le16 request_q_length; ++ __le16 response_q_length; ++ __le64 request_q_address __packed; ++ __le64 response_q_address __packed; + +- uint16_t lun_enables; ++ __le16 lun_enables; + uint8_t command_resource_count; + uint8_t immediate_notify_resource_count; +- uint16_t timeout; ++ __le16 timeout; + uint8_t reserved_2[2]; + + /* +@@ -1606,8 +1606,8 @@ typedef struct { + uint8_t firmware_options[2]; + + uint16_t frame_payload_size; +- uint16_t max_iocb_allocation; +- uint16_t execution_throttle; ++ __le16 max_iocb_allocation; ++ __le16 execution_throttle; + uint8_t retry_count; + uint8_t retry_delay; /* unused */ + uint8_t port_name[WWN_SIZE]; /* Big endian. */ +@@ -1731,7 +1731,7 @@ typedef struct { + uint8_t reset_delay; + uint8_t port_down_retry_count; + uint8_t boot_id_number; +- uint16_t max_luns_per_target; ++ __le16 max_luns_per_target; + uint8_t fcode_boot_port_name[WWN_SIZE]; + uint8_t alternate_port_name[WWN_SIZE]; + uint8_t alternate_node_name[WWN_SIZE]; +@@ -1837,7 +1837,7 @@ struct atio { + }; + + typedef union { +- uint16_t extended; ++ __le16 extended; + struct { + uint8_t reserved; + uint8_t standard; +@@ -1863,18 +1863,18 @@ typedef struct { + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System handle. */ + target_id_t target; /* SCSI ID */ +- uint16_t lun; /* SCSI LUN */ +- uint16_t control_flags; /* Control flags. */ ++ __le16 lun; /* SCSI LUN */ ++ __le16 control_flags; /* Control flags. 
*/ + #define CF_WRITE BIT_6 + #define CF_READ BIT_5 + #define CF_SIMPLE_TAG BIT_3 + #define CF_ORDERED_TAG BIT_2 + #define CF_HEAD_TAG BIT_1 + uint16_t reserved_1; +- uint16_t timeout; /* Command timeout. */ +- uint16_t dseg_count; /* Data segment count. */ ++ __le16 timeout; /* Command timeout. */ ++ __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ +- uint32_t byte_count; /* Total byte count. */ ++ __le32 byte_count; /* Total byte count. */ + union { + struct dsd32 dsd32[3]; + struct dsd64 dsd64[2]; +@@ -1892,11 +1892,11 @@ typedef struct { + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System handle. */ + target_id_t target; /* SCSI ID */ +- uint16_t lun; /* SCSI LUN */ +- uint16_t control_flags; /* Control flags. */ ++ __le16 lun; /* SCSI LUN */ ++ __le16 control_flags; /* Control flags. */ + uint16_t reserved_1; +- uint16_t timeout; /* Command timeout. */ +- uint16_t dseg_count; /* Data segment count. */ ++ __le16 timeout; /* Command timeout. */ ++ __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ + uint32_t byte_count; /* Total byte count. */ + struct dsd64 dsd[2]; +@@ -1958,7 +1958,7 @@ struct crc_context { + __le16 guard_seed; /* Initial Guard Seed */ + __le16 prot_opts; /* Requested Data Protection Mode */ + __le16 blk_size; /* Data size in bytes */ +- uint16_t runt_blk_guard; /* Guard value for runt block (tape ++ __le16 runt_blk_guard; /* Guard value for runt block (tape + * only) */ + __le32 byte_count; /* Total byte count/ total data + * transfer count */ +@@ -2011,13 +2011,13 @@ typedef struct { + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System handle. */ +- uint16_t scsi_status; /* SCSI status. */ +- uint16_t comp_status; /* Completion status. */ +- uint16_t state_flags; /* State flags. */ +- uint16_t status_flags; /* Status flags. */ +- uint16_t rsp_info_len; /* Response Info Length. */ +- uint16_t req_sense_length; /* Request sense data length. */ +- uint32_t residual_length; /* Residual transfer length. */ ++ __le16 scsi_status; /* SCSI status. */ ++ __le16 comp_status; /* Completion status. */ ++ __le16 state_flags; /* State flags. */ ++ __le16 status_flags; /* Status flags. */ ++ __le16 rsp_info_len; /* Response Info Length. */ ++ __le16 req_sense_length; /* Request sense data length. */ ++ __le32 residual_length; /* Residual transfer length. */ + uint8_t rsp_info[8]; /* FCP response information. */ + uint8_t req_sense_data[32]; /* Request sense data. */ + } sts_entry_t; +@@ -2149,8 +2149,8 @@ typedef struct { + /* clear port changed, */ + /* use sequence number. */ + uint8_t reserved_1; +- uint16_t sequence_number; /* Sequence number of event */ +- uint16_t lun; /* SCSI LUN */ ++ __le16 sequence_number; /* Sequence number of event */ ++ __le16 lun; /* SCSI LUN */ + uint8_t reserved_2[48]; + } mrk_entry_t; + +@@ -2165,19 +2165,19 @@ typedef struct { + uint8_t entry_status; /* Entry Status. */ + uint32_t handle1; /* System handle. */ + target_id_t loop_id; +- uint16_t status; +- uint16_t control_flags; /* Control flags. */ ++ __le16 status; ++ __le16 control_flags; /* Control flags. 
*/ + uint16_t reserved2; +- uint16_t timeout; +- uint16_t cmd_dsd_count; +- uint16_t total_dsd_count; ++ __le16 timeout; ++ __le16 cmd_dsd_count; ++ __le16 total_dsd_count; + uint8_t type; + uint8_t r_ctl; +- uint16_t rx_id; ++ __le16 rx_id; + uint16_t reserved3; + uint32_t handle2; +- uint32_t rsp_bytecount; +- uint32_t req_bytecount; ++ __le32 rsp_bytecount; ++ __le32 req_bytecount; + struct dsd64 req_dsd; + struct dsd64 rsp_dsd; + } ms_iocb_entry_t; +@@ -2205,20 +2205,20 @@ struct mbx_entry { + uint32_t handle; + target_id_t loop_id; + +- uint16_t status; +- uint16_t state_flags; +- uint16_t status_flags; ++ __le16 status; ++ __le16 state_flags; ++ __le16 status_flags; + + uint32_t sys_define2[2]; + +- uint16_t mb0; +- uint16_t mb1; +- uint16_t mb2; +- uint16_t mb3; +- uint16_t mb6; +- uint16_t mb7; +- uint16_t mb9; +- uint16_t mb10; ++ __le16 mb0; ++ __le16 mb1; ++ __le16 mb2; ++ __le16 mb3; ++ __le16 mb6; ++ __le16 mb7; ++ __le16 mb9; ++ __le16 mb10; + uint32_t reserved_2[2]; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; +@@ -2240,52 +2240,52 @@ struct imm_ntfy_from_isp { + uint8_t entry_status; /* Entry Status. */ + union { + struct { +- uint32_t sys_define_2; /* System defined. */ ++ __le32 sys_define_2; /* System defined. */ + target_id_t target; +- uint16_t lun; ++ __le16 lun; + uint8_t target_id; + uint8_t reserved_1; +- uint16_t status_modifier; +- uint16_t status; +- uint16_t task_flags; +- uint16_t seq_id; +- uint16_t srr_rx_id; +- uint32_t srr_rel_offs; +- uint16_t srr_ui; ++ __le16 status_modifier; ++ __le16 status; ++ __le16 task_flags; ++ __le16 seq_id; ++ __le16 srr_rx_id; ++ __le32 srr_rel_offs; ++ __le16 srr_ui; + #define SRR_IU_DATA_IN 0x1 + #define SRR_IU_DATA_OUT 0x5 + #define SRR_IU_STATUS 0x7 +- uint16_t srr_ox_id; ++ __le16 srr_ox_id; + uint8_t reserved_2[28]; + } isp2x; + struct { + uint32_t reserved; +- uint16_t nport_handle; ++ __le16 nport_handle; + uint16_t reserved_2; +- uint16_t flags; ++ __le16 flags; + #define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 + #define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 +- uint16_t srr_rx_id; +- uint16_t status; ++ __le16 srr_rx_id; ++ __le16 status; + uint8_t status_subcode; + uint8_t fw_handle; +- uint32_t exchange_address; +- uint32_t srr_rel_offs; +- uint16_t srr_ui; +- uint16_t srr_ox_id; ++ __le32 exchange_address; ++ __le32 srr_rel_offs; ++ __le16 srr_ui; ++ __le16 srr_ox_id; + union { + struct { + uint8_t node_name[8]; + } plogi; /* PLOGI/ADISC/PDISC */ + struct { + /* PRLI word 3 bit 0-15 */ +- uint16_t wd3_lo; ++ __le16 wd3_lo; + uint8_t resv0[6]; + } prli; + struct { + uint8_t port_id[3]; + uint8_t resv1; +- uint16_t nport_handle; ++ __le16 nport_handle; + uint16_t resv2; + } req_els; + } u; +@@ -2298,7 +2298,7 @@ struct imm_ntfy_from_isp { + } isp24; + } u; + uint16_t reserved_7; +- uint16_t ox_id; ++ __le16 ox_id; + } __packed; + #endif + +@@ -2688,8 +2688,8 @@ static const char * const port_dstate_st + #define FDMI_HBA_VENDOR_IDENTIFIER 0xe0 + + struct ct_fdmi_hba_attr { +- uint16_t type; +- uint16_t len; ++ __be16 type; ++ __be16 len; + union { + uint8_t node_name[WWN_SIZE]; + uint8_t manufacturer[64]; +@@ -2701,11 +2701,11 @@ struct ct_fdmi_hba_attr { + uint8_t orom_version[16]; + uint8_t fw_version[32]; + uint8_t os_version[128]; +- uint32_t max_ct_len; ++ __be32 max_ct_len; + + uint8_t sym_name[256]; +- uint32_t vendor_specific_info; +- uint32_t num_ports; ++ __be32 vendor_specific_info; ++ __be32 num_ports; + uint8_t fabric_name[WWN_SIZE]; + uint8_t bios_name[32]; + uint8_t vendor_identifier[8]; +@@ 
-2713,12 +2713,12 @@ struct ct_fdmi_hba_attr { + }; + + struct ct_fdmi1_hba_attributes { +- uint32_t count; ++ __be32 count; + struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT]; + }; + + struct ct_fdmi2_hba_attributes { +- uint32_t count; ++ __be32 count; + struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT]; + }; + +@@ -2770,44 +2770,44 @@ struct ct_fdmi2_hba_attributes { + #define FC_CLASS_2_3 0x0C + + struct ct_fdmi_port_attr { +- uint16_t type; +- uint16_t len; ++ __be16 type; ++ __be16 len; + union { + uint8_t fc4_types[32]; +- uint32_t sup_speed; +- uint32_t cur_speed; +- uint32_t max_frame_size; ++ __be32 sup_speed; ++ __be32 cur_speed; ++ __be32 max_frame_size; + uint8_t os_dev_name[32]; + uint8_t host_name[256]; + + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + uint8_t port_sym_name[128]; +- uint32_t port_type; +- uint32_t port_supported_cos; ++ __be32 port_type; ++ __be32 port_supported_cos; + uint8_t fabric_name[WWN_SIZE]; + uint8_t port_fc4_type[32]; +- uint32_t port_state; +- uint32_t num_ports; +- uint32_t port_id; ++ __be32 port_state; ++ __be32 num_ports; ++ __be32 port_id; + + uint8_t smartsan_service[24]; + uint8_t smartsan_guid[16]; + uint8_t smartsan_version[24]; + uint8_t smartsan_prod_name[16]; +- uint32_t smartsan_port_info; +- uint32_t smartsan_qos_support; +- uint32_t smartsan_security_support; ++ __be32 smartsan_port_info; ++ __be32 smartsan_qos_support; ++ __be32 smartsan_security_support; + } a; + }; + + struct ct_fdmi1_port_attributes { +- uint32_t count; ++ __be32 count; + struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT]; + }; + + struct ct_fdmi2_port_attributes { +- uint32_t count; ++ __be32 count; + struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT]; + }; + +@@ -2861,8 +2861,8 @@ struct ct_cmd_hdr { + /* CT command request */ + struct ct_sns_req { + struct ct_cmd_hdr header; +- uint16_t command; +- uint16_t max_rsp_size; ++ __be16 command; ++ __be16 max_rsp_size; + uint8_t fragment_id; + uint8_t reserved[3]; + +@@ -2919,7 +2919,7 @@ struct ct_sns_req { + + struct { + uint8_t hba_identifier[8]; +- uint32_t entry_count; ++ __be32 entry_count; + uint8_t port_name[8]; + struct ct_fdmi2_hba_attributes attrs; + } rhba; +@@ -2974,7 +2974,7 @@ struct ct_sns_req { + /* CT command response header */ + struct ct_rsp_hdr { + struct ct_cmd_hdr header; +- uint16_t response; ++ __be16 response; + uint16_t residual; + uint8_t fragment_id; + uint8_t reason_code; +@@ -3060,8 +3060,8 @@ struct ct_sns_rsp { + } gfpn_id; + + struct { +- uint16_t speeds; +- uint16_t speed; ++ __be16 speeds; ++ __be16 speed; + } gpsc; + + #define GFF_FCP_SCSI_OFFSET 7 +@@ -3151,13 +3151,13 @@ struct fab_scan { + struct sns_cmd_pkt { + union { + struct { +- uint16_t buffer_length; +- uint16_t reserved_1; +- __le64 buffer_address __packed; +- uint16_t subcommand_length; +- uint16_t reserved_2; +- uint16_t subcommand; +- uint16_t size; ++ __le16 buffer_length; ++ __le16 reserved_1; ++ __le64 buffer_address __packed; ++ __le16 subcommand_length; ++ __le16 reserved_2; ++ __le16 subcommand; ++ __le16 size; + uint32_t reserved_3; + uint8_t param[36]; + } cmd; +@@ -3183,7 +3183,7 @@ struct gid_list_info { + uint8_t area; + uint8_t domain; + uint8_t loop_id_2100; /* ISP2100/ISP2200 -- 4 bytes. */ +- uint16_t loop_id; /* ISP23XX -- 6 bytes. */ ++ __le16 loop_id; /* ISP23XX -- 6 bytes. */ + uint16_t reserved_1; /* ISP24XX -- 8 bytes. 
*/ + }; + +@@ -3492,8 +3492,8 @@ struct rsp_que { + dma_addr_t dma; + response_t *ring; + response_t *ring_ptr; +- uint32_t __iomem *rsp_q_in; /* FWI2-capable only. */ +- uint32_t __iomem *rsp_q_out; ++ __le32 __iomem *rsp_q_in; /* FWI2-capable only. */ ++ __le32 __iomem *rsp_q_out; + uint16_t ring_index; + uint16_t out_ptr; + uint16_t *in_ptr; /* queue shadow in index */ +@@ -3519,8 +3519,8 @@ struct req_que { + dma_addr_t dma; + request_t *ring; + request_t *ring_ptr; +- uint32_t __iomem *req_q_in; /* FWI2-capable only. */ +- uint32_t __iomem *req_q_out; ++ __le32 __iomem *req_q_in; /* FWI2-capable only. */ ++ __le32 __iomem *req_q_out; + uint16_t ring_index; + uint16_t in_ptr; + uint16_t *out_ptr; /* queue shadow out index */ +@@ -3588,7 +3588,7 @@ struct qla_qpair { + struct list_head hints_list; + uint16_t cpuid; + uint16_t retry_term_cnt; +- uint32_t retry_term_exchg_addr; ++ __le32 retry_term_exchg_addr; + uint64_t retry_term_jiff; + struct qla_tgt_counters tgt_counters; + }; +@@ -3615,98 +3615,98 @@ struct rdp_req_payload { + + struct rdp_rsp_payload { + struct { +- uint32_t cmd; +- uint32_t len; ++ __be32 cmd; ++ __be32 len; + } hdr; + + /* LS Request Info descriptor */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; +- uint32_t req_payload_word_0; ++ __be32 desc_tag; ++ __be32 desc_len; ++ __be32 req_payload_word_0; + } ls_req_info_desc; + + /* LS Request Info descriptor */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; +- uint32_t req_payload_word_0; ++ __be32 desc_tag; ++ __be32 desc_len; ++ __be32 req_payload_word_0; + } ls_req_info_desc2; + + /* SFP diagnostic param descriptor */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; +- uint16_t temperature; +- uint16_t vcc; +- uint16_t tx_bias; +- uint16_t tx_power; +- uint16_t rx_power; +- uint16_t sfp_flags; ++ __be32 desc_tag; ++ __be32 desc_len; ++ __be16 temperature; ++ __be16 vcc; ++ __be16 tx_bias; ++ __be16 tx_power; ++ __be16 rx_power; ++ __be16 sfp_flags; + } sfp_diag_desc; + + /* Port Speed Descriptor */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; +- uint16_t speed_capab; +- uint16_t operating_speed; ++ __be32 desc_tag; ++ __be32 desc_len; ++ __be16 speed_capab; ++ __be16 operating_speed; + } port_speed_desc; + + /* Link Error Status Descriptor */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; +- uint32_t link_fail_cnt; +- uint32_t loss_sync_cnt; +- uint32_t loss_sig_cnt; +- uint32_t prim_seq_err_cnt; +- uint32_t inval_xmit_word_cnt; +- uint32_t inval_crc_cnt; ++ __be32 desc_tag; ++ __be32 desc_len; ++ __be32 link_fail_cnt; ++ __be32 loss_sync_cnt; ++ __be32 loss_sig_cnt; ++ __be32 prim_seq_err_cnt; ++ __be32 inval_xmit_word_cnt; ++ __be32 inval_crc_cnt; + uint8_t pn_port_phy_type; + uint8_t reserved[3]; + } ls_err_desc; + + /* Port name description with diag param */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; ++ __be32 desc_tag; ++ __be32 desc_len; + uint8_t WWNN[WWN_SIZE]; + uint8_t WWPN[WWN_SIZE]; + } port_name_diag_desc; + + /* Port Name desc for Direct attached Fx_Port or Nx_Port */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; ++ __be32 desc_tag; ++ __be32 desc_len; + uint8_t WWNN[WWN_SIZE]; + uint8_t WWPN[WWN_SIZE]; + } port_name_direct_desc; + + /* Buffer Credit descriptor */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; +- uint32_t fcport_b2b; +- uint32_t attached_fcport_b2b; +- uint32_t fcport_rtt; ++ __be32 desc_tag; ++ __be32 desc_len; ++ __be32 fcport_b2b; ++ __be32 attached_fcport_b2b; ++ __be32 fcport_rtt; + } buffer_credit_desc; + + /* 
Optical Element Data Descriptor */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; +- uint16_t high_alarm; +- uint16_t low_alarm; +- uint16_t high_warn; +- uint16_t low_warn; +- uint32_t element_flags; ++ __be32 desc_tag; ++ __be32 desc_len; ++ __be16 high_alarm; ++ __be16 low_alarm; ++ __be16 high_warn; ++ __be16 low_warn; ++ __be32 element_flags; + } optical_elmt_desc[5]; + + /* Optical Product Data Descriptor */ + struct { +- uint32_t desc_tag; +- uint32_t desc_len; ++ __be32 desc_tag; ++ __be32 desc_len; + uint8_t vendor_name[16]; + uint8_t part_number[16]; + uint8_t serial_number[16]; +@@ -3744,17 +3744,17 @@ struct qlt_hw_data { + struct atio *atio_ring_ptr; /* Current address. */ + uint16_t atio_ring_index; /* Current index. */ + uint16_t atio_q_length; +- uint32_t __iomem *atio_q_in; +- uint32_t __iomem *atio_q_out; ++ __le32 __iomem *atio_q_in; ++ __le32 __iomem *atio_q_out; + + struct qla_tgt_func_tmpl *tgt_ops; + struct qla_tgt_vp_map *tgt_vp_map; + + int saved_set; +- uint16_t saved_exchange_count; +- uint32_t saved_firmware_options_1; +- uint32_t saved_firmware_options_2; +- uint32_t saved_firmware_options_3; ++ __le16 saved_exchange_count; ++ __le32 saved_firmware_options_1; ++ __le32 saved_firmware_options_2; ++ __le32 saved_firmware_options_3; + uint8_t saved_firmware_options[2]; + uint8_t saved_add_firmware_options[2]; + +@@ -4253,7 +4253,7 @@ struct qla_hw_data { + + uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ + uint8_t fw_seriallink_options[4]; +- uint16_t fw_seriallink_options24[4]; ++ __le16 fw_seriallink_options24[4]; + + uint8_t serdes_version[3]; + uint8_t mpi_version[3]; +@@ -4436,7 +4436,7 @@ struct qla_hw_data { + #define NUM_DSD_CHAIN 4096 + + uint8_t fw_type; +- __le32 file_prd_off; /* File firmware product offset */ ++ uint32_t file_prd_off; /* File firmware product offset */ + + uint32_t md_template_size; + void *md_tmplt_hdr; +@@ -4744,13 +4744,13 @@ typedef struct scsi_qla_host { + + struct qla27xx_image_status { + uint8_t image_status_mask; +- uint16_t generation; ++ __le16 generation; + uint8_t ver_major; + uint8_t ver_minor; + uint8_t bitmap; /* 28xx only */ + uint8_t reserved[2]; +- uint32_t checksum; +- uint32_t signature; ++ __le32 checksum; ++ __le32 signature; + } __packed; + + /* 28xx aux image status bimap values */ +--- a/drivers/scsi/qla2xxx/qla_fw.h ++++ b/drivers/scsi/qla2xxx/qla_fw.h +@@ -134,28 +134,28 @@ struct vp_database_24xx { + struct nvram_24xx { + /* NVRAM header. */ + uint8_t id[4]; +- uint16_t nvram_version; ++ __le16 nvram_version; + uint16_t reserved_0; + + /* Firmware Initialization Control Block. */ +- uint16_t version; ++ __le16 version; + uint16_t reserved_1; +- __le16 frame_payload_size; +- uint16_t execution_throttle; +- uint16_t exchange_count; +- uint16_t hard_address; ++ __le16 frame_payload_size; ++ __le16 execution_throttle; ++ __le16 exchange_count; ++ __le16 hard_address; + + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + +- uint16_t login_retry_count; +- uint16_t link_down_on_nos; +- uint16_t interrupt_delay_timer; +- uint16_t login_timeout; +- +- uint32_t firmware_options_1; +- uint32_t firmware_options_2; +- uint32_t firmware_options_3; ++ __le16 login_retry_count; ++ __le16 link_down_on_nos; ++ __le16 interrupt_delay_timer; ++ __le16 login_timeout; ++ ++ __le32 firmware_options_1; ++ __le32 firmware_options_2; ++ __le32 firmware_options_3; + + /* Offset 56. 
*/ + +@@ -178,7 +178,7 @@ struct nvram_24xx { + * BIT 11-13 = Output Emphasis 4G + * BIT 14-15 = Reserved + */ +- uint16_t seriallink_options[4]; ++ __le16 seriallink_options[4]; + + uint16_t reserved_2[16]; + +@@ -218,25 +218,25 @@ struct nvram_24xx { + * + * BIT 16-31 = + */ +- uint32_t host_p; ++ __le32 host_p; + + uint8_t alternate_port_name[WWN_SIZE]; + uint8_t alternate_node_name[WWN_SIZE]; + + uint8_t boot_port_name[WWN_SIZE]; +- uint16_t boot_lun_number; ++ __le16 boot_lun_number; + uint16_t reserved_8; + + uint8_t alt1_boot_port_name[WWN_SIZE]; +- uint16_t alt1_boot_lun_number; ++ __le16 alt1_boot_lun_number; + uint16_t reserved_9; + + uint8_t alt2_boot_port_name[WWN_SIZE]; +- uint16_t alt2_boot_lun_number; ++ __le16 alt2_boot_lun_number; + uint16_t reserved_10; + + uint8_t alt3_boot_port_name[WWN_SIZE]; +- uint16_t alt3_boot_lun_number; ++ __le16 alt3_boot_lun_number; + uint16_t reserved_11; + + /* +@@ -249,23 +249,23 @@ struct nvram_24xx { + * BIT 6 = Reserved + * BIT 7-31 = + */ +- uint32_t efi_parameters; ++ __le32 efi_parameters; + + uint8_t reset_delay; + uint8_t reserved_12; + uint16_t reserved_13; + +- uint16_t boot_id_number; ++ __le16 boot_id_number; + uint16_t reserved_14; + +- uint16_t max_luns_per_target; ++ __le16 max_luns_per_target; + uint16_t reserved_15; + +- uint16_t port_down_retry_count; +- uint16_t link_down_timeout; ++ __le16 port_down_retry_count; ++ __le16 link_down_timeout; + + /* FCode parameters. */ +- uint16_t fcode_parameter; ++ __le16 fcode_parameter; + + uint16_t reserved_16[3]; + +@@ -275,13 +275,13 @@ struct nvram_24xx { + uint8_t prev_drv_ver_minor; + uint8_t prev_drv_ver_subminor; + +- uint16_t prev_bios_ver_major; +- uint16_t prev_bios_ver_minor; ++ __le16 prev_bios_ver_major; ++ __le16 prev_bios_ver_minor; + +- uint16_t prev_efi_ver_major; +- uint16_t prev_efi_ver_minor; ++ __le16 prev_efi_ver_major; ++ __le16 prev_efi_ver_minor; + +- uint16_t prev_fw_ver_major; ++ __le16 prev_fw_ver_major; + uint8_t prev_fw_ver_minor; + uint8_t prev_fw_ver_subminor; + +@@ -309,7 +309,7 @@ struct nvram_24xx { + uint16_t subsystem_vendor_id; + uint16_t subsystem_device_id; + +- uint32_t checksum; ++ __le32 checksum; + }; + + /* +@@ -318,46 +318,46 @@ struct nvram_24xx { + */ + #define ICB_VERSION 1 + struct init_cb_24xx { +- uint16_t version; ++ __le16 version; + uint16_t reserved_1; + +- uint16_t frame_payload_size; +- uint16_t execution_throttle; +- uint16_t exchange_count; ++ __le16 frame_payload_size; ++ __le16 execution_throttle; ++ __le16 exchange_count; + +- uint16_t hard_address; ++ __le16 hard_address; + + uint8_t port_name[WWN_SIZE]; /* Big endian. */ + uint8_t node_name[WWN_SIZE]; /* Big endian. */ + +- uint16_t response_q_inpointer; +- uint16_t request_q_outpointer; ++ __le16 response_q_inpointer; ++ __le16 request_q_outpointer; + +- uint16_t login_retry_count; ++ __le16 login_retry_count; + +- uint16_t prio_request_q_outpointer; ++ __le16 prio_request_q_outpointer; + +- uint16_t response_q_length; +- uint16_t request_q_length; ++ __le16 response_q_length; ++ __le16 request_q_length; + +- uint16_t link_down_on_nos; /* Milliseconds. */ ++ __le16 link_down_on_nos; /* Milliseconds. 
*/ + +- uint16_t prio_request_q_length; ++ __le16 prio_request_q_length; + + __le64 request_q_address __packed; + __le64 response_q_address __packed; + __le64 prio_request_q_address __packed; + +- uint16_t msix; +- uint16_t msix_atio; ++ __le16 msix; ++ __le16 msix_atio; + uint8_t reserved_2[4]; + +- uint16_t atio_q_inpointer; +- uint16_t atio_q_length; +- __le64 atio_q_address __packed; ++ __le16 atio_q_inpointer; ++ __le16 atio_q_length; ++ __le64 atio_q_address __packed; + +- uint16_t interrupt_delay_timer; /* 100us increments. */ +- uint16_t login_timeout; ++ __le16 interrupt_delay_timer; /* 100us increments. */ ++ __le16 login_timeout; + + /* + * BIT 0 = Enable Hard Loop Id +@@ -378,7 +378,7 @@ struct init_cb_24xx { + * BIT 14 = Node Name Option + * BIT 15-31 = Reserved + */ +- uint32_t firmware_options_1; ++ __le32 firmware_options_1; + + /* + * BIT 0 = Operation Mode bit 0 +@@ -399,7 +399,7 @@ struct init_cb_24xx { + * BIT 14 = Enable Target PRLI Control + * BIT 15-31 = Reserved + */ +- uint32_t firmware_options_2; ++ __le32 firmware_options_2; + + /* + * BIT 0 = Reserved +@@ -425,9 +425,9 @@ struct init_cb_24xx { + * BIT 30 = Enable request queue 0 out index shadowing + * BIT 31 = Reserved + */ +- uint32_t firmware_options_3; +- uint16_t qos; +- uint16_t rid; ++ __le32 firmware_options_3; ++ __le16 qos; ++ __le16 rid; + uint8_t reserved_3[20]; + }; + +@@ -443,27 +443,27 @@ struct cmd_bidir { + + uint32_t handle; /* System handle. */ + +- uint16_t nport_handle; /* N_PORT hanlde. */ ++ __le16 nport_handle; /* N_PORT handle. */ + +- uint16_t timeout; /* Commnad timeout. */ ++ __le16 timeout; /* Command timeout. */ + +- uint16_t wr_dseg_count; /* Write Data segment count. */ +- uint16_t rd_dseg_count; /* Read Data segment count. */ ++ __le16 wr_dseg_count; /* Write Data segment count. */ ++ __le16 rd_dseg_count; /* Read Data segment count. */ + + struct scsi_lun lun; /* FCP LUN (BE). */ + +- uint16_t control_flags; /* Control flags. */ ++ __le16 control_flags; /* Control flags. */ + #define BD_WRAP_BACK BIT_3 + #define BD_READ_DATA BIT_1 + #define BD_WRITE_DATA BIT_0 + +- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */ ++ __le16 fcp_cmnd_dseg_len; /* Data segment length. */ + __le64 fcp_cmnd_dseg_address __packed;/* Data segment address. */ + + uint16_t reserved[2]; /* Reserved */ + +- uint32_t rd_byte_count; /* Total Byte count Read. */ +- uint32_t wr_byte_count; /* Total Byte count write. */ ++ __le32 rd_byte_count; /* Total Byte count Read. */ ++ __le32 wr_byte_count; /* Total Byte count write. */ + + uint8_t port_id[3]; /* PortID of destination port.*/ + uint8_t vp_index; +@@ -480,28 +480,28 @@ struct cmd_type_6 { + + uint32_t handle; /* System handle. */ + +- uint16_t nport_handle; /* N_PORT handle. */ +- uint16_t timeout; /* Command timeout. */ ++ __le16 nport_handle; /* N_PORT handle. */ ++ __le16 timeout; /* Command timeout. */ + +- uint16_t dseg_count; /* Data segment count. */ ++ __le16 dseg_count; /* Data segment count. */ + +- uint16_t fcp_rsp_dsd_len; /* FCP_RSP DSD length. */ ++ __le16 fcp_rsp_dsd_len; /* FCP_RSP DSD length. */ + + struct scsi_lun lun; /* FCP LUN (BE). */ + +- uint16_t control_flags; /* Control flags. */ ++ __le16 control_flags; /* Control flags. */ + #define CF_DIF_SEG_DESCR_ENABLE BIT_3 + #define CF_DATA_SEG_DESCR_ENABLE BIT_2 + #define CF_READ_DATA BIT_1 + #define CF_WRITE_DATA BIT_0 + +- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */ ++ __le16 fcp_cmnd_dseg_len; /* Data segment length. */ + /* Data segment address. 
*/ + __le64 fcp_cmnd_dseg_address __packed; + /* Data segment address. */ + __le64 fcp_rsp_dseg_address __packed; + +- uint32_t byte_count; /* Total byte count. */ ++ __le32 byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; +@@ -518,16 +518,16 @@ struct cmd_type_7 { + + uint32_t handle; /* System handle. */ + +- uint16_t nport_handle; /* N_PORT handle. */ +- uint16_t timeout; /* Command timeout. */ ++ __le16 nport_handle; /* N_PORT handle. */ ++ __le16 timeout; /* Command timeout. */ + #define FW_MAX_TIMEOUT 0x1999 + +- uint16_t dseg_count; /* Data segment count. */ ++ __le16 dseg_count; /* Data segment count. */ + uint16_t reserved_1; + + struct scsi_lun lun; /* FCP LUN (BE). */ + +- uint16_t task_mgmt_flags; /* Task management flags. */ ++ __le16 task_mgmt_flags; /* Task management flags. */ + #define TMF_CLEAR_ACA BIT_14 + #define TMF_TARGET_RESET BIT_13 + #define TMF_LUN_RESET BIT_12 +@@ -547,7 +547,7 @@ struct cmd_type_7 { + uint8_t crn; + + uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */ +- uint32_t byte_count; /* Total byte count. */ ++ __le32 byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; +@@ -565,29 +565,29 @@ struct cmd_type_crc_2 { + + uint32_t handle; /* System handle. */ + +- uint16_t nport_handle; /* N_PORT handle. */ +- uint16_t timeout; /* Command timeout. */ ++ __le16 nport_handle; /* N_PORT handle. */ ++ __le16 timeout; /* Command timeout. */ + +- uint16_t dseg_count; /* Data segment count. */ ++ __le16 dseg_count; /* Data segment count. */ + +- uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */ ++ __le16 fcp_rsp_dseg_len; /* FCP_RSP DSD length. */ + + struct scsi_lun lun; /* FCP LUN (BE). */ + +- uint16_t control_flags; /* Control flags. */ ++ __le16 control_flags; /* Control flags. */ + +- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */ ++ __le16 fcp_cmnd_dseg_len; /* Data segment length. */ + __le64 fcp_cmnd_dseg_address __packed; + /* Data segment address. */ + __le64 fcp_rsp_dseg_address __packed; + +- uint32_t byte_count; /* Total byte count. */ ++ __le32 byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; + + __le64 crc_context_address __packed; /* Data segment address. */ +- uint16_t crc_context_len; /* Data segment length. */ ++ __le16 crc_context_len; /* Data segment length. */ + uint16_t reserved_1; /* MUST be set to 0. */ + }; + +@@ -604,32 +604,32 @@ struct sts_entry_24xx { + + uint32_t handle; /* System handle. */ + +- uint16_t comp_status; /* Completion status. */ +- uint16_t ox_id; /* OX_ID used by the firmware. */ ++ __le16 comp_status; /* Completion status. */ ++ __le16 ox_id; /* OX_ID used by the firmware. */ + +- uint32_t residual_len; /* FW calc residual transfer length. */ ++ __le32 residual_len; /* FW calc residual transfer length. */ + + union { + uint16_t reserved_1; +- uint16_t nvme_rsp_pyld_len; ++ __le16 nvme_rsp_pyld_len; + }; + +- uint16_t state_flags; /* State flags. */ ++ __le16 state_flags; /* State flags. */ + #define SF_TRANSFERRED_DATA BIT_11 + #define SF_NVME_ERSP BIT_6 + #define SF_FCP_RSP_DMA BIT_0 + +- uint16_t retry_delay; +- uint16_t scsi_status; /* SCSI status. */ ++ __le16 retry_delay; ++ __le16 scsi_status; /* SCSI status. */ + #define SS_CONFIRMATION_REQ BIT_12 + +- uint32_t rsp_residual_count; /* FCP RSP residual count. */ ++ __le32 rsp_residual_count; /* FCP RSP residual count. 
*/ + +- uint32_t sense_len; /* FCP SENSE length. */ ++ __le32 sense_len; /* FCP SENSE length. */ + + union { + struct { +- uint32_t rsp_data_len; /* FCP response data length */ ++ __le32 rsp_data_len; /* FCP response data length */ + uint8_t data[28]; /* FCP rsp/sense information */ + }; + struct nvme_fc_ersp_iu nvme_ersp; +@@ -672,7 +672,7 @@ struct mrk_entry_24xx { + + uint32_t handle; /* System handle. */ + +- uint16_t nport_handle; /* N_PORT handle. */ ++ __le16 nport_handle; /* N_PORT handle. */ + + uint8_t modifier; /* Modifier (7-0). */ + #define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */ +@@ -701,24 +701,24 @@ struct ct_entry_24xx { + + uint32_t handle; /* System handle. */ + +- uint16_t comp_status; /* Completion status. */ ++ __le16 comp_status; /* Completion status. */ + +- uint16_t nport_handle; /* N_PORT handle. */ ++ __le16 nport_handle; /* N_PORT handle. */ + +- uint16_t cmd_dsd_count; ++ __le16 cmd_dsd_count; + + uint8_t vp_index; + uint8_t reserved_1; + +- uint16_t timeout; /* Command timeout. */ ++ __le16 timeout; /* Command timeout. */ + uint16_t reserved_2; + +- uint16_t rsp_dsd_count; ++ __le16 rsp_dsd_count; + + uint8_t reserved_3[10]; + +- uint32_t rsp_byte_count; +- uint32_t cmd_byte_count; ++ __le32 rsp_byte_count; ++ __le32 cmd_byte_count; + + struct dsd64 dsd[2]; + }; +@@ -733,17 +733,17 @@ struct purex_entry_24xx { + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + +- uint16_t reserved1; ++ __le16 reserved1; + uint8_t vp_idx; + uint8_t reserved2; + +- uint16_t status_flags; +- uint16_t nport_handle; ++ __le16 status_flags; ++ __le16 nport_handle; + +- uint16_t frame_size; +- uint16_t trunc_frame_size; ++ __le16 frame_size; ++ __le16 trunc_frame_size; + +- uint32_t rx_xchg_addr; ++ __le32 rx_xchg_addr; + + uint8_t d_id[3]; + uint8_t r_ctl; +@@ -754,13 +754,13 @@ struct purex_entry_24xx { + uint8_t f_ctl[3]; + uint8_t type; + +- uint16_t seq_cnt; ++ __le16 seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; + +- uint16_t rx_id; +- uint16_t ox_id; +- uint32_t param; ++ __le16 rx_id; ++ __le16 ox_id; ++ __le32 param; + + uint8_t els_frame_payload[20]; + }; +@@ -777,18 +777,18 @@ struct els_entry_24xx { + + uint32_t handle; /* System handle. */ + +- uint16_t comp_status; /* response only */ +- uint16_t nport_handle; ++ __le16 comp_status; /* response only */ ++ __le16 nport_handle; + +- uint16_t tx_dsd_count; ++ __le16 tx_dsd_count; + + uint8_t vp_index; + uint8_t sof_type; + #define EST_SOFI3 (1 << 4) + #define EST_SOFI2 (3 << 4) + +- uint32_t rx_xchg_address; /* Receive exchange address. */ +- uint16_t rx_dsd_count; ++ __le32 rx_xchg_address; /* Receive exchange address. */ ++ __le16 rx_dsd_count; + + uint8_t opcode; + uint8_t reserved_2; +@@ -796,7 +796,7 @@ struct els_entry_24xx { + uint8_t d_id[3]; + uint8_t s_id[3]; + +- uint16_t control_flags; /* Control flags. */ ++ __le16 control_flags; /* Control flags. */ + #define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13) + #define EPD_ELS_COMMAND (0 << 13) + #define EPD_ELS_ACC (1 << 13) +@@ -817,10 +817,10 @@ struct els_entry_24xx { + __le32 rx_len; /* DSD 1 length. */ + }; + struct { +- uint32_t total_byte_count; +- uint32_t error_subcode_1; +- uint32_t error_subcode_2; +- uint32_t error_subcode_3; ++ __le32 total_byte_count; ++ __le32 error_subcode_1; ++ __le32 error_subcode_2; ++ __le32 error_subcode_3; + }; + }; + }; +@@ -831,19 +831,19 @@ struct els_sts_entry_24xx { + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. 
*/ + +- uint32_t handle; /* System handle. */ ++ __le32 handle; /* System handle. */ + +- uint16_t comp_status; ++ __le16 comp_status; + +- uint16_t nport_handle; /* N_PORT handle. */ ++ __le16 nport_handle; /* N_PORT handle. */ + +- uint16_t reserved_1; ++ __le16 reserved_1; + + uint8_t vp_index; + uint8_t sof_type; + +- uint32_t rx_xchg_address; /* Receive exchange address. */ +- uint16_t reserved_2; ++ __le32 rx_xchg_address; /* Receive exchange address. */ ++ __le16 reserved_2; + + uint8_t opcode; + uint8_t reserved_3; +@@ -851,13 +851,13 @@ struct els_sts_entry_24xx { + uint8_t d_id[3]; + uint8_t s_id[3]; + +- uint16_t control_flags; /* Control flags. */ +- uint32_t total_byte_count; +- uint32_t error_subcode_1; +- uint32_t error_subcode_2; +- uint32_t error_subcode_3; ++ __le16 control_flags; /* Control flags. */ ++ __le32 total_byte_count; ++ __le32 error_subcode_1; ++ __le32 error_subcode_2; ++ __le32 error_subcode_3; + +- uint32_t reserved_4[4]; ++ __le32 reserved_4[4]; + }; + /* + * ISP queue - Mailbox Command entry structure definition. +@@ -884,12 +884,12 @@ struct logio_entry_24xx { + + uint32_t handle; /* System handle. */ + +- uint16_t comp_status; /* Completion status. */ ++ __le16 comp_status; /* Completion status. */ + #define CS_LOGIO_ERROR 0x31 /* Login/Logout IOCB error. */ + +- uint16_t nport_handle; /* N_PORT handle. */ ++ __le16 nport_handle; /* N_PORT handle. */ + +- uint16_t control_flags; /* Control flags. */ ++ __le16 control_flags; /* Control flags. */ + /* Modifiers. */ + #define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */ + #define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */ +@@ -918,7 +918,7 @@ struct logio_entry_24xx { + + uint8_t rsp_size; /* Response size in 32bit words. */ + +- uint32_t io_parameter[11]; /* General I/O parameters. */ ++ __le32 io_parameter[11]; /* General I/O parameters. */ + #define LSC_SCODE_NOLINK 0x01 + #define LSC_SCODE_NOIOCB 0x02 + #define LSC_SCODE_NOXCB 0x03 +@@ -946,17 +946,17 @@ struct tsk_mgmt_entry { + + uint32_t handle; /* System handle. */ + +- uint16_t nport_handle; /* N_PORT handle. */ ++ __le16 nport_handle; /* N_PORT handle. */ + + uint16_t reserved_1; + +- uint16_t delay; /* Activity delay in seconds. */ ++ __le16 delay; /* Activity delay in seconds. */ + +- uint16_t timeout; /* Command timeout. */ ++ __le16 timeout; /* Command timeout. */ + + struct scsi_lun lun; /* FCP LUN (BE). */ + +- uint32_t control_flags; /* Control Flags. */ ++ __le32 control_flags; /* Control Flags. */ + #define TCF_NOTMCMD_TO_TARGET BIT_31 + #define TCF_LUN_RESET BIT_4 + #define TCF_ABORT_TASK_SET BIT_3 +@@ -981,15 +981,15 @@ struct abort_entry_24xx { + + uint32_t handle; /* System handle. */ + +- uint16_t nport_handle; /* N_PORT handle. */ ++ __le16 nport_handle; /* N_PORT handle. */ + /* or Completion status. */ + +- uint16_t options; /* Options. */ ++ __le16 options; /* Options. */ + #define AOF_NO_ABTS BIT_0 /* Do not send any ABTS. */ + + uint32_t handle_to_abort; /* System handle to abort. */ + +- uint16_t req_que_no; ++ __le16 req_que_no; + uint8_t reserved_1[30]; + + uint8_t port_id[3]; /* PortID of destination port. 
*/ +@@ -1006,16 +1006,16 @@ struct abts_entry_24xx { + uint8_t handle_count; + uint8_t entry_status; + +- uint32_t handle; /* type 0x55 only */ ++ __le32 handle; /* type 0x55 only */ + +- uint16_t comp_status; /* type 0x55 only */ +- uint16_t nport_handle; /* type 0x54 only */ ++ __le16 comp_status; /* type 0x55 only */ ++ __le16 nport_handle; /* type 0x54 only */ + +- uint16_t control_flags; /* type 0x55 only */ ++ __le16 control_flags; /* type 0x55 only */ + uint8_t vp_idx; + uint8_t sof_type; /* sof_type is upper nibble */ + +- uint32_t rx_xch_addr; ++ __le32 rx_xch_addr; + + uint8_t d_id[3]; + uint8_t r_ctl; +@@ -1026,30 +1026,30 @@ struct abts_entry_24xx { + uint8_t f_ctl[3]; + uint8_t type; + +- uint16_t seq_cnt; ++ __le16 seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; + +- uint16_t rx_id; +- uint16_t ox_id; ++ __le16 rx_id; ++ __le16 ox_id; + +- uint32_t param; ++ __le32 param; + + union { + struct { +- uint32_t subcode3; +- uint32_t rsvd; +- uint32_t subcode1; +- uint32_t subcode2; ++ __le32 subcode3; ++ __le32 rsvd; ++ __le32 subcode1; ++ __le32 subcode2; + } error; + struct { +- uint16_t rsrvd1; ++ __le16 rsrvd1; + uint8_t last_seq_id; + uint8_t seq_id_valid; +- uint16_t aborted_rx_id; +- uint16_t aborted_ox_id; +- uint16_t high_seq_cnt; +- uint16_t low_seq_cnt; ++ __le16 aborted_rx_id; ++ __le16 aborted_ox_id; ++ __le16 high_seq_cnt; ++ __le16 low_seq_cnt; + } ba_acc; + struct { + uint8_t vendor_unique; +@@ -1058,7 +1058,7 @@ struct abts_entry_24xx { + } ba_rjt; + } payload; + +- uint32_t rx_xch_addr_to_abort; ++ __le32 rx_xch_addr_to_abort; + } __packed; + + /* ABTS payload explanation values */ +@@ -1087,7 +1087,7 @@ struct abts_entry_24xx { + * ISP I/O Register Set structure definitions. + */ + struct device_reg_24xx { +- uint32_t flash_addr; /* Flash/NVRAM BIOS address. */ ++ __le32 flash_addr; /* Flash/NVRAM BIOS address. */ + #define FARX_DATA_FLAG BIT_31 + #define FARX_ACCESS_FLASH_CONF 0x7FFD0000 + #define FARX_ACCESS_FLASH_DATA 0x7FF00000 +@@ -1138,9 +1138,9 @@ struct device_reg_24xx { + #define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023 + #define HW_EVENT_FLASH_FW_ERR 0xF024 + +- uint32_t flash_data; /* Flash/NVRAM BIOS data. */ ++ __le32 flash_data; /* Flash/NVRAM BIOS data. */ + +- uint32_t ctrl_status; /* Control/Status. */ ++ __le32 ctrl_status; /* Control/Status. */ + #define CSRX_FLASH_ACCESS_ERROR BIT_18 /* Flash/NVRAM Access Error. */ + #define CSRX_DMA_ACTIVE BIT_17 /* DMA Active status. */ + #define CSRX_DMA_SHUTDOWN BIT_16 /* DMA Shutdown control status. */ +@@ -1166,35 +1166,35 @@ struct device_reg_24xx { + #define CSRX_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable. */ + #define CSRX_ISP_SOFT_RESET BIT_0 /* ISP soft reset. */ + +- uint32_t ictrl; /* Interrupt control. */ ++ __le32 ictrl; /* Interrupt control. */ + #define ICRX_EN_RISC_INT BIT_3 /* Enable RISC interrupts on PCI. */ + +- uint32_t istatus; /* Interrupt status. */ ++ __le32 istatus; /* Interrupt status. */ + #define ISRX_RISC_INT BIT_3 /* RISC interrupt. */ + +- uint32_t unused_1[2]; /* Gap. */ ++ __le32 unused_1[2]; /* Gap. */ + + /* Request Queue. */ +- uint32_t req_q_in; /* In-Pointer. */ +- uint32_t req_q_out; /* Out-Pointer. */ ++ __le32 req_q_in; /* In-Pointer. */ ++ __le32 req_q_out; /* Out-Pointer. */ + /* Response Queue. */ +- uint32_t rsp_q_in; /* In-Pointer. */ +- uint32_t rsp_q_out; /* Out-Pointer. */ ++ __le32 rsp_q_in; /* In-Pointer. */ ++ __le32 rsp_q_out; /* Out-Pointer. */ + /* Priority Request Queue. */ +- uint32_t preq_q_in; /* In-Pointer. */ +- uint32_t preq_q_out; /* Out-Pointer. 
*/ ++ __le32 preq_q_in; /* In-Pointer. */ ++ __le32 preq_q_out; /* Out-Pointer. */ + +- uint32_t unused_2[2]; /* Gap. */ ++ __le32 unused_2[2]; /* Gap. */ + + /* ATIO Queue. */ +- uint32_t atio_q_in; /* In-Pointer. */ +- uint32_t atio_q_out; /* Out-Pointer. */ ++ __le32 atio_q_in; /* In-Pointer. */ ++ __le32 atio_q_out; /* Out-Pointer. */ + +- uint32_t host_status; ++ __le32 host_status; + #define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */ + #define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */ + +- uint32_t hccr; /* Host command & control register. */ ++ __le32 hccr; /* Host command & control register. */ + /* HCCR statuses. */ + #define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ + #define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ +@@ -1216,7 +1216,7 @@ struct device_reg_24xx { + /* Clear RISC to PCI interrupt. */ + #define HCCRX_CLR_RISC_INT 0xA0000000 + +- uint32_t gpiod; /* GPIO Data register. */ ++ __le32 gpiod; /* GPIO Data register. */ + + /* LED update mask. */ + #define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18) +@@ -1235,7 +1235,7 @@ struct device_reg_24xx { + /* Data in/out. */ + #define GPDX_DATA_INOUT (BIT_1|BIT_0) + +- uint32_t gpioe; /* GPIO Enable register. */ ++ __le32 gpioe; /* GPIO Enable register. */ + /* Enable update mask. */ + #define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16) + /* Enable update mask. */ +@@ -1243,52 +1243,52 @@ struct device_reg_24xx { + /* Enable. */ + #define GPEX_ENABLE (BIT_1|BIT_0) + +- uint32_t iobase_addr; /* I/O Bus Base Address register. */ ++ __le32 iobase_addr; /* I/O Bus Base Address register. */ + +- uint32_t unused_3[10]; /* Gap. */ ++ __le32 unused_3[10]; /* Gap. */ + +- uint16_t mailbox0; +- uint16_t mailbox1; +- uint16_t mailbox2; +- uint16_t mailbox3; +- uint16_t mailbox4; +- uint16_t mailbox5; +- uint16_t mailbox6; +- uint16_t mailbox7; +- uint16_t mailbox8; +- uint16_t mailbox9; +- uint16_t mailbox10; +- uint16_t mailbox11; +- uint16_t mailbox12; +- uint16_t mailbox13; +- uint16_t mailbox14; +- uint16_t mailbox15; +- uint16_t mailbox16; +- uint16_t mailbox17; +- uint16_t mailbox18; +- uint16_t mailbox19; +- uint16_t mailbox20; +- uint16_t mailbox21; +- uint16_t mailbox22; +- uint16_t mailbox23; +- uint16_t mailbox24; +- uint16_t mailbox25; +- uint16_t mailbox26; +- uint16_t mailbox27; +- uint16_t mailbox28; +- uint16_t mailbox29; +- uint16_t mailbox30; +- uint16_t mailbox31; +- +- uint32_t iobase_window; +- uint32_t iobase_c4; +- uint32_t iobase_c8; +- uint32_t unused_4_1[6]; /* Gap. */ +- uint32_t iobase_q; +- uint32_t unused_5[2]; /* Gap. */ +- uint32_t iobase_select; +- uint32_t unused_6[2]; /* Gap. */ +- uint32_t iobase_sdata; ++ __le16 mailbox0; ++ __le16 mailbox1; ++ __le16 mailbox2; ++ __le16 mailbox3; ++ __le16 mailbox4; ++ __le16 mailbox5; ++ __le16 mailbox6; ++ __le16 mailbox7; ++ __le16 mailbox8; ++ __le16 mailbox9; ++ __le16 mailbox10; ++ __le16 mailbox11; ++ __le16 mailbox12; ++ __le16 mailbox13; ++ __le16 mailbox14; ++ __le16 mailbox15; ++ __le16 mailbox16; ++ __le16 mailbox17; ++ __le16 mailbox18; ++ __le16 mailbox19; ++ __le16 mailbox20; ++ __le16 mailbox21; ++ __le16 mailbox22; ++ __le16 mailbox23; ++ __le16 mailbox24; ++ __le16 mailbox25; ++ __le16 mailbox26; ++ __le16 mailbox27; ++ __le16 mailbox28; ++ __le16 mailbox29; ++ __le16 mailbox30; ++ __le16 mailbox31; ++ ++ __le32 iobase_window; ++ __le32 iobase_c4; ++ __le32 iobase_c8; ++ __le32 unused_4_1[6]; /* Gap. */ ++ __le32 iobase_q; ++ __le32 unused_5[2]; /* Gap. */ ++ __le32 iobase_select; ++ __le32 unused_6[2]; /* Gap. 
*/ ++ __le32 iobase_sdata; + }; + /* RISC-RISC semaphore register PCI offet */ + #define RISC_REGISTER_BASE_OFFSET 0x7010 +@@ -1354,8 +1354,8 @@ struct mid_conf_entry_24xx { + struct mid_init_cb_24xx { + struct init_cb_24xx init_cb; + +- uint16_t count; +- uint16_t options; ++ __le16 count; ++ __le16 options; + + struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC]; + }; +@@ -1389,27 +1389,27 @@ struct vp_ctrl_entry_24xx { + + uint32_t handle; /* System handle. */ + +- uint16_t vp_idx_failed; ++ __le16 vp_idx_failed; + +- uint16_t comp_status; /* Completion status. */ ++ __le16 comp_status; /* Completion status. */ + #define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */ + #define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquireing ID. */ + #define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */ + +- uint16_t command; ++ __le16 command; + #define VCE_COMMAND_ENABLE_VPS 0x00 /* Enable VPs. */ + #define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */ + #define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */ + #define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */ + #define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */ + +- uint16_t vp_count; ++ __le16 vp_count; + + uint8_t vp_idx_map[16]; +- uint16_t flags; +- uint16_t id; ++ __le16 flags; ++ __le16 id; + uint16_t reserved_4; +- uint16_t hopct; ++ __le16 hopct; + uint8_t reserved_5[24]; + }; + +@@ -1425,12 +1425,12 @@ struct vp_config_entry_24xx { + + uint32_t handle; /* System handle. */ + +- uint16_t flags; ++ __le16 flags; + #define CS_VF_BIND_VPORTS_TO_VF BIT_0 + #define CS_VF_SET_QOS_OF_VPORTS BIT_1 + #define CS_VF_SET_HOPS_OF_VPORTS BIT_2 + +- uint16_t comp_status; /* Completion status. */ ++ __le16 comp_status; /* Completion status. */ + #define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */ + #define CS_VCT_CNT_ERROR 0x02 /* Invalid VP count. */ + #define CS_VCT_ERROR 0x03 /* Unknown error. */ +@@ -1457,9 +1457,9 @@ struct vp_config_entry_24xx { + uint16_t reserved_vp2; + uint8_t port_name_idx2[WWN_SIZE]; + uint8_t node_name_idx2[WWN_SIZE]; +- uint16_t id; ++ __le16 id; + uint16_t reserved_4; +- uint16_t hopct; ++ __le16 hopct; + uint8_t reserved_5[2]; + }; + +@@ -1486,7 +1486,7 @@ struct vp_rpt_id_entry_24xx { + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ +- uint32_t resv1; ++ __le32 resv1; + uint8_t vp_acquired; + uint8_t vp_setup; + uint8_t vp_idx; /* Format 0=reserved */ +@@ -1550,15 +1550,15 @@ struct vf_evfp_entry_24xx { + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ +- uint16_t comp_status; /* Completion status. */ +- uint16_t timeout; /* timeout */ +- uint16_t adim_tagging_mode; ++ __le16 comp_status; /* Completion status. */ ++ __le16 timeout; /* timeout */ ++ __le16 adim_tagging_mode; + +- uint16_t vfport_id; ++ __le16 vfport_id; + uint32_t exch_addr; + +- uint16_t nport_handle; /* N_PORT handle. */ +- uint16_t control_flags; ++ __le16 nport_handle; /* N_PORT handle. */ ++ __le16 control_flags; + uint32_t io_parameter_0; + uint32_t io_parameter_1; + __le64 tx_address __packed; /* Data segment 0 address. 
*/ +@@ -1573,13 +1573,13 @@ struct vf_evfp_entry_24xx { + + struct qla_fdt_layout { + uint8_t sig[4]; +- uint16_t version; +- uint16_t len; +- uint16_t checksum; ++ __le16 version; ++ __le16 len; ++ __le16 checksum; + uint8_t unused1[2]; + uint8_t model[16]; +- uint16_t man_id; +- uint16_t id; ++ __le16 man_id; ++ __le16 id; + uint8_t flags; + uint8_t erase_cmd; + uint8_t alt_erase_cmd; +@@ -1588,15 +1588,15 @@ struct qla_fdt_layout { + uint8_t wrt_sts_reg_cmd; + uint8_t unprotect_sec_cmd; + uint8_t read_man_id_cmd; +- uint32_t block_size; +- uint32_t alt_block_size; +- uint32_t flash_size; +- uint32_t wrt_enable_data; ++ __le32 block_size; ++ __le32 alt_block_size; ++ __le32 flash_size; ++ __le32 wrt_enable_data; + uint8_t read_id_addr_len; + uint8_t wrt_disable_bits; + uint8_t read_dev_id_len; + uint8_t chip_erase_cmd; +- uint16_t read_timeout; ++ __le16 read_timeout; + uint8_t protect_sec_cmd; + uint8_t unused2[65]; + }; +@@ -1605,11 +1605,11 @@ struct qla_fdt_layout { + + struct qla_flt_location { + uint8_t sig[4]; +- uint16_t start_lo; +- uint16_t start_hi; ++ __le16 start_lo; ++ __le16 start_hi; + uint8_t version; + uint8_t unused[5]; +- uint16_t checksum; ++ __le16 checksum; + }; + + #define FLT_REG_FW 0x01 +@@ -1664,19 +1664,19 @@ struct qla_flt_location { + #define FLT_REG_PEP_SEC_28XX 0xF1 + + struct qla_flt_region { +- uint16_t code; ++ __le16 code; + uint8_t attribute; + uint8_t reserved; +- uint32_t size; +- uint32_t start; +- uint32_t end; ++ __le32 size; ++ __le32 start; ++ __le32 end; + }; + + struct qla_flt_header { +- uint16_t version; +- uint16_t length; +- uint16_t checksum; +- uint16_t unused; ++ __le16 version; ++ __le16 length; ++ __le16 checksum; ++ __le16 unused; + struct qla_flt_region region[0]; + }; + +@@ -1688,18 +1688,18 @@ struct qla_flt_header { + + struct qla_npiv_header { + uint8_t sig[2]; +- uint16_t version; +- uint16_t entries; +- uint16_t unused[4]; +- uint16_t checksum; ++ __le16 version; ++ __le16 entries; ++ __le16 unused[4]; ++ __le16 checksum; + }; + + struct qla_npiv_entry { +- uint16_t flags; +- uint16_t vf_id; ++ __le16 flags; ++ __le16 vf_id; + uint8_t q_qos; + uint8_t f_qos; +- uint16_t unused1; ++ __le16 unused1; + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + }; +@@ -1729,7 +1729,7 @@ struct verify_chip_entry_84xx { + + uint32_t handle; + +- uint16_t options; ++ __le16 options; + #define VCO_DONT_UPDATE_FW BIT_0 + #define VCO_FORCE_UPDATE BIT_1 + #define VCO_DONT_RESET_UPDATE BIT_2 +@@ -1737,18 +1737,18 @@ struct verify_chip_entry_84xx { + #define VCO_END_OF_DATA BIT_14 + #define VCO_ENABLE_DSD BIT_15 + +- uint16_t reserved_1; ++ __le16 reserved_1; + +- uint16_t data_seg_cnt; +- uint16_t reserved_2[3]; ++ __le16 data_seg_cnt; ++ __le16 reserved_2[3]; + +- uint32_t fw_ver; +- uint32_t exchange_address; ++ __le32 fw_ver; ++ __le32 exchange_address; + +- uint32_t reserved_3[3]; +- uint32_t fw_size; +- uint32_t fw_seq_size; +- uint32_t relative_offset; ++ __le32 reserved_3[3]; ++ __le32 fw_size; ++ __le32 fw_seq_size; ++ __le32 relative_offset; + + struct dsd64 dsd; + }; +@@ -1761,22 +1761,22 @@ struct verify_chip_rsp_84xx { + + uint32_t handle; + +- uint16_t comp_status; ++ __le16 comp_status; + #define CS_VCS_CHIP_FAILURE 0x3 + #define CS_VCS_BAD_EXCHANGE 0x8 + #define CS_VCS_SEQ_COMPLETEi 0x40 + +- uint16_t failure_code; ++ __le16 failure_code; + #define VFC_CHECKSUM_ERROR 0x1 + #define VFC_INVALID_LEN 0x2 + #define VFC_ALREADY_IN_PROGRESS 0x8 + +- uint16_t reserved_1[4]; ++ __le16 reserved_1[4]; + +- uint32_t fw_ver; +- 
uint32_t exchange_address; ++ __le32 fw_ver; ++ __le32 exchange_address; + +- uint32_t reserved_2[6]; ++ __le32 reserved_2[6]; + }; + + #define ACCESS_CHIP_IOCB_TYPE 0x2B +@@ -1788,24 +1788,24 @@ struct access_chip_84xx { + + uint32_t handle; + +- uint16_t options; ++ __le16 options; + #define ACO_DUMP_MEMORY 0x0 + #define ACO_LOAD_MEMORY 0x1 + #define ACO_CHANGE_CONFIG_PARAM 0x2 + #define ACO_REQUEST_INFO 0x3 + +- uint16_t reserved1; ++ __le16 reserved1; + +- uint16_t dseg_count; +- uint16_t reserved2[3]; ++ __le16 dseg_count; ++ __le16 reserved2[3]; + +- uint32_t parameter1; +- uint32_t parameter2; +- uint32_t parameter3; +- +- uint32_t reserved3[3]; +- uint32_t total_byte_cnt; +- uint32_t reserved4; ++ __le32 parameter1; ++ __le32 parameter2; ++ __le32 parameter3; ++ ++ __le32 reserved3[3]; ++ __le32 total_byte_cnt; ++ __le32 reserved4; + + struct dsd64 dsd; + }; +@@ -1818,11 +1818,11 @@ struct access_chip_rsp_84xx { + + uint32_t handle; + +- uint16_t comp_status; +- uint16_t failure_code; +- uint32_t residual_count; ++ __le16 comp_status; ++ __le16 failure_code; ++ __le32 residual_count; + +- uint32_t reserved[12]; ++ __le32 reserved[12]; + }; + + /* 81XX Support **************************************************************/ +@@ -1877,52 +1877,52 @@ struct access_chip_rsp_84xx { + struct nvram_81xx { + /* NVRAM header. */ + uint8_t id[4]; +- uint16_t nvram_version; +- uint16_t reserved_0; ++ __le16 nvram_version; ++ __le16 reserved_0; + + /* Firmware Initialization Control Block. */ +- uint16_t version; +- uint16_t reserved_1; +- uint16_t frame_payload_size; +- uint16_t execution_throttle; +- uint16_t exchange_count; +- uint16_t reserved_2; ++ __le16 version; ++ __le16 reserved_1; ++ __le16 frame_payload_size; ++ __le16 execution_throttle; ++ __le16 exchange_count; ++ __le16 reserved_2; + + uint8_t port_name[WWN_SIZE]; + uint8_t node_name[WWN_SIZE]; + +- uint16_t login_retry_count; +- uint16_t reserved_3; +- uint16_t interrupt_delay_timer; +- uint16_t login_timeout; +- +- uint32_t firmware_options_1; +- uint32_t firmware_options_2; +- uint32_t firmware_options_3; ++ __le16 login_retry_count; ++ __le16 reserved_3; ++ __le16 interrupt_delay_timer; ++ __le16 login_timeout; ++ ++ __le32 firmware_options_1; ++ __le32 firmware_options_2; ++ __le32 firmware_options_3; + +- uint16_t reserved_4[4]; ++ __le16 reserved_4[4]; + + /* Offset 64. */ + uint8_t enode_mac[6]; +- uint16_t reserved_5[5]; ++ __le16 reserved_5[5]; + + /* Offset 80. */ +- uint16_t reserved_6[24]; ++ __le16 reserved_6[24]; + + /* Offset 128. */ +- uint16_t ex_version; ++ __le16 ex_version; + uint8_t prio_fcf_matching_flags; + uint8_t reserved_6_1[3]; +- uint16_t pri_fcf_vlan_id; ++ __le16 pri_fcf_vlan_id; + uint8_t pri_fcf_fabric_name[8]; +- uint16_t reserved_6_2[7]; ++ __le16 reserved_6_2[7]; + uint8_t spma_mac_addr[6]; +- uint16_t reserved_6_3[14]; ++ __le16 reserved_6_3[14]; + + /* Offset 192. 
*/ + uint8_t min_supported_speed; + uint8_t reserved_7_0; +- uint16_t reserved_7[31]; ++ __le16 reserved_7[31]; + + /* + * BIT 0 = Enable spinup delay +@@ -1955,26 +1955,26 @@ struct nvram_81xx { + * BIT 25 = Temp WWPN + * BIT 26-31 = + */ +- uint32_t host_p; ++ __le32 host_p; + + uint8_t alternate_port_name[WWN_SIZE]; + uint8_t alternate_node_name[WWN_SIZE]; + + uint8_t boot_port_name[WWN_SIZE]; +- uint16_t boot_lun_number; +- uint16_t reserved_8; ++ __le16 boot_lun_number; ++ __le16 reserved_8; + + uint8_t alt1_boot_port_name[WWN_SIZE]; +- uint16_t alt1_boot_lun_number; +- uint16_t reserved_9; ++ __le16 alt1_boot_lun_number; ++ __le16 reserved_9; + + uint8_t alt2_boot_port_name[WWN_SIZE]; +- uint16_t alt2_boot_lun_number; +- uint16_t reserved_10; ++ __le16 alt2_boot_lun_number; ++ __le16 reserved_10; + + uint8_t alt3_boot_port_name[WWN_SIZE]; +- uint16_t alt3_boot_lun_number; +- uint16_t reserved_11; ++ __le16 alt3_boot_lun_number; ++ __le16 reserved_11; + + /* + * BIT 0 = Selective Login +@@ -1986,35 +1986,35 @@ struct nvram_81xx { + * BIT 6 = Reserved + * BIT 7-31 = + */ +- uint32_t efi_parameters; ++ __le32 efi_parameters; + + uint8_t reset_delay; + uint8_t reserved_12; +- uint16_t reserved_13; ++ __le16 reserved_13; + +- uint16_t boot_id_number; +- uint16_t reserved_14; ++ __le16 boot_id_number; ++ __le16 reserved_14; + +- uint16_t max_luns_per_target; +- uint16_t reserved_15; ++ __le16 max_luns_per_target; ++ __le16 reserved_15; + +- uint16_t port_down_retry_count; +- uint16_t link_down_timeout; ++ __le16 port_down_retry_count; ++ __le16 link_down_timeout; + + /* FCode parameters. */ +- uint16_t fcode_parameter; ++ __le16 fcode_parameter; + +- uint16_t reserved_16[3]; ++ __le16 reserved_16[3]; + + /* Offset 352. */ + uint8_t reserved_17[4]; +- uint16_t reserved_18[5]; ++ __le16 reserved_18[5]; + uint8_t reserved_19[2]; +- uint16_t reserved_20[8]; ++ __le16 reserved_20[8]; + + /* Offset 384. */ + uint8_t reserved_21[16]; +- uint16_t reserved_22[3]; ++ __le16 reserved_22[3]; + + /* Offset 406 (0x196) Enhanced Features + * BIT 0 = Extended BB credits for LR +@@ -2027,20 +2027,20 @@ struct nvram_81xx { + uint16_t reserved_24[4]; + + /* Offset 416. */ +- uint16_t reserved_25[32]; ++ __le16 reserved_25[32]; + + /* Offset 480. */ + uint8_t model_name[16]; + + /* Offset 496. */ +- uint16_t feature_mask_l; +- uint16_t feature_mask_h; +- uint16_t reserved_26[2]; ++ __le16 feature_mask_l; ++ __le16 feature_mask_h; ++ __le16 reserved_26[2]; + +- uint16_t subsystem_vendor_id; +- uint16_t subsystem_device_id; ++ __le16 subsystem_vendor_id; ++ __le16 subsystem_device_id; + +- uint32_t checksum; ++ __le32 checksum; + }; + + /* +@@ -2049,31 +2049,31 @@ struct nvram_81xx { + */ + #define ICB_VERSION 1 + struct init_cb_81xx { +- uint16_t version; +- uint16_t reserved_1; ++ __le16 version; ++ __le16 reserved_1; + +- uint16_t frame_payload_size; +- uint16_t execution_throttle; +- uint16_t exchange_count; ++ __le16 frame_payload_size; ++ __le16 execution_throttle; ++ __le16 exchange_count; + +- uint16_t reserved_2; ++ __le16 reserved_2; + + uint8_t port_name[WWN_SIZE]; /* Big endian. */ + uint8_t node_name[WWN_SIZE]; /* Big endian. 
*/ + +- uint16_t response_q_inpointer; +- uint16_t request_q_outpointer; ++ __le16 response_q_inpointer; ++ __le16 request_q_outpointer; + +- uint16_t login_retry_count; ++ __le16 login_retry_count; + +- uint16_t prio_request_q_outpointer; ++ __le16 prio_request_q_outpointer; + +- uint16_t response_q_length; +- uint16_t request_q_length; ++ __le16 response_q_length; ++ __le16 request_q_length; + +- uint16_t reserved_3; ++ __le16 reserved_3; + +- uint16_t prio_request_q_length; ++ __le16 prio_request_q_length; + + __le64 request_q_address __packed; + __le64 response_q_address __packed; +@@ -2081,12 +2081,12 @@ struct init_cb_81xx { + + uint8_t reserved_4[8]; + +- uint16_t atio_q_inpointer; +- uint16_t atio_q_length; ++ __le16 atio_q_inpointer; ++ __le16 atio_q_length; + __le64 atio_q_address __packed; + +- uint16_t interrupt_delay_timer; /* 100us increments. */ +- uint16_t login_timeout; ++ __le16 interrupt_delay_timer; /* 100us increments. */ ++ __le16 login_timeout; + + /* + * BIT 0-3 = Reserved +@@ -2099,7 +2099,7 @@ struct init_cb_81xx { + * BIT 14 = Node Name Option + * BIT 15-31 = Reserved + */ +- uint32_t firmware_options_1; ++ __le32 firmware_options_1; + + /* + * BIT 0 = Operation Mode bit 0 +@@ -2117,7 +2117,7 @@ struct init_cb_81xx { + * BIT 14 = Enable Target PRLI Control + * BIT 15-31 = Reserved + */ +- uint32_t firmware_options_2; ++ __le32 firmware_options_2; + + /* + * BIT 0-3 = Reserved +@@ -2138,7 +2138,7 @@ struct init_cb_81xx { + * BIT 28 = SPMA selection bit 1 + * BIT 30-31 = Reserved + */ +- uint32_t firmware_options_3; ++ __le32 firmware_options_3; + + uint8_t reserved_5[8]; + +--- a/drivers/scsi/qla2xxx/qla_inline.h ++++ b/drivers/scsi/qla2xxx/qla_inline.h +@@ -40,7 +40,7 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, + * register value. + */ + static __inline__ uint16_t +-qla2x00_debounce_register(volatile uint16_t __iomem *addr) ++qla2x00_debounce_register(volatile __le16 __iomem *addr) + { + volatile uint16_t first; + volatile uint16_t second; +--- a/drivers/scsi/qla2xxx/qla_mr.h ++++ b/drivers/scsi/qla2xxx/qla_mr.h +@@ -96,7 +96,7 @@ struct tsk_mgmt_entry_fx00 { + uint8_t sys_define; + uint8_t entry_status; /* Entry Status. */ + +- __le32 handle; /* System handle. */ ++ uint32_t handle; /* System handle. */ + + uint32_t reserved_0; + +@@ -121,13 +121,13 @@ struct abort_iocb_entry_fx00 { + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + +- __le32 handle; /* System handle. */ ++ uint32_t handle; /* System handle. */ + __le32 reserved_0; + + __le16 tgt_id_sts; /* Completion status. */ + __le16 options; + +- __le32 abort_handle; /* System handle. */ ++ uint32_t abort_handle; /* System handle. */ + __le32 reserved_2; + + __le16 req_que_no; +@@ -166,7 +166,7 @@ struct fxdisc_entry_fx00 { + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. */ + +- __le32 handle; /* System handle. */ ++ uint32_t handle; /* System handle. */ + __le32 reserved_0; /* System handle. */ + + __le16 func_num; +--- a/drivers/scsi/qla2xxx/qla_nvme.h ++++ b/drivers/scsi/qla2xxx/qla_nvme.h +@@ -48,26 +48,26 @@ struct cmd_nvme { + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ +- uint16_t nport_handle; /* N_PORT handle. */ +- uint16_t timeout; /* Command timeout. */ ++ __le16 nport_handle; /* N_PORT handle. */ ++ __le16 timeout; /* Command timeout. */ + +- uint16_t dseg_count; /* Data segment count. 
*/ +- uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */ ++ __le16 dseg_count; /* Data segment count. */ ++ __le16 nvme_rsp_dsd_len; /* NVMe RSP DSD length */ + + uint64_t rsvd; + +- uint16_t control_flags; /* Control Flags */ ++ __le16 control_flags; /* Control Flags */ + #define CF_NVME_FIRST_BURST_ENABLE BIT_11 + #define CF_DIF_SEG_DESCR_ENABLE BIT_3 + #define CF_DATA_SEG_DESCR_ENABLE BIT_2 + #define CF_READ_DATA BIT_1 + #define CF_WRITE_DATA BIT_0 + +- uint16_t nvme_cmnd_dseg_len; /* Data segment length. */ ++ __le16 nvme_cmnd_dseg_len; /* Data segment length. */ + __le64 nvme_cmnd_dseg_address __packed;/* Data segment address. */ + __le64 nvme_rsp_dseg_address __packed; /* Data segment address. */ + +- uint32_t byte_count; /* Total byte count. */ ++ __le32 byte_count; /* Total byte count. */ + + uint8_t port_id[3]; /* PortID of destination port. */ + uint8_t vp_index; +@@ -82,24 +82,24 @@ struct pt_ls4_request { + uint8_t sys_define; + uint8_t entry_status; + uint32_t handle; +- uint16_t status; +- uint16_t nport_handle; +- uint16_t tx_dseg_count; ++ __le16 status; ++ __le16 nport_handle; ++ __le16 tx_dseg_count; + uint8_t vp_index; + uint8_t rsvd; +- uint16_t timeout; +- uint16_t control_flags; ++ __le16 timeout; ++ __le16 control_flags; + #define CF_LS4_SHIFT 13 + #define CF_LS4_ORIGINATOR 0 + #define CF_LS4_RESPONDER 1 + #define CF_LS4_RESPONDER_TERM 2 + +- uint16_t rx_dseg_count; +- uint16_t rsvd2; +- uint32_t exchange_address; +- uint32_t rsvd3; +- uint32_t rx_byte_count; +- uint32_t tx_byte_count; ++ __le16 rx_dseg_count; ++ __le16 rsvd2; ++ __le32 exchange_address; ++ __le32 rsvd3; ++ __le32 rx_byte_count; ++ __le32 tx_byte_count; + struct dsd64 dsd[2]; + }; + +@@ -107,32 +107,32 @@ struct pt_ls4_request { + struct pt_ls4_rx_unsol { + uint8_t entry_type; + uint8_t entry_count; +- uint16_t rsvd0; +- uint16_t rsvd1; ++ __le16 rsvd0; ++ __le16 rsvd1; + uint8_t vp_index; + uint8_t rsvd2; +- uint16_t rsvd3; +- uint16_t nport_handle; +- uint16_t frame_size; +- uint16_t rsvd4; +- uint32_t exchange_address; ++ __le16 rsvd3; ++ __le16 nport_handle; ++ __le16 frame_size; ++ __le16 rsvd4; ++ __le32 exchange_address; + uint8_t d_id[3]; + uint8_t r_ctl; + be_id_t s_id; + uint8_t cs_ctl; + uint8_t f_ctl[3]; + uint8_t type; +- uint16_t seq_cnt; ++ __le16 seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; +- uint16_t rx_id; +- uint16_t ox_id; +- uint32_t param; +- uint32_t desc0; ++ __le16 rx_id; ++ __le16 ox_id; ++ __le32 param; ++ __le32 desc0; + #define PT_LS4_PAYLOAD_OFFSET 0x2c + #define PT_LS4_FIRST_PACKET_LEN 20 +- uint32_t desc_len; +- uint32_t payload[3]; ++ __le32 desc_len; ++ __le32 payload[3]; + }; + + /* +--- a/drivers/scsi/qla2xxx/qla_nx.h ++++ b/drivers/scsi/qla2xxx/qla_nx.h +@@ -800,16 +800,16 @@ struct qla82xx_legacy_intr_set { + #define QLA82XX_URI_FIRMWARE_IDX_OFF 29 + + struct qla82xx_uri_table_desc{ +- uint32_t findex; +- uint32_t num_entries; +- uint32_t entry_size; +- uint32_t reserved[5]; ++ __le32 findex; ++ __le32 num_entries; ++ __le32 entry_size; ++ __le32 reserved[5]; + }; + + struct qla82xx_uri_data_desc{ +- uint32_t findex; +- uint32_t size; +- uint32_t reserved[5]; ++ __le32 findex; ++ __le32 size; ++ __le32 reserved[5]; + }; + + /* UNIFIED ROMIMAGE END */ +@@ -829,22 +829,22 @@ struct qla82xx_uri_data_desc{ + * ISP 8021 I/O Register Set structure definitions. + */ + struct device_reg_82xx { +- uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */ +- uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. 
*/ +- uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */ +- +- uint16_t mailbox_in[32]; /* Mail box In registers */ +- uint16_t unused_1[32]; +- uint32_t hint; /* Host interrupt register */ ++ __le32 req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */ ++ __le32 rsp_q_in[64]; /* Response Queue In-Pointer. */ ++ __le32 rsp_q_out[64]; /* Response Queue Out-Pointer. */ ++ ++ __le16 mailbox_in[32]; /* Mailbox In registers */ ++ __le16 unused_1[32]; ++ __le32 hint; /* Host interrupt register */ + #define HINT_MBX_INT_PENDING BIT_0 +- uint16_t unused_2[62]; +- uint16_t mailbox_out[32]; /* Mail box Out registers */ +- uint32_t unused_3[48]; ++ __le16 unused_2[62]; ++ __le16 mailbox_out[32]; /* Mailbox Out registers */ ++ __le32 unused_3[48]; + +- uint32_t host_status; /* host status */ ++ __le32 host_status; /* host status */ + #define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */ + #define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */ +- uint32_t host_int; /* Interrupt status. */ ++ __le32 host_int; /* Interrupt status. */ + #define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */ + }; + +--- a/drivers/scsi/qla2xxx/qla_target.h ++++ b/drivers/scsi/qla2xxx/qla_target.h +@@ -144,37 +144,37 @@ struct nack_to_isp { + uint8_t entry_status; /* Entry Status. */ + union { + struct { +- uint32_t sys_define_2; /* System defined. */ ++ __le32 sys_define_2; /* System defined. */ + target_id_t target; + uint8_t target_id; + uint8_t reserved_1; +- uint16_t flags; +- uint16_t resp_code; +- uint16_t status; +- uint16_t task_flags; +- uint16_t seq_id; +- uint16_t srr_rx_id; +- uint32_t srr_rel_offs; +- uint16_t srr_ui; +- uint16_t srr_flags; +- uint16_t srr_reject_code; ++ __le16 flags; ++ __le16 resp_code; ++ __le16 status; ++ __le16 task_flags; ++ __le16 seq_id; ++ __le16 srr_rx_id; ++ __le32 srr_rel_offs; ++ __le16 srr_ui; ++ __le16 srr_flags; ++ __le16 srr_reject_code; + uint8_t srr_reject_vendor_uniq; + uint8_t srr_reject_code_expl; + uint8_t reserved_2[24]; + } isp2x; + struct { + uint32_t handle; +- uint16_t nport_handle; ++ __le16 nport_handle; + uint16_t reserved_1; +- uint16_t flags; +- uint16_t srr_rx_id; +- uint16_t status; ++ __le16 flags; ++ __le16 srr_rx_id; ++ __le16 status; + uint8_t status_subcode; + uint8_t fw_handle; +- uint32_t exchange_address; +- uint32_t srr_rel_offs; +- uint16_t srr_ui; +- uint16_t srr_flags; ++ __le32 exchange_address; ++ __le32 srr_rel_offs; ++ __le16 srr_ui; ++ __le16 srr_flags; + uint8_t reserved_4[19]; + uint8_t vp_index; + uint8_t srr_reject_vendor_uniq; +@@ -184,7 +184,7 @@ struct nack_to_isp { + } isp24; + } u; + uint8_t reserved[2]; +- uint16_t ox_id; ++ __le16 ox_id; + } __packed; + #define NOTIFY_ACK_FLAGS_TERMINATE BIT_3 + #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 +@@ -215,16 +215,16 @@ struct ctio_to_2xxx { + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ + target_id_t target; +- uint16_t rx_id; +- uint16_t flags; +- uint16_t status; +- uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ +- uint16_t dseg_count; /* Data segment count. */ +- uint32_t relative_offset; +- uint32_t residual; +- uint16_t reserved_1[3]; +- uint16_t scsi_status; +- uint32_t transfer_length; ++ __le16 rx_id; ++ __le16 flags; ++ __le16 status; ++ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ ++ __le16 dseg_count; /* Data segment count. 
*/ ++ __le32 relative_offset; ++ __le32 residual; ++ __le16 reserved_1[3]; ++ __le16 scsi_status; ++ __le32 transfer_length; + struct dsd32 dsd[3]; + } __packed; + #define ATIO_PATH_INVALID 0x07 +@@ -266,7 +266,7 @@ struct fcp_hdr { + uint16_t seq_cnt; + __be16 ox_id; + uint16_t rx_id; +- uint32_t parameter; ++ __le32 parameter; + } __packed; + + struct fcp_hdr_le { +@@ -276,12 +276,12 @@ struct fcp_hdr_le { + uint8_t cs_ctl; + uint8_t f_ctl[3]; + uint8_t type; +- uint16_t seq_cnt; ++ __le16 seq_cnt; + uint8_t df_ctl; + uint8_t seq_id; +- uint16_t rx_id; +- uint16_t ox_id; +- uint32_t parameter; ++ __le16 rx_id; ++ __le16 ox_id; ++ __le32 parameter; + } __packed; + + #define F_CTL_EXCH_CONTEXT_RESP BIT_23 +@@ -315,7 +315,7 @@ struct atio7_fcp_cmnd { + * BUILD_BUG_ON in qlt_init(). + */ + uint8_t add_cdb[4]; +- /* uint32_t data_length; */ ++ /* __le32 data_length; */ + } __packed; + + /* +@@ -325,31 +325,31 @@ struct atio7_fcp_cmnd { + struct atio_from_isp { + union { + struct { +- uint16_t entry_hdr; ++ __le16 entry_hdr; + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ +- uint32_t sys_define_2; /* System defined. */ ++ __le32 sys_define_2; /* System defined. */ + target_id_t target; +- uint16_t rx_id; +- uint16_t flags; +- uint16_t status; ++ __le16 rx_id; ++ __le16 flags; ++ __le16 status; + uint8_t command_ref; + uint8_t task_codes; + uint8_t task_flags; + uint8_t execution_codes; + uint8_t cdb[MAX_CMDSZ]; +- uint32_t data_length; +- uint16_t lun; ++ __le32 data_length; ++ __le16 lun; + uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */ +- uint16_t reserved_32[6]; +- uint16_t ox_id; ++ __le16 reserved_32[6]; ++ __le16 ox_id; + } isp2x; + struct { +- uint16_t entry_hdr; ++ __le16 entry_hdr; + uint8_t fcp_cmnd_len_low; + uint8_t fcp_cmnd_len_high:4; + uint8_t attr:4; +- uint32_t exchange_addr; ++ __le32 exchange_addr; + #define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF + struct fcp_hdr fcp_hdr; + struct atio7_fcp_cmnd fcp_cmnd; +@@ -361,7 +361,7 @@ struct atio_from_isp { + #define FCP_CMD_LENGTH_MASK 0x0fff + #define FCP_CMD_LENGTH_MIN 0x38 + uint8_t data[56]; +- uint32_t signature; ++ __le32 signature; + #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ + } raw; + } u; +@@ -404,36 +404,36 @@ struct ctio7_to_24xx { + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ +- uint16_t nport_handle; ++ __le16 nport_handle; + #define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF +- uint16_t timeout; +- uint16_t dseg_count; /* Data segment count. */ ++ __le16 timeout; ++ __le16 dseg_count; /* Data segment count. 
*/ + uint8_t vp_index; + uint8_t add_flags; + le_id_t initiator_id; + uint8_t reserved; +- uint32_t exchange_addr; ++ __le32 exchange_addr; + union { + struct { +- uint16_t reserved1; ++ __le16 reserved1; + __le16 flags; +- uint32_t residual; ++ __le32 residual; + __le16 ox_id; +- uint16_t scsi_status; +- uint32_t relative_offset; +- uint32_t reserved2; +- uint32_t transfer_length; +- uint32_t reserved3; ++ __le16 scsi_status; ++ __le32 relative_offset; ++ __le32 reserved2; ++ __le32 transfer_length; ++ __le32 reserved3; + struct dsd64 dsd; + } status0; + struct { +- uint16_t sense_length; ++ __le16 sense_length; + __le16 flags; +- uint32_t residual; ++ __le32 residual; + __le16 ox_id; +- uint16_t scsi_status; +- uint16_t response_len; +- uint16_t reserved; ++ __le16 scsi_status; ++ __le16 response_len; ++ __le16 reserved; + uint8_t sense_data[24]; + } status1; + } u; +@@ -449,18 +449,18 @@ struct ctio7_from_24xx { + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; /* System defined handle */ +- uint16_t status; +- uint16_t timeout; +- uint16_t dseg_count; /* Data segment count. */ ++ __le16 status; ++ __le16 timeout; ++ __le16 dseg_count; /* Data segment count. */ + uint8_t vp_index; + uint8_t reserved1[5]; +- uint32_t exchange_address; +- uint16_t reserved2; +- uint16_t flags; +- uint32_t residual; +- uint16_t ox_id; +- uint16_t reserved3; +- uint32_t relative_offset; ++ __le32 exchange_address; ++ __le16 reserved2; ++ __le16 flags; ++ __le32 residual; ++ __le16 ox_id; ++ __le16 reserved3; ++ __le32 relative_offset; + uint8_t reserved4[24]; + } __packed; + +@@ -498,29 +498,29 @@ struct ctio_crc2_to_fw { + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ +- uint16_t nport_handle; /* N_PORT handle. */ ++ __le16 nport_handle; /* N_PORT handle. */ + __le16 timeout; /* Command timeout. */ + +- uint16_t dseg_count; /* Data segment count. */ ++ __le16 dseg_count; /* Data segment count. */ + uint8_t vp_index; + uint8_t add_flags; /* additional flags */ + #define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 + + le_id_t initiator_id; /* initiator ID */ + uint8_t reserved1; +- uint32_t exchange_addr; /* rcv exchange address */ +- uint16_t reserved2; ++ __le32 exchange_addr; /* rcv exchange address */ ++ __le16 reserved2; + __le16 flags; /* refer to CTIO7 flags values */ +- uint32_t residual; ++ __le32 residual; + __le16 ox_id; +- uint16_t scsi_status; ++ __le16 scsi_status; + __le32 relative_offset; +- uint32_t reserved5; ++ __le32 reserved5; + __le32 transfer_length; /* total fc transfer length */ +- uint32_t reserved6; ++ __le32 reserved6; + __le64 crc_context_address __packed; /* Data segment address. */ +- uint16_t crc_context_len; /* Data segment length. */ +- uint16_t reserved_1; /* MUST be set to 0. */ ++ __le16 crc_context_len; /* Data segment length. */ ++ __le16 reserved_1; /* MUST be set to 0. */ + }; + + /* CTIO Type CRC_x Status IOCB */ +@@ -531,20 +531,20 @@ struct ctio_crc_from_fw { + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ +- uint16_t status; +- uint16_t timeout; /* Command timeout. */ +- uint16_t dseg_count; /* Data segment count. */ +- uint32_t reserved1; +- uint16_t state_flags; ++ __le16 status; ++ __le16 timeout; /* Command timeout. */ ++ __le16 dseg_count; /* Data segment count. 
*/ ++ __le32 reserved1; ++ __le16 state_flags; + #define CTIO_CRC_SF_DIF_CHOPPED BIT_4 + +- uint32_t exchange_address; /* rcv exchange address */ +- uint16_t reserved2; +- uint16_t flags; +- uint32_t resid_xfer_length; +- uint16_t ox_id; ++ __le32 exchange_address; /* rcv exchange address */ ++ __le16 reserved2; ++ __le16 flags; ++ __le32 resid_xfer_length; ++ __le16 ox_id; + uint8_t reserved3[12]; +- uint16_t runt_guard; /* reported runt blk guard */ ++ __le16 runt_guard; /* reported runt blk guard */ + uint8_t actual_dif[8]; + uint8_t expected_dif[8]; + } __packed; +@@ -567,29 +567,29 @@ struct abts_recv_from_24xx { + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint8_t reserved_1[6]; +- uint16_t nport_handle; ++ __le16 nport_handle; + uint8_t reserved_2[2]; + uint8_t vp_index; + uint8_t reserved_3:4; + uint8_t sof_type:4; +- uint32_t exchange_address; ++ __le32 exchange_address; + struct fcp_hdr_le fcp_hdr_le; + uint8_t reserved_4[16]; +- uint32_t exchange_addr_to_abort; ++ __le32 exchange_addr_to_abort; + } __packed; + + #define ABTS_PARAM_ABORT_SEQ BIT_0 + + struct ba_acc_le { +- uint16_t reserved; ++ __le16 reserved; + uint8_t seq_id_last; + uint8_t seq_id_valid; + #define SEQ_ID_VALID 0x80 + #define SEQ_ID_INVALID 0x00 +- uint16_t rx_id; +- uint16_t ox_id; +- uint16_t high_seq_cnt; +- uint16_t low_seq_cnt; ++ __le16 rx_id; ++ __le16 ox_id; ++ __le16 high_seq_cnt; ++ __le16 low_seq_cnt; + } __packed; + + struct ba_rjt_le { +@@ -613,21 +613,21 @@ struct abts_resp_to_24xx { + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + uint32_t handle; +- uint16_t reserved_1; +- uint16_t nport_handle; +- uint16_t control_flags; ++ __le16 reserved_1; ++ __le16 nport_handle; ++ __le16 control_flags; + #define ABTS_CONTR_FLG_TERM_EXCHG BIT_0 + uint8_t vp_index; + uint8_t reserved_3:4; + uint8_t sof_type:4; +- uint32_t exchange_address; ++ __le32 exchange_address; + struct fcp_hdr_le fcp_hdr_le; + union { + struct ba_acc_le ba_acct; + struct ba_rjt_le ba_rjt; + } __packed payload; +- uint32_t reserved_4; +- uint32_t exchange_addr_to_abort; ++ __le32 reserved_4; ++ __le32 exchange_addr_to_abort; + } __packed; + + /* +@@ -643,21 +643,21 @@ struct abts_resp_from_24xx_fw { + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. 
*/ + uint32_t handle; +- uint16_t compl_status; ++ __le16 compl_status; + #define ABTS_RESP_COMPL_SUCCESS 0 + #define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31 +- uint16_t nport_handle; +- uint16_t reserved_1; ++ __le16 nport_handle; ++ __le16 reserved_1; + uint8_t reserved_2; + uint8_t reserved_3:4; + uint8_t sof_type:4; +- uint32_t exchange_address; ++ __le32 exchange_address; + struct fcp_hdr_le fcp_hdr_le; + uint8_t reserved_4[8]; +- uint32_t error_subcode1; ++ __le32 error_subcode1; + #define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E +- uint32_t error_subcode2; +- uint32_t exchange_addr_to_abort; ++ __le32 error_subcode2; ++ __le32 exchange_addr_to_abort; + } __packed; + + /********************************************************************\ +--- a/drivers/scsi/qla2xxx/qla_tmpl.h ++++ b/drivers/scsi/qla2xxx/qla_tmpl.h +@@ -27,7 +27,7 @@ struct __packed qla27xx_fwdt_template { + uint32_t saved_state[16]; + + uint32_t reserved_3[8]; +- uint32_t firmware_version[5]; ++ __le32 firmware_version[5]; + }; + + #define TEMPLATE_TYPE_FWDUMP 99 diff --git a/patches.suse/scsi-qla2xxx-Fix-endianness-annotations-in-source-fi.patch b/patches.suse/scsi-qla2xxx-Fix-endianness-annotations-in-source-fi.patch new file mode 100644 index 0000000..bae9fff --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-endianness-annotations-in-source-fi.patch @@ -0,0 +1,2230 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:12 -0700 +Subject: scsi: qla2xxx: Fix endianness annotations in source files +Patch-mainline: v5.8-rc1 +Git-commit: 7ffa5b939751b6638e4a99518775c8503fbb46be +References: bsc#1171688 bsc#1174003 + +Fix all endianness complaints reported by sparse (C=2) without affecting +the behavior of the code on little endian CPUs. + +Link: https://lore.kernel.org/r/20200518211712.11395-16-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Daniel Wagner +Cc: Roman Bolshakov +Reviewed-by: Himanshu Madhani +Reviewed-by: Hannes Reinecke +Reviewed-by: Daniel Wagner +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_attr.c | 3 - + drivers/scsi/qla2xxx/qla_bsg.c | 4 - + drivers/scsi/qla2xxx/qla_dbg.c | 87 +++++++++++++++++------------------ + drivers/scsi/qla2xxx/qla_init.c | 59 ++++++++++++------------ + drivers/scsi/qla2xxx/qla_iocb.c | 71 +++++++++++++++-------------- + drivers/scsi/qla2xxx/qla_isr.c | 93 ++++++++++++++++++-------------------- + drivers/scsi/qla2xxx/qla_mbx.c | 37 +++++++-------- + drivers/scsi/qla2xxx/qla_mr.c | 9 +-- + drivers/scsi/qla2xxx/qla_nvme.c | 8 +-- + drivers/scsi/qla2xxx/qla_nx.c | 89 ++++++++++++++++++------------------ + drivers/scsi/qla2xxx/qla_os.c | 27 +++++------ + drivers/scsi/qla2xxx/qla_sup.c | 69 +++++++++++++++------------- + drivers/scsi/qla2xxx/qla_target.c | 86 +++++++++++++++++------------------ + drivers/scsi/qla2xxx/qla_tmpl.c | 6 +- + 14 files changed, 329 insertions(+), 319 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -227,10 +227,9 @@ qla2x00_sysfs_write_nvram(struct file *f + + /* Checksum NVRAM. 
*/ + if (IS_FWI2_CAPABLE(ha)) { +- uint32_t *iter; ++ __le32 *iter = (__force __le32 *)buf; + uint32_t chksum; + +- iter = (uint32_t *)buf; + chksum = 0; + for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++) + chksum += le32_to_cpu(*iter); +--- a/drivers/scsi/qla2xxx/qla_bsg.c ++++ b/drivers/scsi/qla2xxx/qla_bsg.c +@@ -490,7 +490,7 @@ qla2x00_process_ct(struct bsg_job *bsg_j + >> 24; + switch (loop_id) { + case 0xFC: +- loop_id = cpu_to_le16(NPH_SNS); ++ loop_id = NPH_SNS; + break; + case 0xFA: + loop_id = vha->mgmt_svr_loop_id; +@@ -2042,7 +2042,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job + + /* Initialize all required fields of fcport */ + fcport->vha = vha; +- fcport->loop_id = piocb_rqst->dataword; ++ fcport->loop_id = le32_to_cpu(piocb_rqst->dataword); + + sp->type = SRB_FXIOCB_BCMD; + sp->name = "bsg_fx_mgmt"; +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -189,8 +189,8 @@ qla27xx_dump_mpi_ram(struct qla_hw_data + } + + int +-qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, +- uint32_t ram_dwords, void **nxt) ++qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, ++ uint32_t ram_dwords, void **nxt) + { + int rval = QLA_FUNCTION_FAILED; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; +@@ -254,9 +254,9 @@ qla24xx_dump_ram(struct qla_hw_data *ha, + return rval; + } + for (j = 0; j < dwords; j++) { +- ram[i + j] = +- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? +- chunk[j] : swab32(chunk[j]); ++ ram[i + j] = (__force __be32) ++ ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? ++ chunk[j] : swab32(chunk[j])); + } + } + +@@ -265,8 +265,8 @@ qla24xx_dump_ram(struct qla_hw_data *ha, + } + + static int +-qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram, +- uint32_t cram_size, void **nxt) ++qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram, ++ uint32_t cram_size, void **nxt) + { + int rval; + +@@ -286,11 +286,11 @@ qla24xx_dump_memory(struct qla_hw_data * + return rval; + } + +-static uint32_t * ++static __be32 * + qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, +- uint32_t count, uint32_t *buf) ++ uint32_t count, __be32 *buf) + { +- uint32_t __iomem *dmp_reg; ++ __le32 __iomem *dmp_reg; + + wrt_reg_dword(®->iobase_addr, iobase); + dmp_reg = ®->iobase_window; +@@ -368,7 +368,7 @@ qla24xx_soft_reset(struct qla_hw_data *h + } + + static int +-qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, ++qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, + uint32_t ram_words, void **nxt) + { + int rval; +@@ -376,7 +376,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, + uint16_t mb0; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + dma_addr_t dump_dma = ha->gid_list_dma; +- uint16_t *dump = (uint16_t *)ha->gid_list; ++ __le16 *dump = (__force __le16 *)ha->gid_list; + + rval = QLA_SUCCESS; + mb0 = 0; +@@ -441,7 +441,8 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, + if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { + rval = mb0 & MBS_MASK; + for (idx = 0; idx < words; idx++) +- ram[cnt + idx] = swab16(dump[idx]); ++ ram[cnt + idx] = ++ cpu_to_be16(le16_to_cpu(dump[idx])); + } else { + rval = QLA_FUNCTION_FAILED; + } +@@ -453,9 +454,9 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, + + static inline void + qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, +- uint16_t *buf) ++ __be16 *buf) + { +- uint16_t __iomem *dmp_reg = ®->u.isp2300.fb_cmd; ++ __le16 __iomem *dmp_reg = ®->u.isp2300.fb_cmd; + + for ( ; count--; 
dmp_reg++) + *buf++ = htons(rd_reg_word(dmp_reg)); +@@ -472,10 +473,10 @@ qla24xx_copy_eft(struct qla_hw_data *ha, + } + + static inline void * +-qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) ++qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) + { + uint32_t cnt; +- uint32_t *iter_reg; ++ __be32 *iter_reg; + struct qla2xxx_fce_chain *fcec = ptr; + + if (!ha->fce) +@@ -499,7 +500,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, + } + + static inline void * +-qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) ++qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) + { + struct qla2xxx_offld_chain *c = ptr; + +@@ -517,11 +518,11 @@ qla25xx_copy_exlogin(struct qla_hw_data + ptr += sizeof(struct qla2xxx_offld_chain); + memcpy(ptr, ha->exlogin_buf, ha->exlogin_size); + +- return (char *)ptr + cpu_to_be32(c->size); ++ return (char *)ptr + be32_to_cpu(c->size); + } + + static inline void * +-qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) ++qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) + { + struct qla2xxx_offld_chain *c = ptr; + +@@ -539,12 +540,12 @@ qla81xx_copy_exchoffld(struct qla_hw_dat + ptr += sizeof(struct qla2xxx_offld_chain); + memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size); + +- return (char *)ptr + cpu_to_be32(c->size); ++ return (char *)ptr + be32_to_cpu(c->size); + } + + static inline void * + qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, +- uint32_t **last_chain) ++ __be32 **last_chain) + { + struct qla2xxx_mqueue_chain *q; + struct qla2xxx_mqueue_header *qh; +@@ -591,7 +592,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_da + } + + static inline void * +-qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) ++qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) + { + struct qla2xxx_mqueue_chain *q; + struct qla2xxx_mqueue_header *qh; +@@ -662,7 +663,7 @@ qla25xx_copy_mqueues(struct qla_hw_data + } + + static inline void * +-qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) ++qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) + { + uint32_t cnt, que_idx; + uint8_t que_cnt; +@@ -736,7 +737,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha) + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; +- uint16_t __iomem *dmp_reg; ++ __le16 __iomem *dmp_reg; + struct qla2300_fw_dump *fw; + void *nxt; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); +@@ -893,7 +894,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha) + uint16_t mb0 = 0, mb2 = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; +- uint16_t __iomem *dmp_reg; ++ __le16 __iomem *dmp_reg; + struct qla2100_fw_dump *fw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + +@@ -1074,13 +1075,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha) + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; +- uint32_t __iomem *dmp_reg; +- uint32_t *iter_reg; +- uint16_t __iomem *mbx_reg; ++ __le32 __iomem *dmp_reg; ++ __be32 *iter_reg; ++ __le16 __iomem *mbx_reg; + struct qla24xx_fw_dump *fw; + void *nxt; + void *nxt_chain; +- uint32_t *last_chain = NULL; ++ __be32 *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); +@@ -1320,12 +1321,12 @@ 
qla25xx_fw_dump(scsi_qla_host_t *vha) + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; +- uint32_t __iomem *dmp_reg; +- uint32_t *iter_reg; +- uint16_t __iomem *mbx_reg; ++ __le32 __iomem *dmp_reg; ++ __be32 *iter_reg; ++ __le16 __iomem *mbx_reg; + struct qla25xx_fw_dump *fw; + void *nxt, *nxt_chain; +- uint32_t *last_chain = NULL; ++ __be32 *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); +@@ -1633,12 +1634,12 @@ qla81xx_fw_dump(scsi_qla_host_t *vha) + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; +- uint32_t __iomem *dmp_reg; +- uint32_t *iter_reg; +- uint16_t __iomem *mbx_reg; ++ __le32 __iomem *dmp_reg; ++ __be32 *iter_reg; ++ __le16 __iomem *mbx_reg; + struct qla81xx_fw_dump *fw; + void *nxt, *nxt_chain; +- uint32_t *last_chain = NULL; ++ __be32 *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); +@@ -1948,12 +1949,12 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; +- uint32_t __iomem *dmp_reg; +- uint32_t *iter_reg; +- uint16_t __iomem *mbx_reg; ++ __le32 __iomem *dmp_reg; ++ __be32 *iter_reg; ++ __le16 __iomem *mbx_reg; + struct qla83xx_fw_dump *fw; + void *nxt, *nxt_chain; +- uint32_t *last_chain = NULL; ++ __be32 *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + lockdep_assert_held(&ha->hardware_lock); +@@ -2660,7 +2661,7 @@ ql_dump_regs(uint level, scsi_qla_host_t + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; +- uint16_t __iomem *mbx_reg; ++ __le16 __iomem *mbx_reg; + + if (!ql_mask_match(level)) + return; +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -120,7 +120,7 @@ static void qla24xx_abort_iocb_timeout(v + if (sp->cmd_sp) + sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); + +- abt->u.abt.comp_status = CS_TIMEOUT; ++ abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); + sp->done(sp, QLA_OS_TIMER_EXPIRED); + } + +@@ -1793,7 +1793,7 @@ qla2x00_tmf_iocb_timeout(void *data) + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); +- tmf->u.tmf.comp_status = CS_TIMEOUT; ++ tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT); + tmf->u.tmf.data = QLA_FUNCTION_FAILED; + complete(&tmf->u.tmf.comp); + } +@@ -4091,7 +4091,7 @@ qla24xx_config_rings(struct scsi_qla_hos + ql_dbg(ql_dbg_init, vha, 0x00fd, + "Speed set by user : %s Gbps \n", + qla2x00_get_link_speed_str(ha, ha->set_data_rate)); +- icb->firmware_options_3 = (ha->set_data_rate << 13); ++ icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13); + } + + /* PCI posting */ +@@ -4182,12 +4182,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha) + mid_init_cb->init_cb.execution_throttle = + cpu_to_le16(ha->cur_fw_xcb_count); + ha->flags.dport_enabled = +- (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0; ++ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & ++ BIT_7) != 0; + ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n", + (ha->flags.dport_enabled) ? 
"enabled" : "disabled"); + /* FA-WWPN Status */ + ha->flags.fawwpn_enabled = +- (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0; ++ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & ++ BIT_6) != 0; + ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n", + (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); + } +@@ -7152,7 +7154,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vh + int rval; + struct init_cb_24xx *icb; + struct nvram_24xx *nv; +- uint32_t *dptr; ++ __le32 *dptr; + uint8_t *dptr1, *dptr2; + uint32_t chksum; + uint16_t cnt; +@@ -7180,7 +7182,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vh + ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); + + /* Get NVRAM data into cache and calculate checksum. */ +- dptr = (uint32_t *)nv; ++ dptr = (__force __le32 *)nv; + ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size); + for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) + chksum += le32_to_cpu(*dptr); +@@ -7208,7 +7210,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vh + memset(nv, 0, ha->nvram_size); + nv->nvram_version = cpu_to_le16(ICB_VERSION); + nv->version = cpu_to_le16(ICB_VERSION); +- nv->frame_payload_size = 2048; ++ nv->frame_payload_size = cpu_to_le16(2048); + nv->execution_throttle = cpu_to_le16(0xFFFF); + nv->exchange_count = cpu_to_le16(0); + nv->hard_address = cpu_to_le16(124); +@@ -7376,7 +7378,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vh + ha->login_retry_count = ql2xloginretrycount; + + /* N2N: driver will initiate Login instead of FW */ +- icb->firmware_options_3 |= BIT_8; ++ icb->firmware_options_3 |= cpu_to_le32(BIT_8); + + /* Enable ZIO. */ + if (!vha->flags.init_done) { +@@ -7444,7 +7446,7 @@ qla27xx_check_image_status_signature(str + static ulong + qla27xx_image_status_checksum(struct qla27xx_image_status *image_status) + { +- uint32_t *p = (uint32_t *)image_status; ++ __le32 *p = (__force __le32 *)image_status; + uint n = sizeof(*image_status) / sizeof(*p); + uint32_t sum = 0; + +@@ -7732,11 +7734,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t + ql_dbg(ql_dbg_init, vha, 0x008d, + "-> Loading segment %u...\n", j); + qla24xx_read_flash_data(vha, dcode, faddr, 10); +- risc_addr = be32_to_cpu(dcode[2]); +- risc_size = be32_to_cpu(dcode[3]); ++ risc_addr = be32_to_cpu((__force __be32)dcode[2]); ++ risc_size = be32_to_cpu((__force __be32)dcode[3]); + if (!*srisc_addr) { + *srisc_addr = risc_addr; +- risc_attr = be32_to_cpu(dcode[9]); ++ risc_attr = be32_to_cpu((__force __be32)dcode[9]); + } + + dlen = ha->fw_transfer_size >> 2; +@@ -7778,7 +7780,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t + + dcode = (uint32_t *)req->ring; + qla24xx_read_flash_data(vha, dcode, faddr, 7); +- risc_size = be32_to_cpu(dcode[2]); ++ risc_size = be32_to_cpu((__force __be32)dcode[2]); + ql_dbg(ql_dbg_init, vha, 0x0161, + "-> fwdt%u template array at %#x (%#x dwords)\n", + j, faddr, risc_size); +@@ -7847,7 +7849,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, + { + int rval; + int i, fragment; +- uint16_t *wcode, *fwcode; ++ uint16_t *wcode; ++ __be16 *fwcode; + uint32_t risc_addr, risc_size, fwclen, wlen, *seg; + struct fw_blob *blob; + struct qla_hw_data *ha = vha->hw; +@@ -7867,7 +7870,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, + + wcode = (uint16_t *)req->ring; + *srisc_addr = 0; +- fwcode = (uint16_t *)blob->fw->data; ++ fwcode = (__force __be16 *)blob->fw->data; + fwclen = 0; + + /* Validate firmware image by checking version. 
*/ +@@ -7915,7 +7918,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, + "words 0x%x.\n", risc_addr, wlen); + + for (i = 0; i < wlen; i++) +- wcode[i] = swab16(fwcode[i]); ++ wcode[i] = swab16((__force u32)fwcode[i]); + + rval = qla2x00_load_ram(vha, req->dma, risc_addr, + wlen); +@@ -7952,7 +7955,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t * + ulong i; + uint j; + struct fw_blob *blob; +- uint32_t *fwcode; ++ __be32 *fwcode; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct fwdt *fwdt = ha->fwdt; +@@ -7968,8 +7971,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t * + return QLA_FUNCTION_FAILED; + } + +- fwcode = (uint32_t *)blob->fw->data; +- dcode = fwcode; ++ fwcode = (__force __be32 *)blob->fw->data; ++ dcode = (__force uint32_t *)fwcode; + if (qla24xx_risc_firmware_invalid(dcode)) { + ql_log(ql_log_fatal, vha, 0x0093, + "Unable to verify integrity of firmware image (%zd).\n", +@@ -8006,7 +8009,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t * + dlen); + + for (i = 0; i < dlen; i++) +- dcode[i] = swab32(fwcode[i]); ++ dcode[i] = swab32((__force u32)fwcode[i]); + + rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); + if (rval) { +@@ -8060,7 +8063,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t * + + dcode = fwdt->template; + for (i = 0; i < risc_size; i++) +- dcode[i] = fwcode[i]; ++ dcode[i] = (__force u32)fwcode[i]; + + if (!qla27xx_fwdt_template_valid(dcode)) { + ql_log(ql_log_warn, vha, 0x0175, +@@ -8331,7 +8334,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vh + int rval; + struct init_cb_81xx *icb; + struct nvram_81xx *nv; +- uint32_t *dptr; ++ __le32 *dptr; + uint8_t *dptr1, *dptr2; + uint32_t chksum; + uint16_t cnt; +@@ -8378,7 +8381,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vh + "primary" : "secondary"); + ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); + +- dptr = (uint32_t *)nv; ++ dptr = (__force __le32 *)nv; + for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) + chksum += le32_to_cpu(*dptr); + +@@ -8405,7 +8408,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vh + memset(nv, 0, ha->nvram_size); + nv->nvram_version = cpu_to_le16(ICB_VERSION); + nv->version = cpu_to_le16(ICB_VERSION); +- nv->frame_payload_size = 2048; ++ nv->frame_payload_size = cpu_to_le16(2048); + nv->execution_throttle = cpu_to_le16(0xFFFF); + nv->exchange_count = cpu_to_le16(0); + nv->port_name[0] = 0x21; +@@ -8449,7 +8452,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vh + } + + if (IS_T10_PI_CAPABLE(ha)) +- nv->frame_payload_size &= ~7; ++ nv->frame_payload_size &= cpu_to_le16(~7); + + qlt_81xx_config_nvram_stage1(vha, nv); + +@@ -8612,10 +8615,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vh + } + + /* enable RIDA Format2 */ +- icb->firmware_options_3 |= BIT_0; ++ icb->firmware_options_3 |= cpu_to_le32(BIT_0); + + /* N2N: driver will initiate Login instead of FW */ +- icb->firmware_options_3 |= BIT_8; ++ icb->firmware_options_3 |= cpu_to_le32(BIT_8); + + /* Determine NVMe/FCP priority for target ports */ + ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); +--- a/drivers/scsi/qla2xxx/qla_iocb.c ++++ b/drivers/scsi/qla2xxx/qla_iocb.c +@@ -661,7 +661,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *s + cur_dsd->address = 0; + cur_dsd->length = 0; + cur_dsd++; +- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE; ++ cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); + return 0; + } + +@@ -755,8 +755,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, stru + } + + struct fw_dif_context { +- uint32_t ref_tag; +- uint16_t app_tag; ++ __le32 ref_tag; ++ 
__le16 app_tag; + uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ + uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ + }; +@@ -1389,7 +1389,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp + uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) + { + struct dsd64 *cur_dsd; +- uint32_t *fcp_dl; ++ __be32 *fcp_dl; + scsi_qla_host_t *vha; + struct scsi_cmnd *cmd; + uint32_t total_bytes = 0; +@@ -1456,7 +1456,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp + &crc_ctx_pkt->ref_tag, tot_prot_dsds); + + put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address); +- cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW; ++ cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); + + /* Determine SCSI command length -- align to 4 byte boundary */ + if (cmd->cmd_len > 16) { +@@ -1545,7 +1545,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp + crc_ctx_pkt->guard_seed = cpu_to_le16(0); + /* Fibre channel byte count */ + cmd_pkt->byte_count = cpu_to_le32(total_bytes); +- fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + ++ fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + + additional_fcpcdb_len); + *fcp_dl = htonl(total_bytes); + +@@ -2344,9 +2344,10 @@ qla24xx_prli_iocb(srb_t *sp, struct logi + logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; + logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); + if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { +- logio->control_flags |= LCF_NVME_PRLI; ++ logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI); + if (sp->vha->flags.nvme_first_burst) +- logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST; ++ logio->io_parameter[0] = ++ cpu_to_le32(NVME_PRLI_SP_FIRST_BURST); + } + + logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); +@@ -2683,7 +2684,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct + els_iocb->entry_status = 0; + els_iocb->handle = sp->handle; + els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); +- els_iocb->tx_dsd_count = 1; ++ els_iocb->tx_dsd_count = cpu_to_le16(1); + els_iocb->vp_index = vha->vp_idx; + els_iocb->sof_type = EST_SOFI3; + els_iocb->rx_dsd_count = 0; +@@ -2703,7 +2704,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct + cpu_to_le32(sizeof(struct els_plogi_payload)); + put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, + &els_iocb->tx_address); +- els_iocb->rx_dsd_count = 1; ++ els_iocb->rx_dsd_count = cpu_to_le16(1); + els_iocb->rx_byte_count = els_iocb->rx_len = + cpu_to_le32(sizeof(struct els_plogi_payload)); + put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, +@@ -2715,7 +2716,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct + (uint8_t *)els_iocb, + sizeof(*els_iocb)); + } else { +- els_iocb->control_flags = 1 << 13; ++ els_iocb->control_flags = cpu_to_le16(1 << 13); + els_iocb->tx_byte_count = + cpu_to_le32(sizeof(struct els_logo_payload)); + put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, +@@ -2790,7 +2791,7 @@ static void qla2x00_els_dcmd2_sp_done(sr + struct qla_work_evt *e; + struct fc_port *conflict_fcport; + port_id_t cid; /* conflict Nport id */ +- u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; ++ const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; + u16 lid; + + ql_dbg(ql_dbg_disc, vha, 0x3072, +@@ -2803,7 +2804,7 @@ static void qla2x00_els_dcmd2_sp_done(sr + if (sp->flags & SRB_WAKEUP_ON_COMP) + complete(&lio->u.els_plogi.comp); + else { +- switch (fw_status[0]) { ++ switch (le32_to_cpu(fw_status[0])) { + case CS_DATA_UNDERRUN: + case CS_COMPLETE: + memset(&ea, 0, sizeof(ea)); +@@ -2813,9 +2814,9 @@ static void qla2x00_els_dcmd2_sp_done(sr + break; + + case 
CS_IOCB_ERROR: +- switch (fw_status[1]) { ++ switch (le32_to_cpu(fw_status[1])) { + case LSC_SCODE_PORTID_USED: +- lid = fw_status[2] & 0xffff; ++ lid = le32_to_cpu(fw_status[2]) & 0xffff; + qlt_find_sess_invalidate_other(vha, + wwn_to_u64(fcport->port_name), + fcport->d_id, lid, &conflict_fcport); +@@ -2849,9 +2850,11 @@ static void qla2x00_els_dcmd2_sp_done(sr + break; + + case LSC_SCODE_NPORT_USED: +- cid.b.domain = (fw_status[2] >> 16) & 0xff; +- cid.b.area = (fw_status[2] >> 8) & 0xff; +- cid.b.al_pa = fw_status[2] & 0xff; ++ cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) ++ & 0xff; ++ cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) ++ & 0xff; ++ cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0x20ec, +@@ -3025,7 +3028,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_e + els_iocb->sys_define = 0; + els_iocb->entry_status = 0; + els_iocb->handle = sp->handle; +- els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); ++ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); + els_iocb->vp_index = sp->vha->vp_idx; + els_iocb->sof_type = EST_SOFI3; +@@ -3219,7 +3222,7 @@ qla82xx_start_scsi(srb_t *sp) + uint16_t tot_dsds; + struct device_reg_82xx __iomem *reg; + uint32_t dbval; +- uint32_t *fcp_dl; ++ __be32 *fcp_dl; + uint8_t additional_cdb_len; + struct ct6_dsd *ctx; + struct scsi_qla_host *vha = sp->vha; +@@ -3401,7 +3404,7 @@ qla82xx_start_scsi(srb_t *sp) + + memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); + +- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + ++ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + + additional_cdb_len); + *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); + +@@ -3539,7 +3542,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abo + memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); + abt_iocb->entry_type = ABORT_IOCB_TYPE; + abt_iocb->entry_count = 1; +- abt_iocb->handle = cpu_to_le32(make_handle(req->id, sp->handle)); ++ abt_iocb->handle = make_handle(req->id, sp->handle); + if (sp->fcport) { + abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; +@@ -3547,10 +3550,10 @@ qla24xx_abort_iocb(srb_t *sp, struct abo + abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; + } + abt_iocb->handle_to_abort = +- cpu_to_le32(make_handle(aio->u.abt.req_que_no, +- aio->u.abt.cmd_hndl)); ++ make_handle(le16_to_cpu(aio->u.abt.req_que_no), ++ aio->u.abt.cmd_hndl); + abt_iocb->vp_index = vha->vp_idx; +- abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no); ++ abt_iocb->req_que_no = aio->u.abt.req_que_no; + /* Send the command to the firmware */ + wmb(); + } +@@ -3565,7 +3568,7 @@ qla2x00_mb_iocb(srb_t *sp, struct mbx_24 + sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); + + for (i = 0; i < sz; i++) +- mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]); ++ mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; + } + + static void +@@ -3589,7 +3592,7 @@ static void qla2x00_send_notify_ack_iocb + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & +- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); ++ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; +@@ -3616,20 +3619,20 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_req + nvme = &sp->u.iocb_cmd; + cmd_pkt->entry_type = PT_LS4_REQUEST; + 
cmd_pkt->entry_count = 1; +- cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT; ++ cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); + + cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->vp_index = sp->fcport->vha->vp_idx; + +- cmd_pkt->tx_dseg_count = 1; +- cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; +- cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len; ++ cmd_pkt->tx_dseg_count = cpu_to_le16(1); ++ cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len); ++ cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len); + put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); + +- cmd_pkt->rx_dseg_count = 1; +- cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; +- cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len; ++ cmd_pkt->rx_dseg_count = cpu_to_le16(1); ++ cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len); ++ cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len); + put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); + + return rval; +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -89,9 +89,9 @@ qla24xx_process_abts(struct scsi_qla_hos + /* terminate exchange */ + rsp_els->entry_type = ELS_IOCB_TYPE; + rsp_els->entry_count = 1; +- rsp_els->nport_handle = ~0; ++ rsp_els->nport_handle = cpu_to_le16(~0); + rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; +- rsp_els->control_flags = EPD_RX_XCHG; ++ rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); + ql_dbg(ql_dbg_init, vha, 0x0283, + "Sending ELS Response to terminate exchange %#x...\n", + abts->rx_xch_addr_to_abort); +@@ -141,7 +141,7 @@ qla24xx_process_abts(struct scsi_qla_hos + abts_rsp->ox_id = abts->ox_id; + abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; + abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; +- abts_rsp->payload.ba_acc.high_seq_cnt = ~0; ++ abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); + abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; + ql_dbg(ql_dbg_init, vha, 0x028b, + "Sending BA ACC response to ABTS %#x...\n", +@@ -412,7 +412,7 @@ qla2x00_mbx_completion(scsi_qla_host_t * + { + uint16_t cnt; + uint32_t mboxes; +- uint16_t __iomem *wptr; ++ __le16 __iomem *wptr; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + +@@ -428,11 +428,11 @@ qla2x00_mbx_completion(scsi_qla_host_t * + ha->flags.mbox_int = 1; + ha->mailbox_out[0] = mb0; + mboxes >>= 1; +- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); ++ wptr = MAILBOX_REG(ha, reg, 1); + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { + if (IS_QLA2200(ha) && cnt == 8) +- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); ++ wptr = MAILBOX_REG(ha, reg, 8); + if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) + ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); + else if (mboxes & BIT_0) +@@ -456,9 +456,9 @@ qla81xx_idc_event(scsi_qla_host_t *vha, + + /* Seed data -- mailbox1 -> mailbox7. 
*/ + if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) +- wptr = (uint16_t __iomem *)®24->mailbox1; ++ wptr = ®24->mailbox1; + else if (IS_QLA8044(vha->hw)) +- wptr = (uint16_t __iomem *)®82->mailbox_out[1]; ++ wptr = ®82->mailbox_out[1]; + else + return; + +@@ -818,7 +818,7 @@ qla2x00_async_event(scsi_qla_host_t *vha + goto skip_rio; + switch (mb[0]) { + case MBA_SCSI_COMPLETION: +- handles[0] = le32_to_cpu(make_handle(mb[2], mb[1])); ++ handles[0] = make_handle(mb[2], mb[1]); + handle_cnt = 1; + break; + case MBA_CMPLT_1_16BIT: +@@ -857,10 +857,9 @@ qla2x00_async_event(scsi_qla_host_t *vha + mb[0] = MBA_SCSI_COMPLETION; + break; + case MBA_CMPLT_2_32BIT: +- handles[0] = le32_to_cpu(make_handle(mb[2], mb[1])); +- handles[1] = +- le32_to_cpu(make_handle(RD_MAILBOX_REG(ha, reg, 7), +- RD_MAILBOX_REG(ha, reg, 6))); ++ handles[0] = make_handle(mb[2], mb[1]); ++ handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), ++ RD_MAILBOX_REG(ha, reg, 6)); + handle_cnt = 2; + mb[0] = MBA_SCSI_COMPLETION; + break; +@@ -1666,7 +1665,7 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t * + sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); + + for (i = 0; i < sz; i++) +- si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]); ++ si->u.mbx.in_mb[i] = pkt->mb[i]; + + res = (si->u.mbx.in_mb[0] & MBS_MASK); + +@@ -1767,6 +1766,7 @@ static void + qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, + struct sts_entry_24xx *pkt, int iocb_type) + { ++ struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; + const char func[] = "ELS_CT_IOCB"; + const char *type; + srb_t *sp; +@@ -1816,23 +1816,22 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vh + } + + comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); +- fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1); +- fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2); ++ fw_status[1] = le32_to_cpu(ese->error_subcode_1); ++ fw_status[2] = le32_to_cpu(ese->error_subcode_2); + + if (iocb_type == ELS_IOCB_TYPE) { + els = &sp->u.iocb_cmd; +- els->u.els_plogi.fw_status[0] = fw_status[0]; +- els->u.els_plogi.fw_status[1] = fw_status[1]; +- els->u.els_plogi.fw_status[2] = fw_status[2]; +- els->u.els_plogi.comp_status = fw_status[0]; ++ els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); ++ els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); ++ els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); ++ els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); + if (comp_status == CS_COMPLETE) { + res = DID_OK << 16; + } else { + if (comp_status == CS_DATA_UNDERRUN) { + res = DID_OK << 16; +- els->u.els_plogi.len = +- le16_to_cpu(((struct els_sts_entry_24xx *) +- pkt)->total_byte_count); ++ els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( ++ ese->total_byte_count)); + } else { + els->u.els_plogi.len = 0; + res = DID_ERROR << 16; +@@ -1841,8 +1840,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vh + ql_dbg(ql_dbg_user, vha, 0x503f, + "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", + type, sp->handle, comp_status, fw_status[1], fw_status[2], +- le16_to_cpu(((struct els_sts_entry_24xx *) +- pkt)->total_byte_count)); ++ le32_to_cpu(ese->total_byte_count)); + goto els_ct_done; + } + +@@ -1858,23 +1856,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vh + if (comp_status == CS_DATA_UNDERRUN) { + res = DID_OK << 16; + bsg_reply->reply_payload_rcv_len = +- le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); ++ 
le32_to_cpu(ese->total_byte_count); + + ql_dbg(ql_dbg_user, vha, 0x503f, + "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " + "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", + type, sp->handle, comp_status, fw_status[1], fw_status[2], +- le16_to_cpu(((struct els_sts_entry_24xx *) +- pkt)->total_byte_count)); ++ le32_to_cpu(ese->total_byte_count)); + } else { + ql_dbg(ql_dbg_user, vha, 0x5040, + "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " + "error subcode 1=0x%x error subcode 2=0x%x.\n", + type, sp->handle, comp_status, +- le16_to_cpu(((struct els_sts_entry_24xx *) +- pkt)->error_subcode_1), +- le16_to_cpu(((struct els_sts_entry_24xx *) +- pkt)->error_subcode_2)); ++ le32_to_cpu(ese->error_subcode_1), ++ le32_to_cpu(ese->error_subcode_2)); + res = DID_ERROR << 16; + bsg_reply->reply_payload_rcv_len = 0; + } +@@ -2082,7 +2077,7 @@ static void qla24xx_nvme_iocb_entry(scsi + uint16_t state_flags; + struct nvmefc_fcp_req *fd; + uint16_t ret = QLA_SUCCESS; +- uint16_t comp_status = le16_to_cpu(sts->comp_status); ++ __le16 comp_status = sts->comp_status; + int logit = 0; + + iocb = &sp->u.iocb_cmd; +@@ -2113,7 +2108,7 @@ static void qla24xx_nvme_iocb_entry(scsi + } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == + (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { + /* Response already DMA'd to fd->rspaddr. */ +- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); ++ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; + } else if ((state_flags & SF_FCP_RSP_DMA)) { + /* + * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this +@@ -2130,8 +2125,8 @@ static void qla24xx_nvme_iocb_entry(scsi + + inbuf = (uint32_t *)&sts->nvme_ersp_data; + outbuf = (uint32_t *)fd->rspaddr; +- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); +- if (unlikely(iocb->u.nvme.rsp_pyld_len > ++ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; ++ if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > + sizeof(struct nvme_fc_ersp_iu))) { + if (ql_mask_match(ql_dbg_io)) { + WARN_ONCE(1, "Unexpected response payload length %u.\n", +@@ -2141,9 +2136,9 @@ static void qla24xx_nvme_iocb_entry(scsi + iocb->u.nvme.rsp_pyld_len); + } + iocb->u.nvme.rsp_pyld_len = +- sizeof(struct nvme_fc_ersp_iu); ++ cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); + } +- iter = iocb->u.nvme.rsp_pyld_len >> 2; ++ iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; + for (; iter; iter--) + *outbuf++ = swab32(*inbuf++); + } +@@ -2158,7 +2153,7 @@ static void qla24xx_nvme_iocb_entry(scsi + "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", + tgt_xfer_len, fd->transferred_length); + logit = 1; +- } else if (comp_status == CS_DATA_UNDERRUN) { ++ } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { + /* + * Do not log if this is just an underflow and there + * is no data loss. +@@ -2178,7 +2173,7 @@ static void qla24xx_nvme_iocb_entry(scsi + * If transport error then Failure (HBA rejects request) + * otherwise transport will handle. 
+ */ +- switch (comp_status) { ++ switch (le16_to_cpu(comp_status)) { + case CS_COMPLETE: + break; + +@@ -2411,9 +2406,9 @@ qla2x00_handle_dif_error(srb_t *sp, stru + * For type 3: ref & app tag is all 'f's + * For type 0,1,2: app tag is all 'f's + */ +- if ((a_app_tag == T10_PI_APP_ESCAPE) && +- ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || +- (a_ref_tag == T10_PI_REF_ESCAPE))) { ++ if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && ++ (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || ++ a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { + uint32_t blocks_done, resid; + sector_t lba_s = scsi_get_lba(cmd); + +@@ -2771,6 +2766,8 @@ qla2x00_status_entry(scsi_qla_host_t *vh + sense_len = par_sense_len = rsp_info_len = resid_len = + fw_resid_len = 0; + if (IS_FWI2_CAPABLE(ha)) { ++ u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay); ++ + if (scsi_status & SS_SENSE_LEN_VALID) + sense_len = le32_to_cpu(sts24->sense_len); + if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) +@@ -2785,11 +2782,11 @@ qla2x00_status_entry(scsi_qla_host_t *vh + ox_id = le16_to_cpu(sts24->ox_id); + par_sense_len = sizeof(sts24->data); + /* Valid values of the retry delay timer are 0x1-0xffef */ +- if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) { +- retry_delay = sts24->retry_delay & 0x3fff; ++ if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) { ++ retry_delay = sts24_retry_delay & 0x3fff; + ql_dbg(ql_dbg_io, sp->vha, 0x3033, + "%s: scope=%#x retry_delay=%#x\n", __func__, +- sts24->retry_delay >> 14, retry_delay); ++ sts24_retry_delay >> 14, retry_delay); + } + } else { + if (scsi_status & SS_SENSE_LEN_VALID) +@@ -3179,7 +3176,7 @@ qla24xx_mbx_completion(scsi_qla_host_t * + ha->flags.mbox_int = 1; + ha->mailbox_out[0] = mb0; + mboxes >>= 1; +- wptr = (uint16_t __iomem *)®->mailbox1; ++ wptr = ®->mailbox1; + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) +@@ -3203,7 +3200,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t + return; + + abt = &sp->u.iocb_cmd; +- abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle); ++ abt->u.abt.comp_status = pkt->nport_handle; + sp->done(sp, 0); + } + +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -208,11 +208,11 @@ qla2x00_mailbox_command(scsi_qla_host_t + + /* Load mailbox registers. 
*/ + if (IS_P3P_TYPE(ha)) +- optr = (uint16_t __iomem *)®->isp82.mailbox_in[0]; ++ optr = ®->isp82.mailbox_in[0]; + else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) +- optr = (uint16_t __iomem *)®->isp24.mailbox0; ++ optr = ®->isp24.mailbox0; + else +- optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0); ++ optr = MAILBOX_REG(ha, ®->isp, 0); + + iptr = mcp->mb; + command = mcp->mb[0]; +@@ -222,8 +222,7 @@ qla2x00_mailbox_command(scsi_qla_host_t + "Mailbox registers (OUT):\n"); + for (cnt = 0; cnt < ha->mbx_count; cnt++) { + if (IS_QLA2200(ha) && cnt == 8) +- optr = +- (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8); ++ optr = MAILBOX_REG(ha, ®->isp, 8); + if (mboxes & BIT_0) { + ql_dbg(ql_dbg_mbx, vha, 0x1112, + "mbox[%d]<-0x%04x\n", cnt, *iptr); +@@ -3110,8 +3109,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *v + mc.mb[6] = MSW(MSD(stats_dma)); + mc.mb[7] = LSW(MSD(stats_dma)); + mc.mb[8] = dwords; +- mc.mb[9] = cpu_to_le16(vha->vp_idx); +- mc.mb[10] = cpu_to_le16(options); ++ mc.mb[9] = vha->vp_idx; ++ mc.mb[10] = options; + + rval = qla24xx_send_mb_cmd(vha, &mc); + +@@ -3204,7 +3203,7 @@ qla24xx_abort_command(srb_t *sp) + ql_dbg(ql_dbg_mbx, vha, 0x1090, + "Failed to complete IOCB -- completion status (%x).\n", + le16_to_cpu(abt->nport_handle)); +- if (abt->nport_handle == CS_IOCB_ERROR) ++ if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) + rval = QLA_FUNCTION_PARAMETER_ERROR; + else + rval = QLA_FUNCTION_FAILED; +@@ -4727,7 +4726,7 @@ qla82xx_set_driver_version(scsi_qla_host + mbx_cmd_t *mcp = &mc; + int i; + int len; +- uint16_t *str; ++ __le16 *str; + struct qla_hw_data *ha = vha->hw; + + if (!IS_P3P_TYPE(ha)) +@@ -4736,14 +4735,14 @@ qla82xx_set_driver_version(scsi_qla_host + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, + "Entered %s.\n", __func__); + +- str = (uint16_t *)version; ++ str = (__force __le16 *)version; + len = strlen(version); + + mcp->mb[0] = MBC_SET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; + mcp->out_mb = MBX_1|MBX_0; + for (i = 4; i < 16 && len; i++, str++, len -= 2) { +- mcp->mb[i] = cpu_to_le16p(str); ++ mcp->mb[i] = le16_to_cpup(str); + mcp->out_mb |= 1<loop_id); ++ mc.mb[1] = fcport->loop_id; + mc.mb[2] = MSW(pd_dma); + mc.mb[3] = LSW(pd_dma); + mc.mb[6] = MSW(MSD(pd_dma)); + mc.mb[7] = LSW(MSD(pd_dma)); +- mc.mb[9] = cpu_to_le16(vha->vp_idx); +- mc.mb[10] = cpu_to_le16((uint16_t)opt); ++ mc.mb[9] = vha->vp_idx; ++ mc.mb[10] = opt; + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { +@@ -6587,7 +6586,7 @@ int qla24xx_gidlist_wait(struct scsi_qla + mc.mb[6] = MSW(MSD(id_list_dma)); + mc.mb[7] = LSW(MSD(id_list_dma)); + mc.mb[8] = 0; +- mc.mb[9] = cpu_to_le16(vha->vp_idx); ++ mc.mb[9] = vha->vp_idx; + + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval != QLA_SUCCESS) { +@@ -6613,8 +6612,8 @@ int qla27xx_set_zio_threshold(scsi_qla_h + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; +- mcp->mb[1] = cpu_to_le16(1); +- mcp->mb[2] = cpu_to_le16(value); ++ mcp->mb[1] = 1; ++ mcp->mb[2] = value; + mcp->out_mb = MBX_2 | MBX_1 | MBX_0; + mcp->in_mb = MBX_2 | MBX_0; + mcp->tov = MBX_TOV_SECONDS; +@@ -6639,7 +6638,7 @@ int qla27xx_get_zio_threshold(scsi_qla_h + + memset(mcp->mb, 0, sizeof(mcp->mb)); + mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; +- mcp->mb[1] = cpu_to_le16(0); ++ mcp->mb[1] = 0; + mcp->out_mb = MBX_1 | MBX_0; + mcp->in_mb = MBX_2 | MBX_0; + mcp->tov = MBX_TOV_SECONDS; +--- a/drivers/scsi/qla2xxx/qla_mr.c ++++ b/drivers/scsi/qla2xxx/qla_mr.c +@@ -3202,7 +3202,7 @@ qlafx00_tm_iocb(srb_t 
*sp, struct tsk_mg + memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00)); + tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; + tm_iocb.entry_count = 1; +- tm_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle)); ++ tm_iocb.handle = make_handle(req->id, sp->handle); + tm_iocb.reserved_0 = 0; + tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); + tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); +@@ -3228,9 +3228,8 @@ qlafx00_abort_iocb(srb_t *sp, struct abo + memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00)); + abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00; + abt_iocb.entry_count = 1; +- abt_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle)); +- abt_iocb.abort_handle = +- cpu_to_le32(make_handle(req->id, fxio->u.abt.cmd_hndl)); ++ abt_iocb.handle = make_handle(req->id, sp->handle); ++ abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl); + abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id); + abt_iocb.req_que_no = cpu_to_le16(req->id); + +@@ -3251,7 +3250,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fx + + memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00)); + fx_iocb.entry_type = FX00_IOCB_TYPE; +- fx_iocb.handle = cpu_to_le32(sp->handle); ++ fx_iocb.handle = sp->handle; + fx_iocb.entry_count = entry_cnt; + + if (sp->type == SRB_FXIOCB_DCMD) { +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -138,7 +138,7 @@ static void qla_nvme_release_fcp_cmd_kre + priv->sp = NULL; + sp->priv = NULL; + if (priv->comp_status == QLA_SUCCESS) { +- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len; ++ fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); + } else { + fd->rcv_rsplen = 0; + fd->transferred_length = 0; +@@ -437,11 +437,11 @@ static inline int qla2x00_start_nvme_mq( + + /* No data transfer how do we check buffer len == 0?? 
*/ + if (fd->io_dir == NVMEFC_FCP_READ) { +- cmd_pkt->control_flags = CF_READ_DATA; ++ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); + vha->qla_stats.input_bytes += fd->payload_length; + vha->qla_stats.input_requests++; + } else if (fd->io_dir == NVMEFC_FCP_WRITE) { +- cmd_pkt->control_flags = CF_WRITE_DATA; ++ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); + if ((vha->flags.nvme_first_burst) && + (sp->fcport->nvme_prli_service_param & + NVME_PRLI_SP_FIRST_BURST)) { +@@ -449,7 +449,7 @@ static inline int qla2x00_start_nvme_mq( + sp->fcport->nvme_first_burst_size) || + (sp->fcport->nvme_first_burst_size == 0)) + cmd_pkt->control_flags |= +- CF_NVME_FIRST_BURST_ENABLE; ++ cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE); + } + vha->qla_stats.output_bytes += fd->payload_length; + vha->qla_stats.output_requests++; +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -1561,14 +1561,14 @@ qla82xx_get_table_desc(const u8 *unirom, + uint32_t i; + struct qla82xx_uri_table_desc *directory = + (struct qla82xx_uri_table_desc *)&unirom[0]; +- __le32 offset; +- __le32 tab_type; +- __le32 entries = cpu_to_le32(directory->num_entries); ++ uint32_t offset; ++ uint32_t tab_type; ++ uint32_t entries = le32_to_cpu(directory->num_entries); + + for (i = 0; i < entries; i++) { +- offset = cpu_to_le32(directory->findex) + +- (i * cpu_to_le32(directory->entry_size)); +- tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8)); ++ offset = le32_to_cpu(directory->findex) + ++ (i * le32_to_cpu(directory->entry_size)); ++ tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8); + + if (tab_type == section) + return (struct qla82xx_uri_table_desc *)&unirom[offset]; +@@ -1582,16 +1582,17 @@ qla82xx_get_data_desc(struct qla_hw_data + u32 section, u32 idx_offset) + { + const u8 *unirom = ha->hablob->fw->data; +- int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset)); ++ int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] + ++ idx_offset); + struct qla82xx_uri_table_desc *tab_desc = NULL; +- __le32 offset; ++ uint32_t offset; + + tab_desc = qla82xx_get_table_desc(unirom, section); + if (!tab_desc) + return NULL; + +- offset = cpu_to_le32(tab_desc->findex) + +- (cpu_to_le32(tab_desc->entry_size) * idx); ++ offset = le32_to_cpu(tab_desc->findex) + ++ (le32_to_cpu(tab_desc->entry_size) * idx); + + return (struct qla82xx_uri_data_desc *)&unirom[offset]; + } +@@ -1606,7 +1607,7 @@ qla82xx_get_bootld_offset(struct qla_hw_ + uri_desc = qla82xx_get_data_desc(ha, + QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF); + if (uri_desc) +- offset = cpu_to_le32(uri_desc->findex); ++ offset = le32_to_cpu(uri_desc->findex); + } + + return (u8 *)&ha->hablob->fw->data[offset]; +@@ -1620,7 +1621,7 @@ static u32 qla82xx_get_fw_size(struct ql + uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, + QLA82XX_URI_FIRMWARE_IDX_OFF); + if (uri_desc) +- return cpu_to_le32(uri_desc->size); ++ return le32_to_cpu(uri_desc->size); + } + + return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]); +@@ -1636,7 +1637,7 @@ qla82xx_get_fw_offs(struct qla_hw_data * + uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, + QLA82XX_URI_FIRMWARE_IDX_OFF); + if (uri_desc) +- offset = cpu_to_le32(uri_desc->findex); ++ offset = le32_to_cpu(uri_desc->findex); + } + + return (u8 *)&ha->hablob->fw->data[offset]; +@@ -1847,8 +1848,8 @@ qla82xx_set_product_offset(struct qla_hw + struct qla82xx_uri_table_desc *ptab_desc = NULL; + const uint8_t *unirom = ha->hablob->fw->data; + uint32_t i; 
+- __le32 entries; +- __le32 flags, file_chiprev, offset; ++ uint32_t entries; ++ uint32_t flags, file_chiprev, offset; + uint8_t chiprev = ha->chip_revision; + /* Hardcoding mn_present flag for P3P */ + int mn_present = 0; +@@ -1859,14 +1860,14 @@ qla82xx_set_product_offset(struct qla_hw + if (!ptab_desc) + return -1; + +- entries = cpu_to_le32(ptab_desc->num_entries); ++ entries = le32_to_cpu(ptab_desc->num_entries); + + for (i = 0; i < entries; i++) { +- offset = cpu_to_le32(ptab_desc->findex) + +- (i * cpu_to_le32(ptab_desc->entry_size)); +- flags = cpu_to_le32(*((int *)&unirom[offset] + ++ offset = le32_to_cpu(ptab_desc->findex) + ++ (i * le32_to_cpu(ptab_desc->entry_size)); ++ flags = le32_to_cpu(*((__le32 *)&unirom[offset] + + QLA82XX_URI_FLAGS_OFF)); +- file_chiprev = cpu_to_le32(*((int *)&unirom[offset] + ++ file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] + + QLA82XX_URI_CHIP_REV_OFF)); + + flagbit = mn_present ? 1 : 2; +@@ -2549,8 +2550,8 @@ qla82xx_start_firmware(scsi_qla_host_t * + return qla82xx_check_rcvpeg_state(ha); + } + +-static uint32_t * +-qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, ++static __le32 * ++qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, + uint32_t length) + { + uint32_t i; +@@ -2675,13 +2676,13 @@ qla82xx_read_optrom_data(struct scsi_qla + uint32_t offset, uint32_t length) + { + scsi_block_requests(vha->host); +- qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length); ++ qla82xx_read_flash_data(vha, buf, offset, length); + scsi_unblock_requests(vha->host); + return buf; + } + + static int +-qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr, ++qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr, + uint32_t faddr, uint32_t dwords) + { + int ret; +@@ -2758,7 +2759,7 @@ qla82xx_write_flash_data(struct scsi_qla + } + + ret = qla82xx_write_flash_dword(ha, faddr, +- cpu_to_le32(*dwptr)); ++ le32_to_cpu(*dwptr)); + if (ret) { + ql_dbg(ql_dbg_p3p, vha, 0xb020, + "Unable to program flash address=%x data=%x.\n", +@@ -3724,7 +3725,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host + /* Minidump related functions */ + static int + qla82xx_minidump_process_control(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + struct qla82xx_md_entry_crb *crb_entry; +@@ -3841,12 +3842,12 @@ qla82xx_minidump_process_control(scsi_ql + + static void + qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_md_entry_rdocm *ocm_hdr; +- uint32_t *data_ptr = *d_ptr; ++ __le32 *data_ptr = *d_ptr; + + ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; +@@ -3863,12 +3864,12 @@ qla82xx_minidump_process_rdocm(scsi_qla_ + + static void + qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; + struct qla82xx_md_entry_mux *mux_hdr; +- uint32_t *data_ptr = *d_ptr; ++ __le32 *data_ptr = *d_ptr; + + mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; +@@ -3889,12 +3890,12 @@ 
qla82xx_minidump_process_rdmux(scsi_qla_ + + static void + qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_md_entry_crb *crb_hdr; +- uint32_t *data_ptr = *d_ptr; ++ __le32 *data_ptr = *d_ptr; + + crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; + r_addr = crb_hdr->addr; +@@ -3912,7 +3913,7 @@ qla82xx_minidump_process_rdcrb(scsi_qla_ + + static int + qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + uint32_t addr, r_addr, c_addr, t_r_addr; +@@ -3921,7 +3922,7 @@ qla82xx_minidump_process_l2tag(scsi_qla_ + uint32_t c_value_w, c_value_r; + struct qla82xx_md_entry_cache *cache_hdr; + int rval = QLA_FUNCTION_FAILED; +- uint32_t *data_ptr = *d_ptr; ++ __le32 *data_ptr = *d_ptr; + + cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; +@@ -3971,14 +3972,14 @@ qla82xx_minidump_process_l2tag(scsi_qla_ + + static void + qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla82xx_md_entry_cache *cache_hdr; +- uint32_t *data_ptr = *d_ptr; ++ __le32 *data_ptr = *d_ptr; + + cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; +@@ -4006,14 +4007,14 @@ qla82xx_minidump_process_l1cache(scsi_ql + + static void + qla82xx_minidump_process_queue(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla82xx_md_entry_queue *q_hdr; +- uint32_t *data_ptr = *d_ptr; ++ __le32 *data_ptr = *d_ptr; + + q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; +@@ -4036,13 +4037,13 @@ qla82xx_minidump_process_queue(scsi_qla_ + + static void + qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_value; + uint32_t i, loop_cnt; + struct qla82xx_md_entry_rdrom *rom_hdr; +- uint32_t *data_ptr = *d_ptr; ++ __le32 *data_ptr = *d_ptr; + + rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; + r_addr = rom_hdr->read_addr; +@@ -4062,7 +4063,7 @@ qla82xx_minidump_process_rdrom(scsi_qla_ + + static int + qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, +- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) ++ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + { + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_value, r_data; +@@ -4070,7 +4071,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_ + struct qla82xx_md_entry_rdmem *m_hdr; + unsigned long flags; + int rval = QLA_FUNCTION_FAILED; +- uint32_t *data_ptr = *d_ptr; ++ __le32 *data_ptr = *d_ptr; + + m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; + r_addr = m_hdr->read_addr; +@@ -4163,12 +4164,12 @@ 
qla82xx_md_collect(scsi_qla_host_t *vha) + int no_entry_hdr = 0; + qla82xx_md_entry_hdr_t *entry_hdr; + struct qla82xx_md_template_hdr *tmplt_hdr; +- uint32_t *data_ptr; ++ __le32 *data_ptr; + uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; + int i = 0, rval = QLA_FUNCTION_FAILED; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; +- data_ptr = (uint32_t *)ha->md_dump; ++ data_ptr = ha->md_dump; + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xb037, +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -5777,7 +5777,8 @@ qla25xx_rdp_rsp_reduce_size(struct scsi_ + if (!pdb) { + ql_dbg(ql_dbg_init, vha, 0x0181, + "%s: Failed allocate pdb\n", __func__); +- } else if (qla24xx_get_port_database(vha, purex->nport_handle, pdb)) { ++ } else if (qla24xx_get_port_database(vha, ++ le16_to_cpu(purex->nport_handle), pdb)) { + ql_dbg(ql_dbg_init, vha, 0x0181, + "%s: Failed get pdb sid=%x\n", __func__, sid); + } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE && +@@ -5971,7 +5972,7 @@ void qla24xx_process_purex_rdp(struct sc + rsp_els->entry_status = 0; + rsp_els->handle = 0; + rsp_els->nport_handle = purex->nport_handle; +- rsp_els->tx_dsd_count = 1; ++ rsp_els->tx_dsd_count = cpu_to_le16(1); + rsp_els->vp_index = purex->vp_idx; + rsp_els->sof_type = EST_SOFI3; + rsp_els->rx_xchg_address = purex->rx_xchg_addr; +@@ -5982,7 +5983,7 @@ void qla24xx_process_purex_rdp(struct sc + rsp_els->d_id[1] = purex->s_id[1]; + rsp_els->d_id[2] = purex->s_id[2]; + +- rsp_els->control_flags = EPD_ELS_ACC; ++ rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC); + rsp_els->rx_byte_count = 0; + rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length); + +@@ -5994,8 +5995,8 @@ void qla24xx_process_purex_rdp(struct sc + + /* Prepare Response Payload */ + rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */ +- rsp_payload->hdr.len = cpu_to_be32( +- rsp_els->tx_byte_count - sizeof(rsp_payload->hdr)); ++ rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) - ++ sizeof(rsp_payload->hdr)); + + /* Link service Request Info Descriptor */ + rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1); +@@ -6045,7 +6046,7 @@ void qla24xx_process_purex_rdp(struct sc + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0); + if (!rval) { +- uint16_t *trx = (uint16_t *)sfp; /* already be16 */ ++ __be16 *trx = (__force __be16 *)sfp; /* already be16 */ + rsp_payload->sfp_diag_desc.temperature = trx[0]; + rsp_payload->sfp_diag_desc.vcc = trx[1]; + rsp_payload->sfp_diag_desc.tx_bias = trx[2]; +@@ -6072,17 +6073,17 @@ void qla24xx_process_purex_rdp(struct sc + rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0); + if (!rval) { + rsp_payload->ls_err_desc.link_fail_cnt = +- cpu_to_be32(stat->link_fail_cnt); ++ cpu_to_be32(le32_to_cpu(stat->link_fail_cnt)); + rsp_payload->ls_err_desc.loss_sync_cnt = +- cpu_to_be32(stat->loss_sync_cnt); ++ cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt)); + rsp_payload->ls_err_desc.loss_sig_cnt = +- cpu_to_be32(stat->loss_sig_cnt); ++ cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt)); + rsp_payload->ls_err_desc.prim_seq_err_cnt = +- cpu_to_be32(stat->prim_seq_err_cnt); ++ cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt)); + rsp_payload->ls_err_desc.inval_xmit_word_cnt = +- cpu_to_be32(stat->inval_xmit_word_cnt); ++ cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt)); + rsp_payload->ls_err_desc.inval_crc_cnt = +- cpu_to_be32(stat->inval_crc_cnt); ++ 
cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt)); + rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6; + } + } +@@ -6154,7 +6155,7 @@ void qla24xx_process_purex_rdp(struct sc + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0); + if (!rval) { +- uint16_t *trx = (uint16_t *)sfp; /* already be16 */ ++ __be16 *trx = (__force __be16 *)sfp; /* already be16 */ + + /* Optical Element Descriptor, Temperature */ + rsp_payload->optical_elmt_desc[0].high_alarm = trx[0]; +--- a/drivers/scsi/qla2xxx/qla_sup.c ++++ b/drivers/scsi/qla2xxx/qla_sup.c +@@ -183,7 +183,7 @@ qla2x00_nv_deselect(struct qla_hw_data * + * @data: word to program + */ + static void +-qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data) ++qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data) + { + int count; + uint16_t word; +@@ -202,7 +202,7 @@ qla2x00_write_nvram_word(struct qla_hw_d + + /* Write data */ + nv_cmd = (addr << 16) | NV_WRITE_OP; +- nv_cmd |= data; ++ nv_cmd |= (__force u16)data; + nv_cmd <<= 5; + for (count = 0; count < 27; count++) { + if (nv_cmd & BIT_31) +@@ -241,7 +241,7 @@ qla2x00_write_nvram_word(struct qla_hw_d + + static int + qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr, +- uint16_t data, uint32_t tmo) ++ __le16 data, uint32_t tmo) + { + int ret, count; + uint16_t word; +@@ -261,7 +261,7 @@ qla2x00_write_nvram_word_tmo(struct qla_ + + /* Write data */ + nv_cmd = (addr << 16) | NV_WRITE_OP; +- nv_cmd |= data; ++ nv_cmd |= (__force u16)data; + nv_cmd <<= 5; + for (count = 0; count < 27; count++) { + if (nv_cmd & BIT_31) +@@ -308,7 +308,7 @@ qla2x00_clear_nvram_protection(struct ql + int ret, stat; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + uint32_t word, wait_cnt; +- uint16_t wprot, wprot_old; ++ __le16 wprot, wprot_old; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + /* Clear NVRAM write protection. */ +@@ -318,7 +318,7 @@ qla2x00_clear_nvram_protection(struct ql + stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base, + cpu_to_le16(0x1234), 100000); + wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base)); +- if (stat != QLA_SUCCESS || wprot != 0x1234) { ++ if (stat != QLA_SUCCESS || wprot != cpu_to_le16(0x1234)) { + /* Write enable. */ + qla2x00_nv_write(ha, NVR_DATA_OUT); + qla2x00_nv_write(ha, 0); +@@ -549,7 +549,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t * + { + const char *loc, *locations[] = { "DEF", "PCI" }; + uint32_t pcihdr, pcids; +- uint16_t cnt, chksum, *wptr; ++ uint16_t cnt, chksum; ++ __le16 *wptr; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct qla_flt_location *fltl = (void *)req->ring; +@@ -610,7 +611,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t * + if (memcmp(fltl->sig, "QFLT", 4)) + goto end; + +- wptr = (uint16_t *)req->ring; ++ wptr = (__force __le16 *)req->ring; + cnt = sizeof(*fltl) / sizeof(*wptr); + for (chksum = 0; cnt--; wptr++) + chksum += le16_to_cpu(*wptr); +@@ -671,7 +672,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vh + uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 
1 : 0; + struct qla_flt_header *flt = ha->flt; + struct qla_flt_region *region = &flt->region[0]; +- uint16_t *wptr, cnt, chksum; ++ __le16 *wptr; ++ uint16_t cnt, chksum; + uint32_t start; + + /* Assign FCP prio region since older adapters may not have FLT, or +@@ -681,7 +683,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vh + fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; + + ha->flt_region_flt = flt_addr; +- wptr = (uint16_t *)ha->flt; ++ wptr = (__force __le16 *)ha->flt; + ha->isp_ops->read_optrom(vha, flt, flt_addr << 2, + (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE)); + +@@ -949,7 +951,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vh + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + uint16_t cnt, chksum; +- uint16_t *wptr = (uint16_t *)req->ring; ++ __le16 *wptr = (__force __le16 *)req->ring; + struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring; + uint8_t man_id, flash_id; + uint16_t mid = 0, fid = 0; +@@ -1042,14 +1044,14 @@ static void + qla2xxx_get_idc_param(scsi_qla_host_t *vha) + { + #define QLA82XX_IDC_PARAM_ADDR 0x003e885c +- uint32_t *wptr; ++ __le32 *wptr; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + + if (!(IS_P3P_TYPE(ha))) + return; + +- wptr = (uint32_t *)req->ring; ++ wptr = (__force __le32 *)req->ring; + ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8); + + if (*wptr == cpu_to_le32(0xffffffff)) { +@@ -1095,7 +1097,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t + { + #define NPIV_CONFIG_SIZE (16*1024) + void *data; +- uint16_t *wptr; ++ __le16 *wptr; + uint16_t cnt, chksum; + int i; + struct qla_npiv_header hdr; +@@ -1265,7 +1267,7 @@ qla24xx_erase_sector(scsi_qla_host_t *vh + } + + static int +-qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, ++qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, + uint32_t dwords) + { + int ret; +@@ -1352,7 +1354,7 @@ qla24xx_write_flash_data(scsi_qla_host_t + + /* Slow write */ + ret = qla24xx_write_flash_dword(ha, +- flash_data_addr(ha, faddr), cpu_to_le32(*dwptr)); ++ flash_data_addr(ha, faddr), le32_to_cpu(*dwptr)); + if (ret) { + ql_dbg(ql_dbg_user, vha, 0x7006, + "Failed slopw write %x (%x)\n", faddr, *dwptr); +@@ -1379,11 +1381,11 @@ qla2x00_read_nvram_data(scsi_qla_host_t + uint32_t bytes) + { + uint32_t i; +- uint16_t *wptr; ++ __le16 *wptr; + struct qla_hw_data *ha = vha->hw; + + /* Word reads to NVRAM via registers. 
*/ +- wptr = (uint16_t *)buf; ++ wptr = buf; + qla2x00_lock_nvram_access(ha); + for (i = 0; i < bytes >> 1; i++, naddr++) + wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha, +@@ -1456,7 +1458,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t + { + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; +- uint32_t *dwptr = buf; ++ __le32 *dwptr = buf; + uint32_t i; + int ret; + +@@ -1478,7 +1480,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t + naddr = nvram_data_addr(ha, naddr); + bytes >>= 2; + for (i = 0; i < bytes; i++, naddr++, dwptr++) { +- if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) { ++ if (qla24xx_write_flash_dword(ha, naddr, le32_to_cpu(*dwptr))) { + ql_dbg(ql_dbg_user, vha, 0x709a, + "Unable to program nvram address=%x data=%x.\n", + naddr, *dwptr); +@@ -2662,7 +2664,7 @@ qla28xx_get_flash_region(struct scsi_qla + + cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); + for (; cnt; cnt--, flt_reg++) { +- if (flt_reg->start == start) { ++ if (le32_to_cpu(flt_reg->start) == start) { + memcpy((uint8_t *)region, flt_reg, + sizeof(struct qla_flt_region)); + rval = QLA_SUCCESS; +@@ -2691,7 +2693,7 @@ qla28xx_write_flash_data(scsi_qla_host_t + struct qla_flt_region region; + bool reset_to_rom = false; + uint32_t risc_size, risc_attr = 0; +- uint32_t *fw_array = NULL; ++ __be32 *fw_array = NULL; + + /* Retrieve region info - must be a start address passed in */ + rval = qla28xx_get_flash_region(vha, offset, ®ion); +@@ -2722,12 +2724,12 @@ qla28xx_write_flash_data(scsi_qla_host_t + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Region %x is secure\n", region.code); + +- switch (region.code) { ++ switch (le16_to_cpu(region.code)) { + case FLT_REG_FW: + case FLT_REG_FW_SEC_27XX: + case FLT_REG_MPI_PRI_28XX: + case FLT_REG_MPI_SEC_28XX: +- fw_array = dwptr; ++ fw_array = (__force __be32 *)dwptr; + + /* 1st fw array */ + risc_size = be32_to_cpu(fw_array[3]); +@@ -2761,7 +2763,7 @@ qla28xx_write_flash_data(scsi_qla_host_t + + case FLT_REG_PEP_PRI_28XX: + case FLT_REG_PEP_SEC_28XX: +- fw_array = dwptr; ++ fw_array = (__force __be32 *)dwptr; + + /* 1st fw array */ + risc_size = be32_to_cpu(fw_array[3]); +@@ -2892,7 +2894,8 @@ qla28xx_write_flash_data(scsi_qla_host_t + if (region.attribute && buf_size_without_sfub) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Sending Secure Flash MB Cmd\n"); +- rval = qla28xx_secure_flash_update(vha, 0, region.code, ++ rval = qla28xx_secure_flash_update(vha, 0, ++ le16_to_cpu(region.code), + buf_size_without_sfub, sfub_dma, + sizeof(struct secure_flash_update_block) >> 2); + if (rval != QLA_SUCCESS) { +@@ -2981,11 +2984,11 @@ qla24xx_write_optrom_data(struct scsi_ql + + /* Go with write. 
*/ + if (IS_QLA28XX(ha)) +- rval = qla28xx_write_flash_data(vha, (uint32_t *)buf, +- offset >> 2, length >> 2); ++ rval = qla28xx_write_flash_data(vha, buf, offset >> 2, ++ length >> 2); + else +- rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, +- offset >> 2, length >> 2); ++ rval = qla24xx_write_flash_data(vha, buf, offset >> 2, ++ length >> 2); + + clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); + scsi_unblock_requests(vha->host); +@@ -3513,7 +3516,8 @@ qla24xx_get_flash_version(scsi_qla_host_ + ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); + } else { + for (i = 0; i < 4; i++) +- ha->fw_revision[i] = be32_to_cpu(dcode[4+i]); ++ ha->fw_revision[i] = ++ be32_to_cpu((__force __be32)dcode[4+i]); + ql_dbg(ql_dbg_init, vha, 0x0060, + "Firmware revision (flash) %u.%u.%u (%x).\n", + ha->fw_revision[0], ha->fw_revision[1], +@@ -3537,7 +3541,8 @@ qla24xx_get_flash_version(scsi_qla_host_ + } + + for (i = 0; i < 4; i++) +- ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]); ++ ha->gold_fw_version[i] = ++ be32_to_cpu((__force __be32)dcode[4+i]); + + return ret; + } +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -387,7 +387,7 @@ static bool qlt_24xx_atio_pkt_all_vps(st + qlt_issue_marker(vha, ha_locked); + + if ((entry->u.isp24.vp_index != 0xFF) && +- (entry->u.isp24.nport_handle != 0xFFFF)) { ++ (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) { + host = qlt_find_host_by_vp_idx(vha, + entry->u.isp24.vp_index); + if (unlikely(!host)) { +@@ -1706,7 +1706,7 @@ static void qlt_send_notify_ack(struct q + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & +- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); ++ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; +@@ -1734,7 +1734,8 @@ static int qlt_build_abts_resp_iocb(stru + struct scsi_qla_host *vha = mcmd->vha; + struct qla_hw_data *ha = vha->hw; + struct abts_resp_to_24xx *resp; +- uint32_t f_ctl, h; ++ __le32 f_ctl; ++ uint32_t h; + uint8_t *p; + int rc; + struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts; +@@ -1791,7 +1792,7 @@ static int qlt_build_abts_resp_iocb(stru + resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; + resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; + resp->payload.ba_acct.low_seq_cnt = 0x0000; +- resp->payload.ba_acct.high_seq_cnt = 0xFFFF; ++ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF); + resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; + resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; + } else { +@@ -1823,7 +1824,7 @@ static void qlt_24xx_send_abts_resp(stru + struct scsi_qla_host *vha = qpair->vha; + struct qla_hw_data *ha = vha->hw; + struct abts_resp_to_24xx *resp; +- uint32_t f_ctl; ++ __le32 f_ctl; + uint8_t *p; + + ql_dbg(ql_dbg_tgt, vha, 0xe006, +@@ -1866,7 +1867,7 @@ static void qlt_24xx_send_abts_resp(stru + resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; + resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; + resp->payload.ba_acct.low_seq_cnt = 0x0000; +- resp->payload.ba_acct.high_seq_cnt = 0xFFFF; ++ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF); + resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; + resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; + } else { +@@ -2039,7 +2040,7 @@ static void qlt_do_tmr_work(struct work_ + + switch (mcmd->tmr_func) { + case QLA_TGT_ABTS: +- tag = 
mcmd->orig_iocb.abts.exchange_addr_to_abort; ++ tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort); + break; + default: + tag = 0; +@@ -2119,7 +2120,7 @@ static int __qlt_24xx_handle_abts(struct + struct qla_tgt_cmd *abort_cmd; + + abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, +- abts->exchange_addr_to_abort); ++ le32_to_cpu(abts->exchange_addr_to_abort)); + if (abort_cmd && abort_cmd->qpair) { + mcmd->qpair = abort_cmd->qpair; + mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; +@@ -2142,7 +2143,7 @@ static void qlt_24xx_handle_abts(struct + { + struct qla_hw_data *ha = vha->hw; + struct fc_port *sess; +- uint32_t tag = abts->exchange_addr_to_abort; ++ uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort); + be_id_t s_id; + int rc; + unsigned long flags; +@@ -2232,7 +2233,7 @@ static void qlt_24xx_send_task_mgmt_ctio + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; +- ctio->nport_handle = mcmd->sess->loop_id; ++ ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id); + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = ha->vp_idx; + ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); +@@ -2289,7 +2290,7 @@ void qlt_send_resp_ctio(struct qla_qpair + ctio->entry_type = CTIO_TYPE7; + ctio->entry_count = 1; + ctio->handle = QLA_TGT_SKIP_HANDLE; +- ctio->nport_handle = cmd->sess->loop_id; ++ ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id); + ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio->vp_index = vha->vp_idx; + ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); +@@ -2849,10 +2850,14 @@ static void qlt_24xx_init_ctio_to_isp(st + cpu_to_le16(SS_SENSE_LEN_VALID); + ctio->u.status1.sense_length = + cpu_to_le16(prm->sense_buffer_len); +- for (i = 0; i < prm->sense_buffer_len/4; i++) +- ((uint32_t *)ctio->u.status1.sense_data)[i] = +- cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); ++ for (i = 0; i < prm->sense_buffer_len/4; i++) { ++ uint32_t v; + ++ v = get_unaligned_be32( ++ &((uint32_t *)prm->sense_buffer)[i]); ++ put_unaligned_le32(v, ++ &((uint32_t *)ctio->u.status1.sense_data)[i]); ++ } + qlt_print_dif_err(prm); + + } else { +@@ -3123,7 +3128,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair + else if (cmd->dma_data_direction == DMA_FROM_DEVICE) + pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); + +- pkt->dseg_count = prm->tot_dsds; ++ pkt->dseg_count = cpu_to_le16(prm->tot_dsds); + /* Fibre channel byte count */ + pkt->transfer_length = cpu_to_le32(transfer_length); + +@@ -3145,7 +3150,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair + qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); + + put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address); +- pkt->crc_context_len = CRC_CONTEXT_LEN_FW; ++ pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); + + if (!bundling) { + cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; +@@ -3582,7 +3587,7 @@ static int __qlt_send_term_imm_notif(str + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & +- __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); ++ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + + /* terminate */ +@@ -3656,7 +3661,7 @@ static int __qlt_send_term_exchange(stru + + ctio24 = (struct ctio7_to_24xx *)pkt; + ctio24->entry_type = CTIO_TYPE7; +- ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; ++ ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED); + ctio24->timeout = 
cpu_to_le16(QLA_TGT_TIMEOUT); + ctio24->vp_index = vha->vp_idx; + ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); +@@ -4119,7 +4124,7 @@ static void __qlt_do_work(struct qla_tgt + + spin_lock_init(&cmd->cmd_lock); + cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; +- cmd->se_cmd.tag = atio->u.isp24.exchange_addr; ++ cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr); + + if (atio->u.isp24.fcp_cmnd.rddata && + atio->u.isp24.fcp_cmnd.wrdata) { +@@ -5311,7 +5316,7 @@ static int __qlt_send_busy(struct qla_qp + + ctio24 = (struct ctio7_to_24xx *)pkt; + ctio24->entry_type = CTIO_TYPE7; +- ctio24->nport_handle = sess->loop_id; ++ ctio24->nport_handle = cpu_to_le16(sess->loop_id); + ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); + ctio24->vp_index = vha->vp_idx; + ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); +@@ -5324,13 +5329,14 @@ static int __qlt_send_busy(struct qla_qp + * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, + * if the explicit conformation is used. + */ +- ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); ++ ctio24->u.status1.ox_id = ++ cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + ctio24->u.status1.scsi_status = cpu_to_le16(status); + +- ctio24->u.status1.residual = get_datalen_for_atio(atio); ++ ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); + + if (ctio24->u.status1.residual != 0) +- ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; ++ ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER); + + /* Memory Barrier */ + wmb(); +@@ -5559,7 +5565,7 @@ static void qlt_24xx_atio_pkt(struct scs + switch (atio->u.raw.entry_type) { + case ATIO_TYPE7: + if (unlikely(atio->u.isp24.exchange_addr == +- ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { ++ cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) { + ql_dbg(ql_dbg_io, vha, 0x3065, + "qla_target(%d): ATIO_TYPE7 " + "received with UNKNOWN exchange address, " +@@ -5722,8 +5728,8 @@ static void qlt_handle_abts_completion(s + entry->compl_status); + + if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) { +- if ((entry->error_subcode1 == 0x1E) && +- (entry->error_subcode2 == 0)) { ++ if (le32_to_cpu(entry->error_subcode1) == 0x1E && ++ le32_to_cpu(entry->error_subcode2) == 0) { + if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) { + ha->tgt.tgt_ops->free_mcmd(mcmd); + return; +@@ -5937,8 +5943,7 @@ void qlt_async_event(uint16_t code, stru + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, + "qla_target(%d): Async LOOP_UP occurred " + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, +- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), +- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); ++ mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + if (tgt->link_reinit_iocb_pending) { + qlt_send_notify_ack(ha->base_qpair, + &tgt->link_reinit_iocb, +@@ -5955,18 +5960,16 @@ void qlt_async_event(uint16_t code, stru + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, + "qla_target(%d): Async event %#x occurred " + "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, +- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), +- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); ++ mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + break; + + case MBA_REJECTED_FCP_CMD: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, + "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", + vha->vp_idx, +- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), +- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); ++ mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + +- if 
(le16_to_cpu(mailbox[3]) == 1) { ++ if (mailbox[3] == 1) { + /* exchange starvation. */ + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { +@@ -5990,10 +5993,9 @@ void qlt_async_event(uint16_t code, stru + "qla_target(%d): Port update async event %#x " + "occurred: updating the ports database (m[0]=%x, m[1]=%x, " + "m[2]=%x, m[3]=%x)", vha->vp_idx, code, +- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), +- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); ++ mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + +- login_code = le16_to_cpu(mailbox[2]); ++ login_code = mailbox[2]; + if (login_code == 0x4) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, + "Async MB 2: Got PLOGI Complete\n"); +@@ -6742,7 +6744,7 @@ qlt_init_atio_q_entries(struct scsi_qla_ + return; + + for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { +- pkt->u.raw.signature = ATIO_PROCESSED; ++ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); + pkt++; + } + +@@ -6777,7 +6779,7 @@ qlt_24xx_process_atio_queue(struct scsi_ + "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", + &pkt->u.isp24.fcp_hdr.s_id, + be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), +- le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); ++ pkt->u.isp24.exchange_addr, pkt); + + adjust_corrupted_atio(pkt); + qlt_send_term_exchange(ha->base_qpair, NULL, pkt, +@@ -6795,7 +6797,7 @@ qlt_24xx_process_atio_queue(struct scsi_ + } else + ha->tgt.atio_ring_ptr++; + +- pkt->u.raw.signature = ATIO_PROCESSED; ++ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); + pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; + } + wmb(); +@@ -6824,10 +6826,10 @@ qlt_24xx_config_rings(struct scsi_qla_ho + if (IS_QLA2071(ha)) { + /* 4 ports Baker: Enable Interrupt Handshake */ + icb->msix_atio = 0; +- icb->firmware_options_2 |= BIT_26; ++ icb->firmware_options_2 |= cpu_to_le32(BIT_26); + } else { + icb->msix_atio = cpu_to_le16(msix->entry); +- icb->firmware_options_2 &= ~BIT_26; ++ icb->firmware_options_2 &= cpu_to_le32(~BIT_26); + } + ql_dbg(ql_dbg_init, vha, 0xf072, + "Registering ICB vector 0x%x for atio que.\n", +@@ -6837,7 +6839,7 @@ qlt_24xx_config_rings(struct scsi_qla_ho + /* INTx|MSI */ + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + icb->msix_atio = 0; +- icb->firmware_options_2 |= BIT_26; ++ icb->firmware_options_2 |= cpu_to_le32(BIT_26); + ql_dbg(ql_dbg_init, vha, 0xf072, + "%s: Use INTx for ATIOQ.\n", __func__); + } +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -919,9 +919,9 @@ static void + qla27xx_firmware_info(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp) + { +- tmp->firmware_version[0] = vha->hw->fw_major_version; +- tmp->firmware_version[1] = vha->hw->fw_minor_version; +- tmp->firmware_version[2] = vha->hw->fw_subminor_version; ++ tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version); ++ tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version); ++ tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version); + tmp->firmware_version[3] = cpu_to_le32( + vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes); + tmp->firmware_version[4] = cpu_to_le32( diff --git a/patches.suse/scsi-qla2xxx-Fix-failure-message-in-qlt_disable_vha.patch b/patches.suse/scsi-qla2xxx-Fix-failure-message-in-qlt_disable_vha.patch new file mode 100644 index 0000000..15fd6ef --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-failure-message-in-qlt_disable_vha.patch @@ -0,0 +1,64 @@ +From: Viacheslav Dubeyko +Date: Wed, 22 Apr 2020 13:51:51 +0300 +Subject: scsi: qla2xxx: Fix failure 
message in qlt_disable_vha() +Patch-mainline: v5.8-rc1 +Git-commit: 6b3d16f9875e181412401b8ab445dce83bd6fedc +References: bsc#1171688 bsc#1174003 + +The following sequence of commands result in an incorrect failure message +being printed: + +echo 0x7fffffff > /sys/module/qla2xxx/parameters/logging +modprobe target_core_mod +modprobe tcm_qla2xxx +mkdir /sys/kernel/config/target/qla2xxx +mkdir /sys/kernel/config/target/qla2xxx/ +mkdir /sys/kernel/config/target/qla2xxx//tpgt_1 +echo 1 > /sys/kernel/config/target/qla2xxx//tpgt_1/enable +echo 0 > /sys/kernel/config/target/qla2xxx//tpgt_1/enable + +qla2xxx [0001:00:02.0]-e881:1: qla2x00_wait_for_hba_online() failed + +The reason of this message is the QLA_FUNCTION_FAILED code that +qla2x00_wait_for_hba_online() returns. However, qlt_disable_vha() expects +that adapter is offlined and QLA_FUNCTION_FAILED informs about the offline +state of the adapter. + +The qla2x00_abort_isp() function finishes the execution at the point of +checking the adapter's mode (for example, qla_tgt_mode_enabled()) because +of the qlt_disable_vha() calls qlt_clear_mode() method. It means that +qla2x00_abort_isp() keeps vha->flags.online is equal to zero. Finally, +qla2x00_wait_for_hba_online() checks the state of this flag and returns +QLA_FUNCTION_FAILED error code. + +This patch changes the failure message which informs about adapter's +offline state. + +Link: https://lore.kernel.org/r/3cd0bbf3599c53b0c2a7184582d705d8b8052c8b.camel@yadro.com +Reviewed-by: Roman Bolshakov +Reviewed-by: Himanshu Madhani +Signed-off-by: Viacheslav Dubeyko +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_target.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -6669,9 +6669,14 @@ static void qlt_disable_vha(struct scsi_ + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); ++ ++ /* ++ * We are expecting the offline state. ++ * QLA_FUNCTION_FAILED means that adapter is offline. ++ */ + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_tgt, vha, 0xe081, +- "qla2x00_wait_for_hba_online() failed\n"); ++ "adapter is offline\n"); + } + + /* diff --git a/patches.suse/scsi-qla2xxx-Fix-issue-with-adapter-s-stopping-state.patch b/patches.suse/scsi-qla2xxx-Fix-issue-with-adapter-s-stopping-state.patch new file mode 100644 index 0000000..c16567e --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-issue-with-adapter-s-stopping-state.patch @@ -0,0 +1,84 @@ +From: Viacheslav Dubeyko +Date: Wed, 22 Apr 2020 13:55:52 +0300 +Subject: scsi: qla2xxx: Fix issue with adapter's stopping state +Patch-mainline: v5.8-rc1 +Git-commit: 803e45550b11c8e43d89812356fe6f105adebdf9 +References: bsc#1171688 bsc#1174003 + +The goal of the following command sequence is to restart the adapter. +However, the tgt_stop flag remains set, indicating that the adapter is +still in stopping state even after re-enabling it. 
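+
+Before this patch only qlt_stop_phase1() runs from the enable attribute. For
+reading the tgt_stop/tgt_stopped values printed in the logs below, the two
+stop phases behave roughly as follows (a minimal sketch; the actual call
+sites are in the hunks at the end of this patch):
+
+	qlt_stop_phase1(vha->vha_tgt.qla_tgt);	/* tgt_stop = 1, tgt_stopped = 0 ("stopping") */
+	qlt_stop_phase2(vha->vha_tgt.qla_tgt);	/* tgt_stop = 0, tgt_stopped = 1 ("stopped")  */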
+ +echo 0x7fffffff > /sys/module/qla2xxx/parameters/logging +modprobe target_core_mod +modprobe tcm_qla2xxx +mkdir /sys/kernel/config/target/qla2xxx +mkdir /sys/kernel/config/target/qla2xxx/ +mkdir /sys/kernel/config/target/qla2xxx//tpgt_1 +echo 1 > /sys/kernel/config/target/qla2xxx//tpgt_1/enable +echo 0 > /sys/kernel/config/target/qla2xxx//tpgt_1/enable +echo 1 > /sys/kernel/config/target/qla2xxx//tpgt_1/enable + +kernel: PID 1396:qla_target.c:1555 qlt_stop_phase1(): tgt_stop 0x0, tgt_stopped 0x0 +kernel: qla2xxx [0001:00:02.0]-e803:1: PID 1396:qla_target.c:1567: Stopping target for host 1(c0000000033557e8) +kernel: PID 1396:qla_target.c:1579 qlt_stop_phase1(): tgt_stop 0x1, tgt_stopped 0x0 +kernel: PID 1396:qla_target.c:1266 qlt_schedule_sess_for_deletion(): tgt_stop 0x1, tgt_stopped 0x0 +kernel: qla2xxx [0001:00:02.0]-e801:1: PID 1396:qla_target.c:1316: Scheduling sess c00000002d5cd800 for deletion 21:00:00:24:ff:7f:35:c7 + +kernel: qla2xxx [0001:00:02.0]-290a:1: PID 340:qla_target.c:1187: qlt_unreg_sess sess c00000002d5cd800 for deletion 21:00:00:24:ff:7f:35:c7 + +kernel: qla2xxx [0001:00:02.0]-f801:1: PID 340:qla_target.c:1145: Unregistration of sess c00000002d5cd800 21:00:00:24:ff:7f:35:c7 finished fcp_cnt 0 +kernel: PID 340:qla_target.c:1155 qlt_free_session_done(): tgt_stop 0x1, tgt_stopped 0x0 +kernel: qla2xxx [0001:00:02.0]-4807:1: PID 346:qla_os.c:6329: ISP abort scheduled. + +kernel: qla2xxx [0001:00:02.0]-28f1:1: PID 346:qla_os.c:3956: Mark all dev lost +kernel: PID 346:qla_target.c:1266 qlt_schedule_sess_for_deletion(): tgt_stop 0x1, tgt_stopped 0x0 +kernel: qla2xxx [0001:00:02.0]-4808:1: PID 346:qla_os.c:6338: ISP abort end. + +kernel: PID 1396:qla_target.c:6812 qlt_enable_vha(): tgt_stop 0x1, tgt_stopped 0x0 + +kernel: qla2xxx [0001:00:02.0]-4807:1: PID 346:qla_os.c:6329: ISP abort scheduled. + +kernel: qla2xxx [0001:00:02.0]-4808:1: PID 346:qla_os.c:6338: ISP abort end. + +qlt_handle_cmd_for_atio() rejects the request to send commands because the +adapter is in the stopping state: + +kernel: PID 0:qla_target.c:4442 qlt_handle_cmd_for_atio(): tgt_stop 0x1, tgt_stopped 0x0 +kernel: qla2xxx [0001:00:02.0]-3861:1: PID 0:qla_target.c:4447: New command while device c000000005314600 is shutting down +kernel: qla2xxx [0001:00:02.0]-e85f:1: PID 0:qla_target.c:5728: qla_target: Unable to send command to target + +This patch calls qla_stop_phase2() in addition to qlt_stop_phase1() in +tcm_qla2xxx_tpg_enable_store() and tcm_qla2xxx_npiv_tpg_enable_store(). The +qlt_stop_phase1() marks adapter as stopping (tgt_stop == 0x1, tgt_stopped +== 0x0) but qlt_stop_phase2() marks adapter as stopped (tgt_stop == 0x0, +tgt_stopped == 0x1). + +Link: https://lore.kernel.org/r/52be1e8a3537f6c5407eae3edd4c8e08a9545ea5.camel@yadro.com +Reviewed-by: Roman Bolshakov +Reviewed-by: Himanshu Madhani +Signed-off-by: Viacheslav Dubeyko +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/tcm_qla2xxx.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -997,6 +997,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_st + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); ++ qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return count; +@@ -1161,6 +1162,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enab + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); ++ qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return count; diff --git a/patches.suse/scsi-qla2xxx-Fix-login-timeout.patch b/patches.suse/scsi-qla2xxx-Fix-login-timeout.patch new file mode 100644 index 0000000..550dc09 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-login-timeout.patch @@ -0,0 +1,64 @@ +From: Quinn Tran +Date: Thu, 6 Aug 2020 04:10:07 -0700 +Subject: scsi: qla2xxx: Fix login timeout +Patch-mainline: v5.9-rc2 +Git-commit: abb31aeaa9b20680b0620b23fea5475ea4591e31 +References: bsc#1171688 bsc#1174003 + +Multipath errors were seen during failback due to login timeout. The +remote device sent LOGO, the local host tore down the session and did +relogin. The RSCN arrived indicates remote device is going through failover +after which the relogin is in a 20s timeout phase. At this point the +driver is stuck in the relogin process. Add a fix to delete the session as +part of abort/flush the login. + +Link: https://lore.kernel.org/r/20200806111014.28434-5-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Quinn Tran +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_gs.c | 18 +++++++++++++++--- + drivers/scsi/qla2xxx/qla_target.c | 2 +- + 2 files changed, 16 insertions(+), 4 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_gs.c ++++ b/drivers/scsi/qla2xxx/qla_gs.c +@@ -3538,10 +3538,22 @@ void qla24xx_async_gnnft_done(scsi_qla_h + } + + if (fcport->scan_state != QLA_FCPORT_FOUND) { ++ bool do_delete = false; ++ ++ if (fcport->scan_needed && ++ fcport->disc_state == DSC_LOGIN_PEND) { ++ /* Cable got disconnected after we sent ++ * a login. Do delete to prevent timeout. 
++ */ ++ fcport->logout_on_delete = 1; ++ do_delete = true; ++ } ++ + fcport->scan_needed = 0; +- if ((qla_dual_mode_enabled(vha) || +- qla_ini_mode_enabled(vha)) && +- atomic_read(&fcport->state) == FCS_ONLINE) { ++ if (((qla_dual_mode_enabled(vha) || ++ qla_ini_mode_enabled(vha)) && ++ atomic_read(&fcport->state) == FCS_ONLINE) || ++ do_delete) { + if (fcport->loop_id != FC_NO_LOOP_ID) { + if (fcport->flags & FCF_FCP2_DEVICE) + fcport->logout_on_delete = 0; +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -1279,7 +1279,7 @@ void qlt_schedule_sess_for_deletion(stru + + qla24xx_chk_fcp_state(sess); + +- ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, ++ ql_dbg(ql_dbg_disc, sess->vha, 0xe001, + "Scheduling sess %p for deletion %8phC\n", + sess, sess->port_name); + diff --git a/patches.suse/scsi-qla2xxx-Fix-null-pointer-access-during-disconne.patch b/patches.suse/scsi-qla2xxx-Fix-null-pointer-access-during-disconne.patch new file mode 100644 index 0000000..d370119 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-null-pointer-access-during-disconne.patch @@ -0,0 +1,69 @@ +From: Quinn Tran +Date: Thu, 6 Aug 2020 04:10:12 -0700 +Subject: scsi: qla2xxx: Fix null pointer access during disconnect from + subsystem +Patch-mainline: v5.9-rc2 +Git-commit: 83949613fac61e8e37eadf8275bf072342302f4e +References: bsc#1171688 bsc#1174003 + +NVMEAsync command is being submitted to QLA while the same NVMe controller +is in the middle of reset. The reset path has deleted the association and +freed aen_op->fcp_req.private. Add a check for this private pointer before +issuing the command. + +... + 6 [ffffb656ca11fce0] page_fault at ffffffff8c00114e + [exception RIP: qla_nvme_post_cmd+394] + RIP: ffffffffc0d012ba RSP: ffffb656ca11fd98 RFLAGS: 00010206 + RAX: ffff8fb039eda228 RBX: ffff8fb039eda200 RCX: 00000000000da161 + RDX: ffffffffc0d4d0f0 RSI: ffffffffc0d26c9b RDI: ffff8fb039eda220 + RBP: 0000000000000013 R8: ffff8fb47ff6aa80 R9: 0000000000000002 + R10: 0000000000000000 R11: ffffb656ca11fdc8 R12: ffff8fb27d04a3b0 + R13: ffff8fc46dd98a58 R14: 0000000000000000 R15: ffff8fc4540f0000 + ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 + 7 [ffffb656ca11fe08] nvme_fc_start_fcp_op at ffffffffc0241568 [nvme_fc] + 8 [ffffb656ca11fe50] nvme_fc_submit_async_event at ffffffffc0241901 [nvme_fc] + 9 [ffffb656ca11fe68] nvme_async_event_work at ffffffffc014543d [nvme_core] +10 [ffffb656ca11fe98] process_one_work at ffffffff8b6cd437 +11 [ffffb656ca11fed8] worker_thread at ffffffff8b6cdcef +12 [ffffb656ca11ff10] kthread at ffffffff8b6d3402 +13 [ffffb656ca11ff50] ret_from_fork at ffffffff8c000255 + +-- +PID: 37824 TASK: ffff8fb033063d80 CPU: 20 COMMAND: "kworker/u97:451" + 0 [ffffb656ce1abc28] __schedule at ffffffff8be629e3 + 1 [ffffb656ce1abcc8] schedule at ffffffff8be62fe8 + 2 [ffffb656ce1abcd0] schedule_timeout at ffffffff8be671ed + 3 [ffffb656ce1abd70] wait_for_completion at ffffffff8be639cf + 4 [ffffb656ce1abdd0] flush_work at ffffffff8b6ce2d5 + 5 [ffffb656ce1abe70] nvme_stop_ctrl at ffffffffc0144900 [nvme_core] + 6 [ffffb656ce1abe80] nvme_fc_reset_ctrl_work at ffffffffc0243445 [nvme_fc] + 7 [ffffb656ce1abe98] process_one_work at ffffffff8b6cd437 + 8 [ffffb656ce1abed8] worker_thread at ffffffff8b6cdb50 + 9 [ffffb656ce1abf10] kthread at ffffffff8b6d3402 +10 [ffffb656ce1abf50] ret_from_fork at ffffffff8c000255 + +Link: https://lore.kernel.org/r/20200806111014.28434-10-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Quinn Tran +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_nvme.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/drivers/scsi/qla2xxx/qla_nvme.c ++++ b/drivers/scsi/qla2xxx/qla_nvme.c +@@ -547,6 +547,11 @@ static int qla_nvme_post_cmd(struct nvme + struct nvme_private *priv = fd->private; + struct qla_nvme_rport *qla_rport = rport->private; + ++ if (!priv) { ++ /* nvme association has been torn down */ ++ return rval; ++ } ++ + fcport = qla_rport->fcport; + + if (!qpair || !fcport || (qpair && !qpair->fw_started) || diff --git a/patches.suse/scsi-qla2xxx-Fix-spelling-of-a-variable-name.patch b/patches.suse/scsi-qla2xxx-Fix-spelling-of-a-variable-name.patch new file mode 100644 index 0000000..687c22b --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-spelling-of-a-variable-name.patch @@ -0,0 +1,56 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:16:58 -0700 +Subject: scsi: qla2xxx: Fix spelling of a variable name +Patch-mainline: v5.8-rc1 +Git-commit: 246ee22583ed4847dcdd9a2f307eeca856f42882 +References: bsc#1171688 bsc#1174003 + +Change "offet" into "offset" in a variable name. + +Link: https://lore.kernel.org/r/20200518211712.11395-2-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Reviewed-by: Hannes Reinecke +Reviewed-by: Arun Easi +Reviewed-by: Roman Bolshakov +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_fw.h | 2 +- + drivers/scsi/qla2xxx/qla_init.c | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_fw.h ++++ b/drivers/scsi/qla2xxx/qla_fw.h +@@ -1292,7 +1292,7 @@ struct device_reg_24xx { + }; + /* RISC-RISC semaphore register PCI offet */ + #define RISC_REGISTER_BASE_OFFSET 0x7010 +-#define RISC_REGISTER_WINDOW_OFFET 0x6 ++#define RISC_REGISTER_WINDOW_OFFSET 0x6 + + /* RISC-RISC semaphore/flag register (risc address 0x7016) */ + +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -2863,7 +2863,7 @@ qla25xx_read_risc_sema_reg(scsi_qla_host + struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; + + WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); +- *data = RD_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET); ++ *data = RD_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET); + + } + +@@ -2873,7 +2873,7 @@ qla25xx_write_risc_sema_reg(scsi_qla_hos + struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; + + WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); +- WRT_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET, data); ++ WRT_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data); + } + + static void diff --git a/patches.suse/scsi-qla2xxx-Fix-the-code-that-reads-from-mailbox-re.patch b/patches.suse/scsi-qla2xxx-Fix-the-code-that-reads-from-mailbox-re.patch new file mode 100644 index 0000000..5b51cbc --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-the-code-that-reads-from-mailbox-re.patch @@ -0,0 +1,268 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:07 -0700 +Subject: scsi: qla2xxx: Fix the code that reads from mailbox registers +Patch-mainline: v5.8-rc1 +Git-commit: 37139da1b097e06841d40a6055db64c78755aea9 +References: bsc#1171688 bsc#1174003 + +Make the MMIO accessors strongly typed such that the compiler checks +whether the accessor function is used that matches the register width. Fix +those MMIO accesses where another number of bits was read or written than +the size of the register. 
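+
+In essence (a sketch; the full set of converted accessors is in the
+qla_def.h hunk below), the readw()/writew() wrapper macros such as
+
+	#define RD_REG_WORD(addr)	readw(addr)
+
+become static inline functions that take a pointer of the matching register
+width, so the compiler can warn when, for example, a 32-bit register is read
+through the 16-bit accessor:
+
+	static inline u16 RD_REG_WORD(const volatile __le16 __iomem *addr)
+	{
+		return readw(addr);
+	}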
+ +Link: https://lore.kernel.org/r/20200518211712.11395-11-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Reviewed-by: Hannes Reinecke +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_def.h | 53 +++++++++++++++++++++++++++++++++------- + drivers/scsi/qla2xxx/qla_init.c | 6 ++-- + drivers/scsi/qla2xxx/qla_iocb.c | 2 - + drivers/scsi/qla2xxx/qla_isr.c | 4 +-- + drivers/scsi/qla2xxx/qla_mbx.c | 2 - + drivers/scsi/qla2xxx/qla_mr.c | 26 +++++++++---------- + drivers/scsi/qla2xxx/qla_nx.c | 4 +-- + drivers/scsi/qla2xxx/qla_os.c | 2 - + 8 files changed, 67 insertions(+), 32 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -128,15 +128,50 @@ static inline uint32_t make_handle(uint1 + * I/O register + */ + +-#define RD_REG_BYTE(addr) readb(addr) +-#define RD_REG_WORD(addr) readw(addr) +-#define RD_REG_DWORD(addr) readl(addr) +-#define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr) +-#define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr) +-#define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr) +-#define WRT_REG_BYTE(addr, data) writeb(data, addr) +-#define WRT_REG_WORD(addr, data) writew(data, addr) +-#define WRT_REG_DWORD(addr, data) writel(data, addr) ++static inline u8 RD_REG_BYTE(const volatile u8 __iomem *addr) ++{ ++ return readb(addr); ++} ++ ++static inline u16 RD_REG_WORD(const volatile __le16 __iomem *addr) ++{ ++ return readw(addr); ++} ++ ++static inline u32 RD_REG_DWORD(const volatile __le32 __iomem *addr) ++{ ++ return readl(addr); ++} ++ ++static inline u8 RD_REG_BYTE_RELAXED(const volatile u8 __iomem *addr) ++{ ++ return readb_relaxed(addr); ++} ++ ++static inline u16 RD_REG_WORD_RELAXED(const volatile __le16 __iomem *addr) ++{ ++ return readw_relaxed(addr); ++} ++ ++static inline u32 RD_REG_DWORD_RELAXED(const volatile __le32 __iomem *addr) ++{ ++ return readl_relaxed(addr); ++} ++ ++static inline void WRT_REG_BYTE(volatile u8 __iomem *addr, u8 data) ++{ ++ return writeb(data, addr); ++} ++ ++static inline void WRT_REG_WORD(volatile __le16 __iomem *addr, u16 data) ++{ ++ return writew(data, addr); ++} ++ ++static inline void WRT_REG_DWORD(volatile __le32 __iomem *addr, u32 data) ++{ ++ return writel(data, addr); ++} + + /* + * ISP83XX specific remote register addresses +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -2221,7 +2221,7 @@ qla2x00_initialize_adapter(scsi_qla_host + + /* Check for secure flash support */ + if (IS_QLA28XX(ha)) { +- if (RD_REG_DWORD(®->mailbox12) & BIT_0) ++ if (RD_REG_WORD(®->mailbox12) & BIT_0) + ha->flags.secure_adapter = 1; + ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n", + (ha->flags.secure_adapter) ? "Yes" : "No"); +@@ -2782,7 +2782,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f, + "HCCR: 0x%x, MailBox0 Status 0x%x\n", + RD_REG_DWORD(®->hccr), +- RD_REG_DWORD(®->mailbox0)); ++ RD_REG_WORD(®->mailbox0)); + + /* Wait for soft-reset to complete. 
*/ + RD_REG_DWORD(®->ctrl_status); +@@ -4096,7 +4096,7 @@ qla24xx_config_rings(struct scsi_qla_hos + } + + /* PCI posting */ +- RD_REG_DWORD(&ioreg->hccr); ++ RD_REG_WORD(&ioreg->hccr); + } + + /** +--- a/drivers/scsi/qla2xxx/qla_iocb.c ++++ b/drivers/scsi/qla2xxx/qla_iocb.c +@@ -2268,7 +2268,7 @@ void * + IS_QLA28XX(ha)) + cnt = RD_REG_DWORD(®->isp25mq.req_q_out); + else if (IS_P3P_TYPE(ha)) +- cnt = RD_REG_DWORD(®->isp82.req_q_out); ++ cnt = RD_REG_DWORD(reg->isp82.req_q_out); + else if (IS_FWI2_CAPABLE(ha)) + cnt = RD_REG_DWORD(®->isp24.req_q_out); + else if (IS_QLAFX00(ha)) +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -451,7 +451,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, + int rval; + struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; +- uint16_t __iomem *wptr; ++ __le16 __iomem *wptr; + uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; + + /* Seed data -- mailbox1 -> mailbox7. */ +@@ -3163,7 +3163,7 @@ qla24xx_mbx_completion(scsi_qla_host_t * + { + uint16_t cnt; + uint32_t mboxes; +- uint16_t __iomem *wptr; ++ __le16 __iomem *wptr; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -106,7 +106,7 @@ qla2x00_mailbox_command(scsi_qla_host_t + uint8_t io_lock_on; + uint16_t command = 0; + uint16_t *iptr; +- uint16_t __iomem *optr; ++ __le16 __iomem *optr; + uint32_t cnt; + uint32_t mboxes; + unsigned long wait_time; +--- a/drivers/scsi/qla2xxx/qla_mr.c ++++ b/drivers/scsi/qla2xxx/qla_mr.c +@@ -46,7 +46,7 @@ qlafx00_mailbox_command(scsi_qla_host_t + uint8_t io_lock_on; + uint16_t command = 0; + uint32_t *iptr; +- uint32_t __iomem *optr; ++ __le32 __iomem *optr; + uint32_t cnt; + uint32_t mboxes; + unsigned long wait_time; +@@ -109,7 +109,7 @@ qlafx00_mailbox_command(scsi_qla_host_t + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Load mailbox registers. */ +- optr = (uint32_t __iomem *)®->ispfx00.mailbox0; ++ optr = ®->ispfx00.mailbox0; + + iptr = mcp->mb; + command = mcp->mb[0]; +@@ -2843,13 +2843,13 @@ qlafx00_async_event(scsi_qla_host_t *vha + break; + + default: +- ha->aenmb[1] = RD_REG_WORD(®->aenmailbox1); +- ha->aenmb[2] = RD_REG_WORD(®->aenmailbox2); +- ha->aenmb[3] = RD_REG_WORD(®->aenmailbox3); +- ha->aenmb[4] = RD_REG_WORD(®->aenmailbox4); +- ha->aenmb[5] = RD_REG_WORD(®->aenmailbox5); +- ha->aenmb[6] = RD_REG_WORD(®->aenmailbox6); +- ha->aenmb[7] = RD_REG_WORD(®->aenmailbox7); ++ ha->aenmb[1] = RD_REG_DWORD(®->aenmailbox1); ++ ha->aenmb[2] = RD_REG_DWORD(®->aenmailbox2); ++ ha->aenmb[3] = RD_REG_DWORD(®->aenmailbox3); ++ ha->aenmb[4] = RD_REG_DWORD(®->aenmailbox4); ++ ha->aenmb[5] = RD_REG_DWORD(®->aenmailbox5); ++ ha->aenmb[6] = RD_REG_DWORD(®->aenmailbox6); ++ ha->aenmb[7] = RD_REG_DWORD(®->aenmailbox7); + ql_dbg(ql_dbg_async, vha, 0x5078, + "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n", + ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], +@@ -2869,7 +2869,7 @@ static void + qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) + { + uint16_t cnt; +- uint32_t __iomem *wptr; ++ __le32 __iomem *wptr; + struct qla_hw_data *ha = vha->hw; + struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; + +@@ -2879,7 +2879,7 @@ qlafx00_mbx_completion(scsi_qla_host_t * + /* Load return mailbox registers. 
*/ + ha->flags.mbox_int = 1; + ha->mailbox_out32[0] = mb0; +- wptr = (uint32_t __iomem *)®->mailbox17; ++ wptr = ®->mailbox17; + + for (cnt = 1; cnt < ha->mbx_count; cnt++) { + ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr); +@@ -2936,13 +2936,13 @@ qlafx00_intr_handler(int irq, void *dev_ + break; + + if (stat & QLAFX00_INTR_MB_CMPLT) { +- mb[0] = RD_REG_WORD(®->mailbox16); ++ mb[0] = RD_REG_DWORD(®->mailbox16); + qlafx00_mbx_completion(vha, mb[0]); + status |= MBX_INTERRUPT; + clr_intr |= QLAFX00_INTR_MB_CMPLT; + } + if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) { +- ha->aenmb[0] = RD_REG_WORD(®->aenmailbox0); ++ ha->aenmb[0] = RD_REG_DWORD(®->aenmailbox0); + qlafx00_async_event(vha); + clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; + } +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -1996,11 +1996,11 @@ void + qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) + { + uint16_t cnt; +- uint16_t __iomem *wptr; ++ __le16 __iomem *wptr; + struct qla_hw_data *ha = vha->hw; + struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; + +- wptr = (uint16_t __iomem *)®->mailbox_out[1]; ++ wptr = ®->mailbox_out[1]; + + /* Load return mailbox registers. */ + ha->flags.mbox_int = 1; +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -7569,7 +7569,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev + + spin_lock_irqsave(&ha->hardware_lock, flags); + if (IS_QLA2100(ha) || IS_QLA2200(ha)){ +- stat = RD_REG_DWORD(®->hccr); ++ stat = RD_REG_WORD(®->hccr); + if (stat & HCCR_RISC_PAUSE) + risc_paused = 1; + } else if (IS_QLA23XX(ha)) { diff --git a/patches.suse/scsi-qla2xxx-Fix-warning-after-FC-target-reset.patch b/patches.suse/scsi-qla2xxx-Fix-warning-after-FC-target-reset.patch new file mode 100644 index 0000000..c0b2193 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Fix-warning-after-FC-target-reset.patch @@ -0,0 +1,101 @@ +From: Viacheslav Dubeyko +Date: Fri, 10 Apr 2020 11:07:08 +0300 +Subject: scsi: qla2xxx: Fix warning after FC target reset +Patch-mainline: v5.8-rc1 +Git-commit: f839544ccff60cbe534282aac68858fc3fb278ca +References: bsc#1171688 bsc#1174003 + +Currently, FC target reset finishes with the warning message: + +[84010.596893] ------------[ cut here ]------------ +[84010.596917] WARNING: CPU: 238 PID: 279973 at ../drivers/scsi/qla2xxx/qla_target.c:6644 qlt_enable_vha+0x1d0/0x260 [qla2xxx] +[84010.596918] Modules linked in: vrf af_packet 8021q garp mrp stp llc netlink_diag target_tatlin_tblock(OEX) dm_ec(OEX) ttln_rdma(OEX) dm_frontend(OEX) nvme_rdma nvmet tcm_qla2xxx iscsi_target_mod target_core_mod at24 nvmem_core pnv_php ipmi_watchdog ipmi_ssif vmx_crypto gf128mul crct10dif_vpmsum qla2xxx rpcrdma nvme_fc powernv_flash(X) nvme_fabrics uio_pdrv_genirq mtd rtc_opal(X) ibmpowernv(X) opal_prd(X) uio scsi_transport_fc i2c_opal(X) ses enclosure ipmi_poweroff ast i2c_algo_bit ttm bmc_mcu(OEX) drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops drm drm_panel_orientation_quirks agpgart nfsd auth_rpcgss nfs_acl ipmi_powernv(X) lockd ipmi_devintf ipmi_msghandler grace dummy ext4 crc16 jbd2 mbcache sd_mod rdma_ucm ib_iser rdma_cm ib_umad iw_cm ib_ipoib libiscsi scsi_transport_iscsi ib_cm +[84010.596975] configfs mlx5_ib ib_uverbs ib_core mlx5_core crc32c_vpmsum xhci_pci xhci_hcd mpt3sas(OEX) tg3 usbcore mlxfw tls raid_class libphy scsi_transport_sas devlink ptp pps_core nvme nvme_core sunrpc dm_mirror dm_region_hash dm_log sg dm_multipath dm_mod scsi_dh_rdac scsi_dh_emc scsi_dh_alua scsi_mod autofs4 +[84010.597001] Supported: Yes, External 
+[84010.597004] CPU: 238 PID: 279973 Comm: bash Tainted: G OE 4.12.14-197.29-default #1 SLE15-SP1 +[84010.597006] task: c000000a104c0000 task.stack: c000000b52188000 +[84010.597007] NIP: d00000001ffd7f78 LR: d00000001ffd7f6c CTR: c0000000001676c0 +[84010.597008] REGS: c000000b5218b910 TRAP: 0700 Tainted: G OE (4.12.14-197.29-default) +[84010.597008] MSR: 900000010282b033 +[84010.597015] CR: 48242424 XER: 00000000 +[84010.597016] CFAR: d00000001ff45d08 SOFTE: 1 + GPR00: d00000001ffd7f6c c000000b5218bb90 d00000002001b228 0000000000000102 + GPR04: 0000000000000001 0000000000000001 00013d91ed0a5e2d 0000000000000000 + GPR08: c000000007793300 0000000000000000 0000000000000000 c000000a086e7818 + GPR12: 0000000000002200 c000000007793300 0000000000000000 000000012bc937c0 + GPR16: 000000012bbf7ed0 0000000000000000 000000012bc3dd10 0000000000000000 + GPR20: 000000012bc4db28 0000010036442810 000000012bc97828 000000012bc96c70 + GPR24: 00000100365b1550 0000000000000000 00000100363f3d80 c000000be20d3080 + GPR28: c000000bda7eae00 c000000be20db7e8 c000000be20d3778 c000000be20db7e8 +[84010.597042] NIP [d00000001ffd7f78] qlt_enable_vha+0x1d0/0x260 [qla2xxx] +[84010.597051] LR [d00000001ffd7f6c] qlt_enable_vha+0x1c4/0x260 [qla2xxx] +[84010.597051] Call Trace: +[84010.597061] [c000000b5218bb90] [d00000001ffd7f6c] qlt_enable_vha+0x1c4/0x260 [qla2xxx] (unreliable) +[84010.597064] [c000000b5218bc20] [d000000009820b6c] tcm_qla2xxx_tpg_enable_store+0xc4/0x130 [tcm_qla2xxx] +[84010.597067] [c000000b5218bcb0] [d0000000185d0e68] configfs_write_file+0xd0/0x190 [configfs] +[84010.597072] [c000000b5218bd00] [c0000000003d0edc] __vfs_write+0x3c/0x1e0 +[84010.597074] [c000000b5218bd90] [c0000000003d2ea8] vfs_write+0xd8/0x220 +[84010.597076] [c000000b5218bde0] [c0000000003d4ddc] SyS_write+0x6c/0x110 +[84010.597079] [c000000b5218be30] [c00000000000b188] system_call+0x3c/0x130 +[84010.597080] Instruction dump: +[84010.597082] 7d0050a8 7d084b78 7d0051ad 40c2fff4 7fa3eb78 4bf73965 60000000 7fa3eb78 +[84010.597086] 4bf6dcd9 60000000 2fa30000 419eff40 <0fe00000> 4bffff38 e95f0058 a12a0180 +[84010.597090] ---[ end trace e32abaf6e6fee826 ]--- + +To reproduce: + +echo 0x7fffffff > /sys/module/qla2xxx/parameters/logging +modprobe target_core_mod +modprobe tcm_qla2xxx +mkdir /sys/kernel/config/target/qla2xxx +mkdir /sys/kernel/config/target/qla2xxx/ +mkdir /sys/kernel/config/target/qla2xxx//tpgt_1 +echo 1 > /sys/kernel/config/target/qla2xxx//tpgt_1/enable +echo 0 > /sys/kernel/config/target/qla2xxx//tpgt_1/enable +echo 1 > /sys/kernel/config/target/qla2xxx//tpgt_1/enable + +SYSTEM START +kernel: pid 327:drivers/scsi/qla2xxx/qla_init.c:2174 qla2x00_initialize_adapter(): vha->flags.online 0x0 +<...> +kernel: pid 327:drivers/scsi/qla2xxx/qla_os.c:3444 qla2x00_probe_one(): vha->flags.online 0x1 + +echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:86:a6:2a/tpgt_1/enable +kernel: pid 348:drivers/scsi/qla2xxx/qla_init.c:6641 qla2x00_abort_isp_cleanup(): vha->flags.online 0x0, ISP_ABORT_NEEDED 0x0 +<...> +kernel: pid 348:drivers/scsi/qla2xxx/qla_init.c:6998 qla2x00_restart_isp(): vha->flags.online 0x0 + +echo 0 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:86:a6:2a/tpgt_1/enable +kernel: pid 348:drivers/scsi/qla2xxx/qla_init.c:6641 qla2x00_abort_isp_cleanup(): vha->flags.online 0x0, ISP_ABORT_NEEDED 0x0 +<...> +kernel: pid 1404:drivers/scsi/qla2xxx/qla_os.c:1107 qla2x00_wait_for_hba_online(): base_vha->flags.online 0x0 + +echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:86:a6:2a/tpgt_1/enable +kernel: pid 
1404:drivers/scsi/qla2xxx/qla_os.c:1107 qla2x00_wait_for_hba_online(): base_vha->flags.online 0x0 +kernel: -----------[ cut here ]----------- +kernel: WARNING: CPU: 1 PID: 1404 at drivers/scsi/qla2xxx/qla_target.c:6654 qlt_enable_vha+0x1e0/0x280 [qla2xxx] + +The issue happens because no real ISP reset is executed. The +qla2x00_abort_isp(scsi_qla_host_t *vha) function expects that +vha->flags.online will be not zero for ISP reset procedure. This patch +sets vha->flags.online to 1 before calling ->abort_isp() for starting the +ISP reset. + +Link: https://lore.kernel.org/r/1d7b21bf9f7676643239eb3d60eaca7cfa505cf0.camel@yadro.com +Reviewed-by: Roman Bolshakov +Signed-off-by: Viacheslav Dubeyko +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_os.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -6890,6 +6890,7 @@ qla2x00_do_dpc(void *data) + + if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, + &base_vha->dpc_flags))) { ++ base_vha->flags.online = 1; + ql_dbg(ql_dbg_dpc, base_vha, 0x4007, + "ISP abort scheduled.\n"); + if (ha->isp_ops->abort_isp(base_vha)) { diff --git a/patches.suse/scsi-qla2xxx-Flush-I-O-on-zone-disable.patch b/patches.suse/scsi-qla2xxx-Flush-I-O-on-zone-disable.patch new file mode 100644 index 0000000..0aca7a3 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Flush-I-O-on-zone-disable.patch @@ -0,0 +1,30 @@ +From: Quinn Tran +Date: Thu, 6 Aug 2020 04:10:05 -0700 +Subject: scsi: qla2xxx: Flush I/O on zone disable +Patch-mainline: v5.9-rc2 +Git-commit: a117579d0205b5a0592a3a98493e2b875e4da236 +References: bsc#1171688 bsc#1174003 + +Perform implicit logout to flush I/O on zone disable. + +Link: https://lore.kernel.org/r/20200806111014.28434-3-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Quinn Tran +Signed-off-by: Himanshu Madhani +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_gs.c | 1 - + 1 file changed, 1 deletion(-) + +--- a/drivers/scsi/qla2xxx/qla_gs.c ++++ b/drivers/scsi/qla2xxx/qla_gs.c +@@ -3436,7 +3436,6 @@ void qla24xx_async_gnnft_done(scsi_qla_h + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { + fcport->scan_state = QLA_FCPORT_SCAN; +- fcport->logout_on_delete = 0; + } + } + goto login_logout; diff --git a/patches.suse/scsi-qla2xxx-Flush-all-sessions-on-zone-disable.patch b/patches.suse/scsi-qla2xxx-Flush-all-sessions-on-zone-disable.patch new file mode 100644 index 0000000..006ebc6 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Flush-all-sessions-on-zone-disable.patch @@ -0,0 +1,43 @@ +From: Quinn Tran +Date: Thu, 6 Aug 2020 04:10:04 -0700 +Subject: scsi: qla2xxx: Flush all sessions on zone disable +Patch-mainline: v5.9-rc2 +Git-commit: 10ae30ba664822f62de169a61628e31c999c7cc8 +References: bsc#1171688 bsc#1174003 + +On Zone Disable, certain switches would ignore all commands. This causes +timeout for both switch scan command and abort of that command. On +detection of this condition, all sessions will be shutdown. + +Link: https://lore.kernel.org/r/20200806111014.28434-2-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Quinn Tran +Signed-off-by: Himanshu Madhani +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_gs.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +--- a/drivers/scsi/qla2xxx/qla_gs.c ++++ b/drivers/scsi/qla2xxx/qla_gs.c +@@ -3736,6 +3736,18 @@ static void qla2x00_async_gpnft_gnnft_sp + unsigned long flags; + const char *name = sp->name; + ++ if (res == QLA_OS_TIMER_EXPIRED) { ++ /* switch is ignoring all commands. ++ * This might be a zone disable behavior. ++ * This means we hit 64s timeout. ++ * 22s GPNFT + 44s Abort = 64s ++ */ ++ ql_dbg(ql_dbg_disc, vha, 0xffff, ++ "%s: Switch Zone check please .\n", ++ name); ++ qla2x00_mark_all_devices_lost(vha); ++ } ++ + /* + * We are in an Interrupt context, queue up this + * sp for GNNFT_DONE work. This will allow all diff --git a/patches.suse/scsi-qla2xxx-Increase-the-size-of-struct-qla_fcp_pri.patch b/patches.suse/scsi-qla2xxx-Increase-the-size-of-struct-qla_fcp_pri.patch new file mode 100644 index 0000000..23ab954 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Increase-the-size-of-struct-qla_fcp_pri.patch @@ -0,0 +1,57 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:04 -0700 +Subject: scsi: qla2xxx: Increase the size of struct qla_fcp_prio_cfg to + FCP_PRIO_CFG_SIZE +Patch-mainline: v5.8-rc1 +Git-commit: d9ab5f1f05fc147682d4fd1f1f1c5af00b766e33 +References: bsc#1171688 bsc#1174003 + +This patch fixes the following Coverity complaint without changing any +functionality: + +CID 337793 (#1 of 1): Wrong size argument (SIZEOF_MISMATCH) +suspicious_sizeof: Passing argument ha->fcp_prio_cfg of type +struct qla_fcp_prio_cfg * and argument 32768UL to function memset is +suspicious because a multiple of sizeof (struct qla_fcp_prio_cfg) /*48*/ +is expected. + +memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE); + +Link: https://lore.kernel.org/r/20200518211712.11395-8-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Reviewed-by: Hannes Reinecke +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_fw.h | 3 ++- + drivers/scsi/qla2xxx/qla_os.c | 1 + + 2 files changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/scsi/qla2xxx/qla_fw.h ++++ b/drivers/scsi/qla2xxx/qla_fw.h +@@ -2217,8 +2217,9 @@ struct qla_fcp_prio_cfg { + #define FCP_PRIO_ATTR_PERSIST 0x2 + uint8_t reserved; /* Reserved for future use */ + #define FCP_PRIO_CFG_HDR_SIZE 0x10 +- struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */ ++ struct qla_fcp_prio_entry entry[1023]; /* fcp priority entries */ + #define FCP_PRIO_CFG_ENTRY_SIZE 0x20 ++ uint8_t reserved2[16]; + }; + + #define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/ +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -7896,6 +7896,7 @@ qla2x00_module_init(void) + BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28); + BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32); + BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196); ++ BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE); + BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128); + BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); + BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); diff --git a/patches.suse/scsi-qla2xxx-Indicate-correct-supported-speeds-for-M.patch b/patches.suse/scsi-qla2xxx-Indicate-correct-supported-speeds-for-M.patch new file mode 100644 index 0000000..3c67908 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Indicate-correct-supported-speeds-for-M.patch @@ -0,0 +1,55 @@ +From: Quinn Tran +Date: Thu, 6 Aug 2020 04:10:06 -0700 +Subject: scsi: qla2xxx: Indicate correct supported speeds for Mezz card +Patch-mainline: v5.9-rc2 +Git-commit: 4709272f6327cc4a8ee1dc55771bcf9718346980 +References: bsc#1171688 bsc#1174003 + +Correct the supported speeds for 16G Mezz card. + +Link: https://lore.kernel.org/r/20200806111014.28434-4-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Quinn Tran +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_gs.c | 17 ++++++++++++----- + 1 file changed, 12 insertions(+), 5 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_gs.c ++++ b/drivers/scsi/qla2xxx/qla_gs.c +@@ -1505,11 +1505,11 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_p + static uint + qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) + { ++ uint speeds = 0; ++ + if (IS_CNA_CAPABLE(ha)) + return FDMI_PORT_SPEED_10GB; + if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { +- uint speeds = 0; +- + if (ha->max_supported_speed == 2) { + if (ha->min_supported_speed <= 6) + speeds |= FDMI_PORT_SPEED_64GB; +@@ -1536,9 +1536,16 @@ qla25xx_fdmi_port_speed_capability(struc + } + return speeds; + } +- if (IS_QLA2031(ha)) +- return FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| +- FDMI_PORT_SPEED_4GB; ++ if (IS_QLA2031(ha)) { ++ if ((ha->pdev->subsystem_vendor == 0x103C) && ++ (ha->pdev->subsystem_device == 0x8002)) { ++ speeds = FDMI_PORT_SPEED_16GB; ++ } else { ++ speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| ++ FDMI_PORT_SPEED_4GB; ++ } ++ return speeds; ++ } + if (IS_QLA25XX(ha)) + return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB| + FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; diff --git a/patches.suse/scsi-qla2xxx-Initialize-n-before-using-it.patch b/patches.suse/scsi-qla2xxx-Initialize-n-before-using-it.patch new file mode 100644 index 0000000..62ef01f --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Initialize-n-before-using-it.patch @@ -0,0 +1,43 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:49 -0700 +Subject: scsi: qla2xxx: Initialize 'n' before using it +Patch-mainline: v5.9-rc1 +Git-commit: 67668b5b13c7ba12f212d4b9067e54354ce5360b +References: bsc#1171688 bsc#1174003 + +The following code: + + qla82xx_rom_fast_read(ha, 0, &n) + +only initializes 'n' if it succeeds. Since 'n' may be reported in a debug +message even if no ROM reads succeeded, initialize 'n' to zero. + +This patch fixes the following sparse warning: + +qla_nx.c:1218: qla82xx_pinit_from_rom() error: uninitialized symbol 'n'. + +Link: https://lore.kernel.org/r/20200629225454.22863-5-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Shyam Sundar +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_nx.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -1167,6 +1167,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t * + * Offset 4: Offset and number of addr/value pairs + * that present in CRB initialize sequence + */ ++ n = 0; + if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || + qla82xx_rom_fast_read(ha, 4, &n) != 0) { + ql_log(ql_log_fatal, vha, 0x006e, diff --git a/patches.suse/scsi-qla2xxx-Introduce-a-function-for-computing-the-.patch b/patches.suse/scsi-qla2xxx-Introduce-a-function-for-computing-the-.patch new file mode 100644 index 0000000..840f0a8 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Introduce-a-function-for-computing-the-.patch @@ -0,0 +1,194 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:54 -0700 +Subject: scsi: qla2xxx: Introduce a function for computing the debug message + prefix +Patch-mainline: v5.9-rc1 +Git-commit: e7019c95c40daf8e8a9fa1d5f36eabce2eb8ef6b +References: bsc#1171688 bsc#1174003 + +Instead of repeating the code for generating a debug message prefix six +times, introduce a function for computing the debug message prefix. + +Link: https://lore.kernel.org/r/20200629225454.22863-10-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_dbg.c | 96 +++++++++++++---------------------------- + 1 file changed, 32 insertions(+), 64 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -2447,6 +2447,23 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) + /* Driver Debug Functions. */ + /****************************************************************************/ + ++/* Write the debug message prefix into @pbuf. */ ++static void ql_dbg_prefix(char *pbuf, int pbuf_size, ++ const scsi_qla_host_t *vha, uint msg_id) ++{ ++ if (vha) { ++ const struct pci_dev *pdev = vha->hw->pdev; ++ ++ /* []-:: */ ++ snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%ld: ", QL_MSGHDR, ++ dev_name(&(pdev->dev)), msg_id, vha->host_no); ++ } else { ++ /* []-: : */ ++ snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR, ++ "0000:00:00.0", msg_id); ++ } ++} ++ + /* + * This function is for formatting and logging debug information. + * It is to be used when vha is available. 
It formats the message +@@ -2465,41 +2482,19 @@ ql_dbg(uint level, scsi_qla_host_t *vha, + { + va_list va; + struct va_format vaf; ++ char pbuf[64]; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + +- if (!ql_mask_match(level)) { +- char pbuf[64]; ++ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id); + +- if (vha != NULL) { +- const struct pci_dev *pdev = vha->hw->pdev; +- /* : Message */ +- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ", +- QL_MSGHDR, dev_name(&(pdev->dev)), id, +- vha->host_no); +- } else { +- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", +- QL_MSGHDR, "0000:00:00.0", id); +- } +- pbuf[sizeof(pbuf) - 1] = 0; ++ if (!ql_mask_match(level)) + trace_ql_dbg_log(pbuf, &vaf); +- va_end(va); +- return; +- } +- +- if (vha != NULL) { +- const struct pci_dev *pdev = vha->hw->pdev; +- /* : Message */ +- pr_warn("%s [%s]-%04x:%ld: %pV", +- QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, +- vha->host_no, &vaf); +- } else { +- pr_warn("%s [%s]-%04x: : %pV", +- QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf); +- } ++ else ++ pr_warn("%s%pV", pbuf, &vaf); + + va_end(va); + +@@ -2524,6 +2519,7 @@ ql_dbg_pci(uint level, struct pci_dev *p + { + va_list va; + struct va_format vaf; ++ char pbuf[128]; + + if (pdev == NULL) + return; +@@ -2535,9 +2531,8 @@ ql_dbg_pci(uint level, struct pci_dev *p + vaf.fmt = fmt; + vaf.va = &va; + +- /* : Message */ +- pr_warn("%s [%s]-%04x: : %pV", +- QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf); ++ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset); ++ pr_warn("%s%pV", pbuf, &vaf); + + va_end(va); + } +@@ -2565,16 +2560,7 @@ ql_log(uint level, scsi_qla_host_t *vha, + if (level > ql_errlev) + return; + +- if (vha != NULL) { +- const struct pci_dev *pdev = vha->hw->pdev; +- /* : Message */ +- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ", +- QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no); +- } else { +- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", +- QL_MSGHDR, "0000:00:00.0", id); +- } +- pbuf[sizeof(pbuf) - 1] = 0; ++ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id); + + va_start(va, fmt); + +@@ -2625,10 +2611,7 @@ ql_log_pci(uint level, struct pci_dev *p + if (level > ql_errlev) + return; + +- /* : Message */ +- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", +- QL_MSGHDR, dev_name(&(pdev->dev)), id); +- pbuf[sizeof(pbuf) - 1] = 0; ++ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id); + + va_start(va, fmt); + +@@ -2724,16 +2707,7 @@ ql_log_qp(uint32_t level, struct qla_qpa + if (level > ql_errlev) + return; + +- if (qpair != NULL) { +- const struct pci_dev *pdev = qpair->pdev; +- /* : Message */ +- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ", +- QL_MSGHDR, dev_name(&(pdev->dev)), id); +- } else { +- snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", +- QL_MSGHDR, "0000:00:00.0", id); +- } +- pbuf[sizeof(pbuf) - 1] = 0; ++ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? 
qpair->vha : NULL, id); + + va_start(va, fmt); + +@@ -2777,6 +2751,7 @@ ql_dbg_qp(uint32_t level, struct qla_qpa + { + va_list va; + struct va_format vaf; ++ char pbuf[128]; + + if (!ql_mask_match(level)) + return; +@@ -2786,16 +2761,9 @@ ql_dbg_qp(uint32_t level, struct qla_qpa + vaf.fmt = fmt; + vaf.va = &va; + +- if (qpair != NULL) { +- const struct pci_dev *pdev = qpair->pdev; +- /* : Message */ +- pr_warn("%s [%s]-%04x: %pV", +- QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, +- &vaf); +- } else { +- pr_warn("%s [%s]-%04x: : %pV", +- QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf); +- } ++ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, ++ id + ql_dbg_offset); ++ pr_warn("%s%pV", pbuf, &vaf); + + va_end(va); + diff --git a/patches.suse/scsi-qla2xxx-Keep-initiator-ports-after-RSCN.patch b/patches.suse/scsi-qla2xxx-Keep-initiator-ports-after-RSCN.patch new file mode 100644 index 0000000..1c1d2ab --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Keep-initiator-ports-after-RSCN.patch @@ -0,0 +1,76 @@ +From: Roman Bolshakov +Date: Fri, 5 Jun 2020 17:44:37 +0300 +Subject: scsi: qla2xxx: Keep initiator ports after RSCN +Patch-mainline: v5.8-rc3 +Git-commit: 632f24f09d5b7c8a2f94932c3391ca957ae76cc4 +References: bsc#1171688 bsc#1174003 + +The driver performs SCR (state change registration) in all modes including +pure target mode. + +For each RSCN, scan_needed flag is set in qla2x00_handle_rscn() for the +port mentioned in the RSCN and fabric rescan is scheduled. During the +rescan, GNN_FT handler, qla24xx_async_gnnft_done() deletes session of the +port that caused the RSCN. + +In target mode, the session deletion has an impact on ATIO handler, +qlt_24xx_atio_pkt(). Target responds with SAM STATUS BUSY to I/O incoming +from the deleted session. qlt_handle_cmd_for_atio() and +qlt_handle_task_mgmt() return -EFAULT if they are not able to find session +of the command/TMF, and that results in invocation of qlt_send_busy(): + + qlt_24xx_atio_pkt_all_vps: qla_target(0): type 6 ox_id 0014 + qla_target(0): Unable to send command to target, sending BUSY status + +Such response causes command timeout on the initiator. Error handler thread +on the initiator will be spawned to abort the commands: + + scsi 23:0:0:0: tag#0 abort scheduled + scsi 23:0:0:0: tag#0 aborting command + qla2xxx [0000:af:00.0]-188c:23: Entered qla24xx_abort_command. + qla2xxx [0000:af:00.0]-801c:23: Abort command issued nexus=23:0:0 -- 0 2003. + +Command abort is rejected by target and fails (2003), error handler then +tries to perform DEVICE RESET and TARGET RESET but they're also doomed to +fail because TMFs are ignored for the deleted sessions. + +Then initiator makes BUS RESET that resets the link via +qla2x00_full_login_lip(). BUS RESET succeeds and brings initiator port up, +SAN switch detects that and sends RSCN to the target port and it fails +again the same way as described above. It never goes out of the loop. + +The change breaks the RSCN loop by keeping initiator sessions mentioned in +RSCN payload in all modes, including dual and pure target mode. + +Link: https://lore.kernel.org/r/20200605144435.27023-1-r.bolshakov@yadro.com +Fixes: 2037ce49d30a ("scsi: qla2xxx: Fix stale session") +Cc: Quinn Tran +Cc: Arun Easi +Cc: Nilesh Javali +Cc: Bart Van Assche +Cc: Daniel Wagner +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: stable@vger.kernel.org # v5.4+ +Reviewed-by: Daniel Wagner +Reviewed-by: Shyam Sundar +Reviewed-by: Himanshu Madhani +Signed-off-by: Roman Bolshakov +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_gs.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/scsi/qla2xxx/qla_gs.c ++++ b/drivers/scsi/qla2xxx/qla_gs.c +@@ -3496,7 +3496,9 @@ void qla24xx_async_gnnft_done(scsi_qla_h + qla2x00_clear_loop_id(fcport); + fcport->flags |= FCF_FABRIC_DEVICE; + } else if (fcport->d_id.b24 != rp->id.b24 || +- fcport->scan_needed) { ++ (fcport->scan_needed && ++ fcport->port_type != FCT_INITIATOR && ++ fcport->port_type != FCT_NVME_INITIATOR)) { + qlt_schedule_sess_for_deletion(fcport); + } + fcport->d_id.b24 = rp->id.b24; diff --git a/patches.suse/scsi-qla2xxx-Make-__qla2x00_alloc_iocbs-initialize-3.patch b/patches.suse/scsi-qla2xxx-Make-__qla2x00_alloc_iocbs-initialize-3.patch new file mode 100644 index 0000000..35aa94d --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Make-__qla2x00_alloc_iocbs-initialize-3.patch @@ -0,0 +1,51 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:51 -0700 +Subject: scsi: qla2xxx: Make __qla2x00_alloc_iocbs() initialize 32 bits of + request_t.handle +Patch-mainline: v5.9-rc1 +Git-commit: f8f12bda53eae87ca2dea42b36d19e48c9851b9f +References: bsc#1171688 bsc#1174003 + +The request_t 'handle' member is 32-bits wide, hence use wrt_reg_dword(). +Change the cast in the wrt_reg_byte() call to make it clear that a regular +pointer is casted to an __iomem pointer. + +Note: 'pkt' points to I/O memory for the qlafx00 adapter family and to +coherent memory for all other adapter families. + +This patch fixes the following Coverity complaint: + +CID 358864 (#1 of 1): Reliance on integer endianness (INCOMPATIBLE_CAST) +incompatible_cast: Pointer &pkt->handle points to an object whose effective +type is unsigned int (32 bits, unsigned) but is dereferenced as a narrower +unsigned short (16 bits, unsigned). This may lead to unexpected results +depending on machine endianness. + +Link: https://lore.kernel.org/r/20200629225454.22863-7-bvanassche@acm.org +Fixes: 8ae6d9c7eb10 ("[SCSI] qla2xxx: Enhancements to support ISPFx00.") +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_iocb.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_iocb.c ++++ b/drivers/scsi/qla2xxx/qla_iocb.c +@@ -2305,8 +2305,8 @@ void * + pkt = req->ring_ptr; + memset(pkt, 0, REQUEST_ENTRY_SIZE); + if (IS_QLAFX00(ha)) { +- wrt_reg_byte((void __iomem *)&pkt->entry_count, req_cnt); +- wrt_reg_word((void __iomem *)&pkt->handle, handle); ++ wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt); ++ wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle); + } else { + pkt->entry_count = req_cnt; + pkt->handle = handle; diff --git a/patches.suse/scsi-qla2xxx-Make-a-gap-in-struct-qla2xxx_offld_chai.patch b/patches.suse/scsi-qla2xxx-Make-a-gap-in-struct-qla2xxx_offld_chai.patch new file mode 100644 index 0000000..7532b04 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Make-a-gap-in-struct-qla2xxx_offld_chai.patch @@ -0,0 +1,34 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:03 -0700 +Subject: scsi: qla2xxx: Make a gap in struct qla2xxx_offld_chain explicit +Patch-mainline: v5.8-rc1 +Git-commit: 66f86367771507c88a970644d13646f6b57e82e5 +References: bsc#1171688 bsc#1174003 + +This patch makes struct qla2xxx_offld_chain compatible with ARCH=i386. 
+ +Link: https://lore.kernel.org/r/20200518211712.11395-7-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Hannes Reinecke +Reviewed-by: Arun Easi +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_dbg.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/scsi/qla2xxx/qla_dbg.h ++++ b/drivers/scsi/qla2xxx/qla_dbg.h +@@ -238,6 +238,7 @@ struct qla2xxx_offld_chain { + uint32_t chain_size; + + uint32_t size; ++ uint32_t reserved; + u64 addr; + }; + diff --git a/patches.suse/scsi-qla2xxx-Make-qla2x00_restart_isp-easier-to-read.patch b/patches.suse/scsi-qla2xxx-Make-qla2x00_restart_isp-easier-to-read.patch new file mode 100644 index 0000000..de5f20a --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Make-qla2x00_restart_isp-easier-to-read.patch @@ -0,0 +1,88 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:53 -0700 +Subject: scsi: qla2xxx: Make qla2x00_restart_isp() easier to read +Patch-mainline: v5.9-rc1 +Git-commit: f85a299f5ec55c82403cd2bc4aeaaedebfcf5ef6 +References: bsc#1171688 bsc#1174003 + +Instead of using complicated control flow to only have one return statement +at the end of qla2x00_restart_isp(), return an error status as soon as it +is known that this function will fail. + +Link: https://lore.kernel.org/r/20200629225454.22863-9-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_init.c | 43 ++++++++++++++++++++++------------------ + 1 file changed, 24 insertions(+), 19 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -6996,36 +6996,41 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) + static int + qla2x00_restart_isp(scsi_qla_host_t *vha) + { +- int status = 0; ++ int status; + struct qla_hw_data *ha = vha->hw; + + /* If firmware needs to be loaded */ + if (qla2x00_isp_firmware(vha)) { + vha->flags.online = 0; + status = ha->isp_ops->chip_diag(vha); +- if (!status) +- status = qla2x00_setup_chip(vha); ++ if (status) ++ return status; ++ status = qla2x00_setup_chip(vha); ++ if (status) ++ return status; + } + +- if (!status && !(status = qla2x00_init_rings(vha))) { +- clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); +- ha->flags.chip_reset_done = 1; +- +- /* Initialize the queues in use */ +- qla25xx_init_queues(ha); +- +- status = qla2x00_fw_ready(vha); +- if (!status) { +- /* Issue a marker after FW becomes ready. */ +- qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); +- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); +- } ++ status = qla2x00_init_rings(vha); ++ if (status) ++ return status; + ++ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ++ ha->flags.chip_reset_done = 1; ++ ++ /* Initialize the queues in use */ ++ qla25xx_init_queues(ha); ++ ++ status = qla2x00_fw_ready(vha); ++ if (status) { + /* if no cable then assume it's good */ +- if ((vha->device_flags & DFLG_NO_CABLE)) +- status = 0; ++ return vha->device_flags & DFLG_NO_CABLE ? 0 : status; + } +- return (status); ++ ++ /* Issue a marker after FW becomes ready. 
*/ ++ qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); ++ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); ++ ++ return 0; + } + + static int diff --git a/patches.suse/scsi-qla2xxx-Make-qla82xx_flash_wait_write_finish-ea.patch b/patches.suse/scsi-qla2xxx-Make-qla82xx_flash_wait_write_finish-ea.patch new file mode 100644 index 0000000..75f20fb --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Make-qla82xx_flash_wait_write_finish-ea.patch @@ -0,0 +1,62 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:48 -0700 +Subject: scsi: qla2xxx: Make qla82xx_flash_wait_write_finish() easier to read +Patch-mainline: v5.9-rc1 +Git-commit: 2f91a0a03c2da5140994d7b3d53469d07df9943c +References: bsc#1171688 bsc#1174003 + +Return early instead of having a single return statement at the end of this +function. This patch fixes the following sparse warning: + +qla_nx.c:1018: qla82xx_flash_wait_write_finish() error: uninitialized symbol 'val'. + +Link: https://lore.kernel.org/r/20200629225454.22863-4-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_nx.c | 19 +++++++------------ + 1 file changed, 7 insertions(+), 12 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -966,26 +966,21 @@ qla82xx_read_status_reg(struct qla_hw_da + static int + qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) + { +- long timeout = 0; +- uint32_t done = 1 ; + uint32_t val; +- int ret = 0; ++ int i, ret; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); +- while ((done != 0) && (ret == 0)) { ++ for (i = 0; i < 50000; i++) { + ret = qla82xx_read_status_reg(ha, &val); +- done = val & 1; +- timeout++; ++ if (ret < 0 || (val & 1) == 0) ++ return ret; + udelay(10); + cond_resched(); +- if (timeout >= 50000) { +- ql_log(ql_log_warn, vha, 0xb00d, +- "Timeout reached waiting for write finish.\n"); +- return -1; +- } + } +- return ret; ++ ql_log(ql_log_warn, vha, 0xb00d, ++ "Timeout reached waiting for write finish.\n"); ++ return -1; + } + + static int diff --git a/patches.suse/scsi-qla2xxx-Make-qla_set_ini_mode-return-void.patch b/patches.suse/scsi-qla2xxx-Make-qla_set_ini_mode-return-void.patch new file mode 100644 index 0000000..5ffcb30 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Make-qla_set_ini_mode-return-void.patch @@ -0,0 +1,44 @@ +From: Jason Yan +Date: Wed, 29 Apr 2020 22:09:52 +0800 +Subject: scsi: qla2xxx: Make qla_set_ini_mode() return void +Patch-mainline: v5.8-rc1 +Git-commit: 1b007f96f9e063f9f0b93597a4089114a89c1854 +References: bsc#1171688 bsc#1174003 + +The return value is not used by the caller and the local variable 'rc' is +not needed. Make qla_set_ini_mode() return void and remove 'rc'. This also +fixes the following coccicheck warning: + +drivers/scsi/qla2xxx/qla_attr.c:1906:5-7: Unneeded variable: "rc". +Return "0" on line 2180 + +Link: https://lore.kernel.org/r/20200429140952.8240-1-yanaijie@huawei.com +Signed-off-by: Jason Yan +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_attr.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -1922,9 +1922,8 @@ static char *mode_to_str[] = { + }; + + #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT) +-static int qla_set_ini_mode(scsi_qla_host_t *vha, int op) ++static void qla_set_ini_mode(scsi_qla_host_t *vha, int op) + { +- int rc = 0; + enum { + NO_ACTION, + MODE_CHANGE_ACCEPT, +@@ -2197,8 +2196,6 @@ static int qla_set_ini_mode(scsi_qla_hos + vha->ql2xexchoffld, vha->u_ql2xexchoffld); + break; + } +- +- return rc; + } + + static ssize_t diff --git a/patches.suse/scsi-qla2xxx-Make-qlafx00_process_aen-return-void.patch b/patches.suse/scsi-qla2xxx-Make-qlafx00_process_aen-return-void.patch new file mode 100644 index 0000000..f369b28 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Make-qlafx00_process_aen-return-void.patch @@ -0,0 +1,58 @@ +From: Jason Yan +Date: Wed, 6 May 2020 14:17:57 +0800 +Subject: scsi: qla2xxx: Make qlafx00_process_aen() return void +Patch-mainline: v5.8-rc1 +Git-commit: 88bfdf565cbe33524308d912777f4267981d4be0 +References: bsc#1171688 bsc#1174003 + +No other functions use the return value of qlafx00_process_aen() and the +return value is always 0 now. Make it return void. This fixes the following +coccicheck warning: + +drivers/scsi/qla2xxx/qla_mr.c:1716:5-9: Unneeded variable: "rval". +Return "0" on line 1768 + +Link: https://lore.kernel.org/r/20200506061757.19536-1-yanaijie@huawei.com +Reviewed-by: Bart Van Assche +Signed-off-by: Jason Yan +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_gbl.h | 2 +- + drivers/scsi/qla2xxx/qla_mr.c | 5 +---- + 2 files changed, 2 insertions(+), 5 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_gbl.h ++++ b/drivers/scsi/qla2xxx/qla_gbl.h +@@ -771,7 +771,7 @@ extern int qlafx00_fw_ready(scsi_qla_hos + extern int qlafx00_configure_devices(scsi_qla_host_t *); + extern int qlafx00_reset_initialize(scsi_qla_host_t *); + extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t); +-extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *); ++extern void qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *); + extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t, + uint32_t *, int); + extern uint32_t qlafx00_fw_state_show(struct device *, +--- a/drivers/scsi/qla2xxx/qla_mr.c ++++ b/drivers/scsi/qla2xxx/qla_mr.c +@@ -1710,10 +1710,9 @@ qlafx00_tgt_detach(struct scsi_qla_host + return; + } + +-int ++void + qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) + { +- int rval = 0; + uint32_t aen_code, aen_data; + + aen_code = FCH_EVT_VENDOR_UNIQUE; +@@ -1764,8 +1763,6 @@ qlafx00_process_aen(struct scsi_qla_host + + fc_host_post_event(vha->host, fc_get_event_number(), + aen_code, aen_data); +- +- return rval; + } + + static void diff --git a/patches.suse/scsi-qla2xxx-Reduce-noisy-debug-message.patch b/patches.suse/scsi-qla2xxx-Reduce-noisy-debug-message.patch new file mode 100644 index 0000000..7c196cc --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Reduce-noisy-debug-message.patch @@ -0,0 +1,32 @@ +From: Quinn Tran +Date: Thu, 6 Aug 2020 04:10:08 -0700 +Subject: scsi: qla2xxx: Reduce noisy debug message +Patch-mainline: v5.9-rc2 +Git-commit: 81b9d1e19d62bf876c3985dbaf53a3a50eedd74b +References: bsc#1171688 bsc#1174003 + +Update debug level and message for ELS IOCB done. 
+ +Link: https://lore.kernel.org/r/20200806111014.28434-6-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Quinn Tran +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_isr.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -2024,8 +2024,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vh + res = DID_ERROR << 16; + } + } +- ql_dbg(ql_dbg_user, vha, 0x503f, +- "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", ++ ql_dbg(ql_dbg_disc, vha, 0x503f, ++ "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", + type, sp->handle, comp_status, fw_status[1], fw_status[2], + le32_to_cpu(ese->total_byte_count)); + goto els_ct_done; diff --git a/patches.suse/scsi-qla2xxx-Remove-a-superfluous-cast.patch b/patches.suse/scsi-qla2xxx-Remove-a-superfluous-cast.patch new file mode 100644 index 0000000..b022eff --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Remove-a-superfluous-cast.patch @@ -0,0 +1,37 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:50 -0700 +Subject: scsi: qla2xxx: Remove a superfluous cast +Patch-mainline: v5.9-rc1 +Git-commit: 9bb013584a5ea18dfae89e33ded130cae08eb135 +References: bsc#1171688 bsc#1174003 + +Remove an unnecessary cast because it prevents the compiler to perform type +checking. + +Link: https://lore.kernel.org/r/20200629225454.22863-6-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Shyam Sundar +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_bsg.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_bsg.c ++++ b/drivers/scsi/qla2xxx/qla_bsg.c +@@ -223,8 +223,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct bsg + + /* validate fcp priority data */ + +- if (!qla24xx_fcp_prio_cfg_valid(vha, +- (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) { ++ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) { + bsg_reply->result = (DID_ERROR << 16); + ret = -EINVAL; + /* If buffer was invalidatic int diff --git a/patches.suse/scsi-qla2xxx-Remove-an-unused-function.patch b/patches.suse/scsi-qla2xxx-Remove-an-unused-function.patch new file mode 100644 index 0000000..ad4519e --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Remove-an-unused-function.patch @@ -0,0 +1,77 @@ +From: Bart Van Assche +Date: Tue, 19 May 2020 21:07:38 -0700 +Subject: scsi: qla2xxx: Remove an unused function +Patch-mainline: v5.8-rc1 +Git-commit: ce9a9321c11817e54ab36d75c2fab62537fcc993 +References: bsc#1171688 bsc#1174003 + +This was detected by building the qla2xxx driver with clang. See also +commit a9083016a531 ("[SCSI] qla2xxx: Add ISP82XX support"). + +Link: https://lore.kernel.org/r/20200520040738.1017-1-bvanassche@acm.org +Cc: Arun Easi +Cc: Nilesh Javali +Cc: Himanshu Madhani +Cc: Hannes Reinecke +Cc: Daniel Wagner +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Roman Bolshakov +Reviewed-by: Himanshu Madhani +Reviewed-by: Daniel Wagner +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_nx.c | 41 ----------------------------------------- + 1 file changed, 41 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -380,47 +380,6 @@ qla82xx_pci_set_crbwindow_2M(struct qla_ + *off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; + } + +-static inline unsigned long +-qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) +-{ +- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); +- /* See if we are currently pointing to the region we want to use next */ +- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) { +- /* No need to change window. PCIX and PCIEregs are in both +- * regs are in both windows. +- */ +- return off; +- } +- +- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) { +- /* We are in first CRB window */ +- if (ha->curr_window != 0) +- WARN_ON(1); +- return off; +- } +- +- if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) { +- /* We are in second CRB window */ +- off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST; +- +- if (ha->curr_window != 1) +- return off; +- +- /* We are in the QM or direct access +- * register region - do nothing +- */ +- if ((off >= QLA82XX_PCI_DIRECT_CRB) && +- (off < QLA82XX_PCI_CAMQM_MAX)) +- return off; +- } +- /* strange address given */ +- ql_dbg(ql_dbg_p3p, vha, 0xb001, +- "%s: Warning: unm_nic_pci_set_crbwindow " +- "called with an unknown address(%llx).\n", +- QLA2XXX_DRIVER_NAME, off); +- return off; +-} +- + static int + qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, + void __iomem **off_out) diff --git a/patches.suse/scsi-qla2xxx-Remove-return-value-from-qla_nvme_ls.patch b/patches.suse/scsi-qla2xxx-Remove-return-value-from-qla_nvme_ls.patch new file mode 100644 index 0000000..d6c20d8 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Remove-return-value-from-qla_nvme_ls.patch @@ -0,0 +1,46 @@ +From: Daniel Wagner +Date: Wed, 20 May 2020 15:08:19 +0200 +Subject: scsi: qla2xxx: Remove return value from qla_nvme_ls() +Patch-mainline: v5.8-rc1 +Git-commit: ac988c49367a52b466285239361ede0f74c672da +References: bsc#1171688 bsc#1174003 + +The function always returns QLA_SUCCESS and the caller qla2x00_start_sp() +doesn't even evalute the return value. So there is no point in returning a +status. + +Link: https://lore.kernel.org/r/20200520130819.90625-1-dwagner@suse.de +Reviewed-by: Bart Van Assche +Reviewed-by: Johannes Thumshirn +Reviewed-by: Roman Bolshakov +Reviewed-by: Himanshu Madhani +Signed-off-by: Daniel Wagner +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_iocb.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_iocb.c ++++ b/drivers/scsi/qla2xxx/qla_iocb.c +@@ -3610,11 +3610,10 @@ static void qla2x00_send_notify_ack_iocb + /* + * Build NVME LS request + */ +-static int ++static void + qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) + { + struct srb_iocb *nvme; +- int rval = QLA_SUCCESS; + + nvme = &sp->u.iocb_cmd; + cmd_pkt->entry_type = PT_LS4_REQUEST; +@@ -3634,8 +3633,6 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_req + cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len); + cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len); + put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); +- +- return rval; + } + + static void diff --git a/patches.suse/scsi-qla2xxx-Remove-the-__packed-annotation-from-str.patch b/patches.suse/scsi-qla2xxx-Remove-the-__packed-annotation-from-str.patch new file mode 100644 index 0000000..f03a827 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Remove-the-__packed-annotation-from-str.patch @@ -0,0 +1,45 @@ +From: Bart Van Assche +Date: Mon, 29 Jun 2020 15:54:47 -0700 +Subject: scsi: qla2xxx: Remove the __packed annotation from struct fcp_hdr and + fcp_hdr_le +Patch-mainline: v5.9-rc1 +Git-commit: f1e12bee55e6eb28a53662765d5d2d96fa0247b1 +References: bsc#1171688 bsc#1174003 + +Remove the __packed annotation from struct fcp_hdr* because that annotation +is not necessary for these data structures. + +Link: https://lore.kernel.org/r/20200629225454.22863-3-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_target.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_target.h ++++ b/drivers/scsi/qla2xxx/qla_target.h +@@ -267,7 +267,7 @@ struct fcp_hdr { + __be16 ox_id; + uint16_t rx_id; + __le32 parameter; +-} __packed; ++}; + + struct fcp_hdr_le { + le_id_t d_id; +@@ -282,7 +282,7 @@ struct fcp_hdr_le { + __le16 rx_id; + __le16 ox_id; + __le32 parameter; +-} __packed; ++}; + + #define F_CTL_EXCH_CONTEXT_RESP BIT_23 + #define F_CTL_SEQ_CONTEXT_RESIP BIT_22 diff --git a/patches.suse/scsi-qla2xxx-SAN-congestion-management-implementatio.patch b/patches.suse/scsi-qla2xxx-SAN-congestion-management-implementatio.patch new file mode 100644 index 0000000..97bf7f0 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-SAN-congestion-management-implementatio.patch @@ -0,0 +1,612 @@ +From: Shyam Sundar +Date: Tue, 30 Jun 2020 03:22:29 -0700 +Subject: scsi: qla2xxx: SAN congestion management implementation +Patch-mainline: v5.9-rc1 +Git-commit: 9f2475fe7406b8ef5f97099c4980021344872d9f +References: bsc#1171688 bsc#1174003 + +* Firmware Initialization with SCM enabled based on NVRAM setting and + firmware support (About Firmware). + +* Enable PUREX and add support for fabric performance impact + notification (FPIN) handling. + +* Allocate a default PUREX item for each vha to handle memory allocation + failures in ISR. + +Link: https://lore.kernel.org/r/20200630102229.29660-3-njavali@marvell.com +Reviewed-by: Himanshu Madhani +Reviewed-by: James Smart +Signed-off-by: Shyam Sundar +Signed-off-by: Arun Easi +Signed-off-by: Nilesh Javali +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_dbg.c | 13 -- + drivers/scsi/qla2xxx/qla_def.h | 49 ++++++++++ + drivers/scsi/qla2xxx/qla_fw.h | 6 + + drivers/scsi/qla2xxx/qla_gbl.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 9 + + drivers/scsi/qla2xxx/qla_isr.c | 190 ++++++++++++++++++++++++++++++++++++---- + drivers/scsi/qla2xxx/qla_mbx.c | 42 ++++++++ + drivers/scsi/qla2xxx/qla_os.c | 18 +++ + 8 files changed, 295 insertions(+), 33 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -11,10 +11,8 @@ + * ---------------------------------------------------------------------- + * | Level | Last Value Used | Holes | + * ---------------------------------------------------------------------- +- * | Module Init and Probe | 0x0193 | 0x0146 | +- * | | | 0x015b-0x0160 | +- * | | | 0x016e | +- * | Mailbox commands | 0x1206 | 0x11a2-0x11ff | ++ * | Module Init and Probe | 0x0199 | | ++ * | Mailbox commands | 0x1206 | 0x11a5-0x11ff | + * | Device Discovery | 0x2134 | 0x210e-0x2116 | + * | | | 0x211a | + * | | | 0x211c-0x2128 | +@@ -26,11 +24,7 @@ + * | | | 0x3036,0x3038 | + * | | | 0x303a | + * | DPC Thread | 0x4023 | 0x4002,0x4013 | +- * | Async Events | 0x5090 | 0x502b-0x502f | +- * | | | 0x5047 | +- * | | | 0x5084,0x5075 | +- * | | | 0x503d,0x5044 | +- * | | | 0x505f | ++ * | Async Events | 0x509c | | + * | Timer Routines | 0x6012 | | + * | User Space Interactions | 0x70e3 | 0x7018,0x702e | + * | | | 0x7020,0x7024 | +@@ -2662,7 +2656,6 @@ ql_dump_regs(uint level, scsi_qla_host_t + "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg)); + } + +- + void + ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, + uint size) +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -1055,6 +1055,7 @@ static inline bool qla2xxx_is_valid_mbs( + #define MBA_LIP_F8 0x8016 /* Received a LIP F8. */ + #define MBA_LOOP_INIT_ERR 0x8017 /* Loop Initialization Error. */ + #define MBA_FABRIC_AUTH_REQ 0x801b /* Fabric Authentication Required. */ ++#define MBA_CONGN_NOTI_RECV 0x801e /* Congestion Notification Received */ + #define MBA_SCSI_COMPLETION 0x8020 /* SCSI Command Complete. */ + #define MBA_CTIO_COMPLETION 0x8021 /* CTIO Complete. */ + #define MBA_IP_COMPLETION 0x8022 /* IP Transmit Command Complete. */ +@@ -1510,6 +1511,25 @@ typedef struct { + uint8_t reserved_3[26]; + } init_cb_t; + ++/* Special Features Control Block */ ++struct init_sf_cb { ++ uint8_t format; ++ uint8_t reserved0; ++ /* ++ * BIT 15-14 = Reserved ++ * BIT_13 = SAN Congestion Management (1 - Enabled, 0 - Disabled) ++ * BIT_12 = Remote Write Optimization (1 - Enabled, 0 - Disabled) ++ * BIT 11-0 = Reserved ++ */ ++ uint16_t flags; ++ uint8_t reserved1[32]; ++ uint16_t discard_OHRB_timeout_value; ++ uint16_t remote_write_opt_queue_num; ++ uint8_t reserved2[40]; ++ uint8_t scm_related_parameter[16]; ++ uint8_t reserved3[32]; ++}; ++ + /* + * Get Link Status mailbox command return buffer. + */ +@@ -2183,6 +2203,8 @@ typedef struct { + struct dsd64 rsp_dsd; + } ms_iocb_entry_t; + ++#define SCM_EDC_ACC_RECEIVED BIT_6 ++#define SCM_RDF_ACC_RECEIVED BIT_7 + + /* + * ISP queue - Mailbox Command entry structure definition. 
+@@ -3852,6 +3874,12 @@ struct qla_hw_data { + uint32_t n2n_bigger:1; + uint32_t secure_adapter:1; + uint32_t secure_fw:1; ++ /* Supported by Adapter */ ++ uint32_t scm_supported_a:1; ++ /* Supported by Firmware */ ++ uint32_t scm_supported_f:1; ++ /* Enabled in Driver */ ++ uint32_t scm_enabled:1; + } flags; + + uint16_t max_exchg; +@@ -4169,6 +4197,13 @@ struct qla_hw_data { + int init_cb_size; + dma_addr_t ex_init_cb_dma; + struct ex_init_cb_81xx *ex_init_cb; ++ dma_addr_t sf_init_cb_dma; ++ struct init_sf_cb *sf_init_cb; ++ ++ void *scm_fpin_els_buff; ++ uint64_t scm_fpin_els_buff_size; ++ bool scm_fpin_valid; ++ bool scm_fpin_payload_size; + + void *async_pd; + dma_addr_t async_pd_dma; +@@ -4231,6 +4266,12 @@ struct qla_hw_data { + #define FW_ATTR_H_NVME BIT_10 + #define FW_ATTR_H_NVME_UPDATED BIT_14 + ++ /* About firmware SCM support */ ++#define FW_ATTR_EXT0_SCM_SUPPORTED BIT_12 ++ /* Brocade fabric attached */ ++#define FW_ATTR_EXT0_SCM_BROCADE 0x00001000 ++ /* Cisco fabric attached */ ++#define FW_ATTR_EXT0_SCM_CISCO 0x00002000 + uint16_t fw_attributes_ext[2]; + uint32_t fw_memory_size; + uint32_t fw_transfer_size; +@@ -4541,6 +4582,13 @@ struct purex_item { + } iocb; + }; + ++#define SCM_FLAG_RDF_REJECT 0x00 ++#define SCM_FLAG_RDF_COMPLETED 0x01 ++ ++#define QLA_CON_PRIMITIVE_RECEIVED 0x1 ++#define QLA_CONGESTION_ARB_WARNING 0x1 ++#define QLA_CONGESTION_ARB_ALARM 0X2 ++ + /* + * Qlogic scsi host structure + */ +@@ -4749,6 +4797,7 @@ typedef struct scsi_qla_host { + __le16 dport_data[4]; + struct list_head gpnid_list; + struct fab_scan scan; ++ uint8_t scm_fabric_connection_flags; + + unsigned int irq_offset; + } scsi_qla_host_t; +--- a/drivers/scsi/qla2xxx/qla_fw.h ++++ b/drivers/scsi/qla2xxx/qla_fw.h +@@ -723,6 +723,8 @@ struct ct_entry_24xx { + struct dsd64 dsd[2]; + }; + ++#define PURX_ELS_HEADER_SIZE 0x18 ++ + /* + * ISP queue - PUREX IOCB entry structure definition + */ +@@ -2020,7 +2022,9 @@ struct nvram_81xx { + * BIT 0 = Extended BB credits for LR + * BIT 1 = Virtual Fabric Enable + * BIT 2-5 = Distance Support if BIT 0 is on +- * BIT 6-15 = Unused ++ * BIT 6 = Prefer FCP ++ * BIT 7 = SCM Disabled if BIT is set (1) ++ * BIT 8-15 = Unused + */ + uint16_t enhanced_features; + +--- a/drivers/scsi/qla2xxx/qla_gbl.h ++++ b/drivers/scsi/qla2xxx/qla_gbl.h +@@ -127,6 +127,7 @@ int qla_post_iidma_work(struct scsi_qla_ + void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); + int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *); + void qla_rscn_replay(fc_port_t *fcport); ++void qla24xx_free_purex_item(struct purex_item *item); + extern bool qla24xx_risc_firmware_invalid(uint32_t *); + + /* +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -3749,7 +3749,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) + } + + /* Enable PUREX PASSTHRU */ +- if (ql2xrdpenable) ++ if (ql2xrdpenable || ha->flags.scm_supported_f) + qla25xx_set_els_cmds_supported(vha); + } else + goto failed; +@@ -3962,7 +3962,7 @@ qla24xx_update_fw_options(scsi_qla_host_ + ha->fw_options[2] &= ~BIT_8; + } + +- if (ql2xrdpenable) ++ if (ql2xrdpenable || ha->flags.scm_supported_f) + ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB; + + /* Enable Async 8130/8131 events -- transceiver insertion/removal */ +@@ -8519,6 +8519,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vh + icb->node_name[0] &= 0xF0; + } + ++ if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { ++ if ((nv->enhanced_features & BIT_7) == 0) ++ ha->flags.scm_supported_a = 1; ++ } ++ + /* Set host adapter parameters. 
*/ + ha->flags.disable_risc_code_load = 0; + ha->flags.enable_lip_reset = 0; +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -22,6 +22,31 @@ static void qla2x00_status_entry(scsi_ql + static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); + static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, + sts_entry_t *); ++static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, ++ struct purex_item *item); ++static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha, ++ uint16_t size); ++static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, ++ void *pkt); ++static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, ++ void **pkt, struct rsp_que **rsp); ++ ++static void ++qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item) ++{ ++ void *pkt = &item->iocb; ++ uint16_t pkt_size = item->size; ++ ++ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d, ++ "%s: Enter\n", __func__); ++ ++ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e, ++ "-------- ELS REQ -------\n"); ++ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f, ++ pkt, pkt_size); ++ ++ fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt); ++} + + const char *const port_state_str[] = { + "Unknown", +@@ -819,7 +844,7 @@ qla24xx_queue_purex_item(scsi_qla_host_t + * @vha: SCSI driver HA context + * @pkt: ELS packet + */ +-struct purex_item ++static struct purex_item + *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) + { + struct purex_item *item; +@@ -834,6 +859,111 @@ struct purex_item + } + + /** ++ * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can ++ * span over multiple IOCBs. ++ * @vha: SCSI driver HA context ++ * @pkt: ELS packet ++ * @rsp: Response queue ++ */ ++static struct purex_item * ++qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, ++ struct rsp_que **rsp) ++{ ++ struct purex_entry_24xx *purex = *pkt; ++ struct rsp_que *rsp_q = *rsp; ++ sts_cont_entry_t *new_pkt; ++ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; ++ uint16_t buffer_copy_offset = 0; ++ uint16_t entry_count, entry_count_remaining; ++ struct purex_item *item; ++ void *fpin_pkt = NULL; ++ ++ total_bytes = le16_to_cpu(purex->frame_size & 0x0FFF) ++ - PURX_ELS_HEADER_SIZE; ++ pending_bytes = total_bytes; ++ entry_count = entry_count_remaining = purex->entry_count; ++ no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 
++ sizeof(purex->els_frame_payload) : pending_bytes; ++ ql_log(ql_log_info, vha, 0x509a, ++ "FPIN ELS, frame_size 0x%x, entry count %d\n", ++ total_bytes, entry_count); ++ ++ item = qla24xx_alloc_purex_item(vha, total_bytes); ++ if (!item) ++ return item; ++ ++ fpin_pkt = &item->iocb; ++ ++ memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes); ++ buffer_copy_offset += no_bytes; ++ pending_bytes -= no_bytes; ++ --entry_count_remaining; ++ ++ ((response_t *)purex)->signature = RESPONSE_PROCESSED; ++ wmb(); ++ ++ do { ++ while ((total_bytes > 0) && (entry_count_remaining > 0)) { ++ if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { ++ ql_dbg(ql_dbg_async, vha, 0x5084, ++ "Ran out of IOCBs, partial data 0x%x\n", ++ buffer_copy_offset); ++ cpu_relax(); ++ continue; ++ } ++ ++ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; ++ *pkt = new_pkt; ++ ++ if (new_pkt->entry_type != STATUS_CONT_TYPE) { ++ ql_log(ql_log_warn, vha, 0x507a, ++ "Unexpected IOCB type, partial data 0x%x\n", ++ buffer_copy_offset); ++ break; ++ } ++ ++ rsp_q->ring_index++; ++ if (rsp_q->ring_index == rsp_q->length) { ++ rsp_q->ring_index = 0; ++ rsp_q->ring_ptr = rsp_q->ring; ++ } else { ++ rsp_q->ring_ptr++; ++ } ++ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? ++ sizeof(new_pkt->data) : pending_bytes; ++ if ((buffer_copy_offset + no_bytes) <= total_bytes) { ++ memcpy(((uint8_t *)fpin_pkt + ++ buffer_copy_offset), new_pkt->data, ++ no_bytes); ++ buffer_copy_offset += no_bytes; ++ pending_bytes -= no_bytes; ++ --entry_count_remaining; ++ } else { ++ ql_log(ql_log_warn, vha, 0x5044, ++ "Attempt to copy more that we got, optimizing..%x\n", ++ buffer_copy_offset); ++ memcpy(((uint8_t *)fpin_pkt + ++ buffer_copy_offset), new_pkt->data, ++ total_bytes - buffer_copy_offset); ++ } ++ ++ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; ++ wmb(); ++ } ++ ++ if (pending_bytes != 0 || entry_count_remaining != 0) { ++ ql_log(ql_log_fatal, vha, 0x508b, ++ "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", ++ total_bytes, entry_count_remaining); ++ qla24xx_free_purex_item(item); ++ return NULL; ++ } ++ } while (entry_count_remaining > 0); ++ host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); ++ return item; ++} ++ ++/** + * qla2x00_async_event() - Process aynchronous events. 
+ * @vha: SCSI driver HA context + * @rsp: response queue +@@ -1346,6 +1476,19 @@ qla2x00_async_event(scsi_qla_host_t *vha + qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); + } + break; ++ case MBA_CONGN_NOTI_RECV: ++ if (!ha->flags.scm_enabled || ++ mb[1] != QLA_CON_PRIMITIVE_RECEIVED) ++ break; ++ ++ if (mb[2] == QLA_CONGESTION_ARB_WARNING) { ++ ql_dbg(ql_dbg_async, vha, 0x509b, ++ "Congestion Warning %04x %04x.\n", mb[1], mb[2]); ++ } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { ++ ql_log(ql_log_warn, vha, 0x509b, ++ "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); ++ } ++ break; + /* case MBA_RIO_RESPONSE: */ + case MBA_ZIO_RESPONSE: + ql_dbg(ql_dbg_async, vha, 0x5015, +@@ -3273,6 +3416,7 @@ void qla24xx_process_response_queue(stru + { + struct sts_entry_24xx *pkt; + struct qla_hw_data *ha = vha->hw; ++ struct purex_entry_24xx *purex_entry; + struct purex_item *pure_item; + + if (!ha->flags.fw_started) +@@ -3328,7 +3472,6 @@ void qla24xx_process_response_queue(stru + pure_item = qla24xx_copy_std_pkt(vha, pkt); + if (!pure_item) + break; +- + qla24xx_queue_purex_item(vha, pure_item, + qla24xx_process_abts); + break; +@@ -3378,29 +3521,40 @@ void qla24xx_process_response_queue(stru + (struct vp_ctrl_entry_24xx *)pkt); + break; + case PUREX_IOCB_TYPE: +- { +- struct purex_entry_24xx *purex = (void *)pkt; +- +- if (purex->els_frame_payload[3] != ELS_RDP) { +- ql_dbg(ql_dbg_init, vha, 0x5091, +- "Discarding ELS Request opcode %#x...\n", +- purex->els_frame_payload[3]); ++ purex_entry = (void *)pkt; ++ switch (purex_entry->els_frame_payload[3]) { ++ case ELS_RDP: ++ pure_item = qla24xx_copy_std_pkt(vha, pkt); ++ if (!pure_item) ++ break; ++ qla24xx_queue_purex_item(vha, pure_item, ++ qla24xx_process_purex_rdp); + break; +- } +- pure_item = qla24xx_copy_std_pkt(vha, pkt); +- if (!pure_item) ++ case ELS_FPIN: ++ if (!vha->hw->flags.scm_enabled) { ++ ql_log(ql_log_warn, vha, 0x5094, ++ "SCM not active for this port\n"); ++ break; ++ } ++ pure_item = qla27xx_copy_fpin_pkt(vha, ++ (void **)&pkt, &rsp); ++ if (!pure_item) ++ break; ++ qla24xx_queue_purex_item(vha, pure_item, ++ qla27xx_process_purex_fpin); + break; + +- qla24xx_queue_purex_item(vha, pure_item, +- qla24xx_process_purex_rdp); ++ default: ++ ql_log(ql_log_warn, vha, 0x509c, ++ "Discarding ELS Request opcode 0x%x\n", ++ purex_entry->els_frame_payload[3]); ++ } + break; +- } + default: + /* Type Not Supported. */ + ql_dbg(ql_dbg_async, vha, 0x5042, +- "Received unknown response pkt type %x " +- "entry status=%x.\n", +- pkt->entry_type, pkt->entry_status); ++ "Received unknown response pkt type 0x%x entry status=%x.\n", ++ pkt->entry_type, pkt->entry_status); + break; + } + ((response_t *)pkt)->signature = RESPONSE_PROCESSED; +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -1125,6 +1125,16 @@ qla2x00_get_fw_version(scsi_qla_host_t * + (ha->flags.secure_fw) ? "Supported" : + "Not Supported"); + } ++ ++ if (ha->flags.scm_supported_a && ++ (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) { ++ ha->flags.scm_supported_f = 1; ++ memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb)); ++ ha->sf_init_cb->flags |= BIT_13; ++ } ++ ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n", ++ (ha->flags.scm_supported_f) ? 
"Supported" : ++ "Not Supported"); + } + + failed: +@@ -1634,8 +1644,11 @@ qla2x00_get_adapter_id(scsi_qla_host_t * + mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; + if (IS_FWI2_CAPABLE(vha->hw)) + mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; +- if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) ++ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { + mcp->in_mb |= MBX_15; ++ mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23; ++ } ++ + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); +@@ -1688,8 +1701,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t * + } + } + +- if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) ++ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { + vha->bbcr = mcp->mb[15]; ++ if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) { ++ ql_log(ql_log_info, vha, 0x11a4, ++ "SCM: EDC ELS completed, flags 0x%x\n", ++ mcp->mb[21]); ++ } ++ if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) { ++ vha->hw->flags.scm_enabled = 1; ++ vha->scm_fabric_connection_flags |= ++ SCM_FLAG_RDF_COMPLETED; ++ ql_log(ql_log_info, vha, 0x11a5, ++ "SCM: RDF ELS completed, flags 0x%x\n", ++ mcp->mb[23]); ++ } ++ } + } + + return rval; +@@ -1802,6 +1829,17 @@ qla2x00_init_firmware(scsi_qla_host_t *v + mcp->mb[14] = sizeof(*ha->ex_init_cb); + mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; + } ++ ++ if (ha->flags.scm_supported_f) { ++ mcp->mb[1] |= BIT_1; ++ mcp->mb[16] = MSW(ha->sf_init_cb_dma); ++ mcp->mb[17] = LSW(ha->sf_init_cb_dma); ++ mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma)); ++ mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma)); ++ mcp->mb[15] = sizeof(*ha->sf_init_cb); ++ mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15; ++ } ++ + /* 1 and 2 should normally be captured. */ + mcp->in_mb = MBX_2|MBX_1|MBX_0; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -4232,6 +4232,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha + "ex_init_cb=%p.\n", ha->ex_init_cb); + } + ++ /* Get consistent memory allocated for Special Features-CB. */ ++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ++ ha->sf_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, ++ &ha->sf_init_cb_dma); ++ if (!ha->sf_init_cb) ++ goto fail_sf_init_cb; ++ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199, ++ "sf_init_cb=%p.\n", ha->sf_init_cb); ++ } ++ + INIT_LIST_HEAD(&ha->gbl_dsd_list); + + /* Get consistent memory allocated for Async Port-Database. 
*/ +@@ -4284,6 +4294,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha + fail_loop_id_map: + dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); + fail_async_pd: ++ dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma); ++fail_sf_init_cb: + dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); + fail_ex_init_cb: + kfree(ha->npiv_info); +@@ -4706,6 +4718,10 @@ qla2x00_mem_free(struct qla_hw_data *ha) + ha->ms_iocb = NULL; + ha->ms_iocb_dma = 0; + ++ if (ha->sf_init_cb) ++ dma_pool_free(ha->s_dma_pool, ++ ha->sf_init_cb, ha->sf_init_cb_dma); ++ + if (ha->ex_init_cb) + dma_pool_free(ha->s_dma_pool, + ha->ex_init_cb, ha->ex_init_cb_dma); +@@ -4793,6 +4809,8 @@ qla2x00_mem_free(struct qla_hw_data *ha) + kfree(ha->swl); + ha->swl = NULL; + kfree(ha->loop_id_map); ++ ha->sf_init_cb = NULL; ++ ha->sf_init_cb_dma = 0; + ha->loop_id_map = NULL; + } + diff --git a/patches.suse/scsi-qla2xxx-Set-NVMe-status-code-for-failed-NVMe-FC.patch b/patches.suse/scsi-qla2xxx-Set-NVMe-status-code-for-failed-NVMe-FC.patch index 1d3f798..dbf740a 100644 --- a/patches.suse/scsi-qla2xxx-Set-NVMe-status-code-for-failed-NVMe-FC.patch +++ b/patches.suse/scsi-qla2xxx-Set-NVMe-status-code-for-failed-NVMe-FC.patch @@ -32,7 +32,7 @@ Signed-off-by: Martin K. Petersen @@ -139,11 +139,12 @@ static void qla_nvme_release_fcp_cmd_kre sp->priv = NULL; if (priv->comp_status == QLA_SUCCESS) { - fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len; + fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); + fd->status = NVME_SC_SUCCESS; } else { fd->rcv_rsplen = 0; diff --git a/patches.suse/scsi-qla2xxx-Simplify-the-functions-for-dumping-firm.patch b/patches.suse/scsi-qla2xxx-Simplify-the-functions-for-dumping-firm.patch new file mode 100644 index 0000000..ab01707 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Simplify-the-functions-for-dumping-firm.patch @@ -0,0 +1,654 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:00 -0700 +Subject: scsi: qla2xxx: Simplify the functions for dumping firmware +Patch-mainline: v5.8-rc1 +Git-commit: 8ae178760b23e2055aecf8b8a54629cec7ccc58e +References: bsc#1171688 bsc#1174003 + +Instead of passing an argument to the firmware dumping functions that tells +these functions whether or not to obtain the hardware lock, obtain that +lock before calling these functions. This patch fixes the following +recently introduced C=2 build error: + + CHECK drivers/scsi/qla2xxx/qla_tmpl.c +drivers/scsi/qla2xxx/qla_tmpl.c:1133:1: error: Expected ; at end of statement +drivers/scsi/qla2xxx/qla_tmpl.c:1133:1: error: got } +drivers/scsi/qla2xxx/qla_tmpl.h:247:0: error: Expected } at end of function +drivers/scsi/qla2xxx/qla_tmpl.h:247:0: error: got end-of-input + +Link: https://lore.kernel.org/r/20200518211712.11395-4-bvanassche@acm.org +Fixes: cbb01c2f2f63 ("scsi: qla2xxx: Fix MPI failure AEN (8200) handling") +Cc: Arun Easi +Cc: Nilesh Javali +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Hannes Reinecke +Reviewed-by: Daniel Wagner +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_bsg.c | 4 + drivers/scsi/qla2xxx/qla_dbg.c | 153 +++++++++----------------------------- + drivers/scsi/qla2xxx/qla_def.h | 2 + drivers/scsi/qla2xxx/qla_gbl.h | 19 ++-- + drivers/scsi/qla2xxx/qla_isr.c | 12 +- + drivers/scsi/qla2xxx/qla_mbx.c | 6 - + drivers/scsi/qla2xxx/qla_nx.c | 2 + drivers/scsi/qla2xxx/qla_nx2.c | 2 + drivers/scsi/qla2xxx/qla_os.c | 2 + drivers/scsi/qla2xxx/qla_target.c | 4 + drivers/scsi/qla2xxx/qla_tmpl.c | 19 ---- + 11 files changed, 70 insertions(+), 155 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_bsg.c ++++ b/drivers/scsi/qla2xxx/qla_bsg.c +@@ -691,7 +691,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_ + * dump and reset the chip. + */ + if (ret) { +- ha->isp_ops->fw_dump(vha, 0); ++ qla2xxx_dump_fw(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + rval = -EINVAL; +@@ -896,7 +896,7 @@ qla2x00_process_loopback(struct bsg_job + * doesn't work take FCoE dump and then + * reset the chip. + */ +- ha->isp_ops->fw_dump(vha, 0); ++ qla2xxx_dump_fw(vha); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -716,35 +716,37 @@ qla2xxx_dump_post_process(scsi_qla_host_ + } + } + ++void qla2xxx_dump_fw(scsi_qla_host_t *vha) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&vha->hw->hardware_lock, flags); ++ vha->hw->isp_ops->fw_dump(vha); ++ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); ++} ++ + /** + * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. + * @vha: HA context +- * @hardware_locked: Called with the hardware_lock + */ + void +-qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) ++qla2300_fw_dump(scsi_qla_host_t *vha) + { + int rval; + uint32_t cnt; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + uint16_t __iomem *dmp_reg; +- unsigned long flags; + struct qla2300_fw_dump *fw; + void *nxt; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + +- flags = 0; +- +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_lock_irqsave(&ha->hardware_lock, flags); +-#endif ++ lockdep_assert_held(&ha->hardware_lock); + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd002, + "No buffer available for dump.\n"); +- goto qla2300_fw_dump_failed; ++ return; + } + + if (ha->fw_dumped) { +@@ -752,7 +754,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, in + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); +- goto qla2300_fw_dump_failed; ++ return; + } + fw = &ha->fw_dump->isp.isp23; + qla2xxx_prep_dump(ha, ha->fw_dump); +@@ -876,48 +878,31 @@ qla2300_fw_dump(scsi_qla_host_t *vha, in + qla2xxx_copy_queues(ha, nxt); + + qla2xxx_dump_post_process(base_vha, rval); +- +-qla2300_fw_dump_failed: +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_unlock_irqrestore(&ha->hardware_lock, flags); +-#else +- ; +-#endif + } + + /** + * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. 
+ * @vha: HA context +- * @hardware_locked: Called with the hardware_lock + */ + void +-qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) ++qla2100_fw_dump(scsi_qla_host_t *vha) + { + int rval; + uint32_t cnt, timer; +- uint16_t risc_address; +- uint16_t mb0, mb2; ++ uint16_t risc_address = 0; ++ uint16_t mb0 = 0, mb2 = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; + uint16_t __iomem *dmp_reg; +- unsigned long flags; + struct qla2100_fw_dump *fw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + +- risc_address = 0; +- mb0 = mb2 = 0; +- flags = 0; +- +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_lock_irqsave(&ha->hardware_lock, flags); +-#endif ++ lockdep_assert_held(&ha->hardware_lock); + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd004, + "No buffer available for dump.\n"); +- goto qla2100_fw_dump_failed; ++ return; + } + + if (ha->fw_dumped) { +@@ -925,7 +910,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, in + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); +- goto qla2100_fw_dump_failed; ++ return; + } + fw = &ha->fw_dump->isp.isp21; + qla2xxx_prep_dump(ha, ha->fw_dump); +@@ -1080,18 +1065,10 @@ qla2100_fw_dump(scsi_qla_host_t *vha, in + qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); + + qla2xxx_dump_post_process(base_vha, rval); +- +-qla2100_fw_dump_failed: +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_unlock_irqrestore(&ha->hardware_lock, flags); +-#else +- ; +-#endif + } + + void +-qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) ++qla24xx_fw_dump(scsi_qla_host_t *vha) + { + int rval; + uint32_t cnt; +@@ -1100,28 +1077,23 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, in + uint32_t __iomem *dmp_reg; + uint32_t *iter_reg; + uint16_t __iomem *mbx_reg; +- unsigned long flags; + struct qla24xx_fw_dump *fw; + void *nxt; + void *nxt_chain; + uint32_t *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + ++ lockdep_assert_held(&ha->hardware_lock); ++ + if (IS_P3P_TYPE(ha)) + return; + +- flags = 0; + ha->fw_dump_cap_flags = 0; + +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_lock_irqsave(&ha->hardware_lock, flags); +-#endif +- + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd006, + "No buffer available for dump.\n"); +- goto qla24xx_fw_dump_failed; ++ return; + } + + if (ha->fw_dumped) { +@@ -1129,7 +1101,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, in + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); +- goto qla24xx_fw_dump_failed; ++ return; + } + QLA_FW_STOPPED(ha); + fw = &ha->fw_dump->isp.isp24; +@@ -1339,18 +1311,10 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, in + + qla24xx_fw_dump_failed_0: + qla2xxx_dump_post_process(base_vha, rval); +- +-qla24xx_fw_dump_failed: +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_unlock_irqrestore(&ha->hardware_lock, flags); +-#else +- ; +-#endif + } + + void +-qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) ++qla25xx_fw_dump(scsi_qla_host_t *vha) + { + int rval; + uint32_t cnt; +@@ -1359,24 +1323,19 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, in + uint32_t __iomem *dmp_reg; + uint32_t *iter_reg; + uint16_t __iomem *mbx_reg; +- unsigned long flags; + struct qla25xx_fw_dump *fw; + void *nxt, *nxt_chain; + uint32_t *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + +- flags = 0; +- ha->fw_dump_cap_flags = 0; ++ lockdep_assert_held(&ha->hardware_lock); + +-#ifndef __CHECKER__ +- if 
(!hardware_locked) +- spin_lock_irqsave(&ha->hardware_lock, flags); +-#endif ++ ha->fw_dump_cap_flags = 0; + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd008, + "No buffer available for dump.\n"); +- goto qla25xx_fw_dump_failed; ++ return; + } + + if (ha->fw_dumped) { +@@ -1384,7 +1343,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, in + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); +- goto qla25xx_fw_dump_failed; ++ return; + } + QLA_FW_STOPPED(ha); + fw = &ha->fw_dump->isp.isp25; +@@ -1665,18 +1624,10 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, in + + qla25xx_fw_dump_failed_0: + qla2xxx_dump_post_process(base_vha, rval); +- +-qla25xx_fw_dump_failed: +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_unlock_irqrestore(&ha->hardware_lock, flags); +-#else +- ; +-#endif + } + + void +-qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) ++qla81xx_fw_dump(scsi_qla_host_t *vha) + { + int rval; + uint32_t cnt; +@@ -1685,24 +1636,19 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, in + uint32_t __iomem *dmp_reg; + uint32_t *iter_reg; + uint16_t __iomem *mbx_reg; +- unsigned long flags; + struct qla81xx_fw_dump *fw; + void *nxt, *nxt_chain; + uint32_t *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + +- flags = 0; +- ha->fw_dump_cap_flags = 0; ++ lockdep_assert_held(&ha->hardware_lock); + +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_lock_irqsave(&ha->hardware_lock, flags); +-#endif ++ ha->fw_dump_cap_flags = 0; + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd00a, + "No buffer available for dump.\n"); +- goto qla81xx_fw_dump_failed; ++ return; + } + + if (ha->fw_dumped) { +@@ -1710,7 +1656,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, in + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", + ha->fw_dump); +- goto qla81xx_fw_dump_failed; ++ return; + } + fw = &ha->fw_dump->isp.isp81; + qla2xxx_prep_dump(ha, ha->fw_dump); +@@ -1993,18 +1939,10 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, in + + qla81xx_fw_dump_failed_0: + qla2xxx_dump_post_process(base_vha, rval); +- +-qla81xx_fw_dump_failed: +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_unlock_irqrestore(&ha->hardware_lock, flags); +-#else +- ; +-#endif + } + + void +-qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) ++qla83xx_fw_dump(scsi_qla_host_t *vha) + { + int rval; + uint32_t cnt; +@@ -2013,31 +1951,26 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, in + uint32_t __iomem *dmp_reg; + uint32_t *iter_reg; + uint16_t __iomem *mbx_reg; +- unsigned long flags; + struct qla83xx_fw_dump *fw; + void *nxt, *nxt_chain; + uint32_t *last_chain = NULL; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + +- flags = 0; +- ha->fw_dump_cap_flags = 0; ++ lockdep_assert_held(&ha->hardware_lock); + +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_lock_irqsave(&ha->hardware_lock, flags); +-#endif ++ ha->fw_dump_cap_flags = 0; + + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0xd00c, + "No buffer available for dump!!!\n"); +- goto qla83xx_fw_dump_failed; ++ return; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xd00d, + "Firmware has been previously dumped (%p) -- ignoring " + "request...\n", ha->fw_dump); +- goto qla83xx_fw_dump_failed; ++ return; + } + QLA_FW_STOPPED(ha); + fw = &ha->fw_dump->isp.isp83; +@@ -2507,14 +2440,6 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, in + + qla83xx_fw_dump_failed_0: + qla2xxx_dump_post_process(base_vha, rval); +- +-qla83xx_fw_dump_failed: +-#ifndef __CHECKER__ +- if (!hardware_locked) +- 
spin_unlock_irqrestore(&ha->hardware_lock, flags); +-#else +- ; +-#endif + } + + /****************************************************************************/ +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -3222,7 +3222,7 @@ struct isp_operations { + int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t, + uint32_t); + +- void (*fw_dump) (struct scsi_qla_host *, int); ++ void (*fw_dump)(struct scsi_qla_host *vha); + void (*mpi_fw_dump)(struct scsi_qla_host *, int); + + int (*beacon_on) (struct scsi_qla_host *); +--- a/drivers/scsi/qla2xxx/qla_gbl.h ++++ b/drivers/scsi/qla2xxx/qla_gbl.h +@@ -637,15 +637,16 @@ extern int qla24xx_read_fcp_prio_cfg(scs + /* + * Global Function Prototypes in qla_dbg.c source file. + */ +-extern void qla2100_fw_dump(scsi_qla_host_t *, int); +-extern void qla2300_fw_dump(scsi_qla_host_t *, int); +-extern void qla24xx_fw_dump(scsi_qla_host_t *, int); +-extern void qla25xx_fw_dump(scsi_qla_host_t *, int); +-extern void qla81xx_fw_dump(scsi_qla_host_t *, int); +-extern void qla82xx_fw_dump(scsi_qla_host_t *, int); +-extern void qla8044_fw_dump(scsi_qla_host_t *, int); ++void qla2xxx_dump_fw(scsi_qla_host_t *vha); ++void qla2100_fw_dump(scsi_qla_host_t *vha); ++void qla2300_fw_dump(scsi_qla_host_t *vha); ++void qla24xx_fw_dump(scsi_qla_host_t *vha); ++void qla25xx_fw_dump(scsi_qla_host_t *vha); ++void qla81xx_fw_dump(scsi_qla_host_t *vha); ++void qla82xx_fw_dump(scsi_qla_host_t *vha); ++void qla8044_fw_dump(scsi_qla_host_t *vha); + +-extern void qla27xx_fwdump(scsi_qla_host_t *, int); ++void qla27xx_fwdump(scsi_qla_host_t *vha); + extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int); + extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *); + extern int qla27xx_fwdt_template_valid(void *); +@@ -873,7 +874,7 @@ extern int qla2x00_get_idma_speed(scsi_q + uint16_t *, uint16_t *); + + /* 83xx related functions */ +-extern void qla83xx_fw_dump(scsi_qla_host_t *, int); ++void qla83xx_fw_dump(scsi_qla_host_t *vha); + + /* Minidump related functions */ + extern int qla82xx_md_get_template_size(scsi_qla_host_t *); +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -219,7 +219,7 @@ qla2100_intr_handler(int irq, void *dev_ + WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); + RD_REG_WORD(®->hccr); + +- ha->isp_ops->fw_dump(vha, 1); ++ ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) +@@ -350,7 +350,7 @@ qla2300_intr_handler(int irq, void *dev_ + WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); + RD_REG_WORD(®->hccr); + +- ha->isp_ops->fw_dump(vha, 1); ++ ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + } else if ((stat & HSR_RISC_INT) == 0) +@@ -776,7 +776,7 @@ qla27xx_handle_8200_aen(scsi_qla_host_t + "MPI Heartbeat stop. 
FW dump needed\n"); + + if (ql2xfulldump_on_mpifail) { +- ha->isp_ops->fw_dump(vha, 1); ++ ha->isp_ops->fw_dump(vha); + reset_isp_needed = 1; + } + +@@ -907,7 +907,7 @@ qla2x00_async_event(scsi_qla_host_t *vha + if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && + RD_REG_WORD(®24->mailbox7) & BIT_8) + ha->isp_ops->mpi_fw_dump(vha, 1); +- ha->isp_ops->fw_dump(vha, 1); ++ ha->isp_ops->fw_dump(vha); + ha->flags.fw_init_done = 0; + QLA_FW_STOPPED(ha); + +@@ -3472,7 +3472,7 @@ qla24xx_intr_handler(int irq, void *dev_ + + qla2xxx_check_risc_status(vha); + +- ha->isp_ops->fw_dump(vha, 1); ++ ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + } else if ((stat & HSRX_RISC_INT) == 0) +@@ -3601,7 +3601,7 @@ qla24xx_msix_default(int irq, void *dev_ + + qla2xxx_check_risc_status(vha); + +- ha->isp_ops->fw_dump(vha, 1); ++ ha->isp_ops->fw_dump(vha); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + } else if ((stat & HSRX_RISC_INT) == 0) +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -462,7 +462,7 @@ qla2x00_mailbox_command(scsi_qla_host_t + * a dump + */ + if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) +- ha->isp_ops->fw_dump(vha, 0); ++ qla2xxx_dump_fw(vha); + rval = QLA_FUNCTION_TIMEOUT; + } + } +@@ -6211,7 +6211,7 @@ qla83xx_restart_nic_firmware(scsi_qla_ho + ql_dbg(ql_dbg_mbx, vha, 0x1144, + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); +- ha->isp_ops->fw_dump(vha, 0); ++ qla2xxx_dump_fw(vha); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); + } +@@ -6256,7 +6256,7 @@ qla83xx_access_control(scsi_qla_host_t * + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], + mcp->mb[4]); +- ha->isp_ops->fw_dump(vha, 0); ++ qla2xxx_dump_fw(vha); + } else { + if (subcode & BIT_5) + *sector_size = mcp->mb[1]; +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -4514,7 +4514,7 @@ qla82xx_beacon_off(struct scsi_qla_host + } + + void +-qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) ++qla82xx_fw_dump(scsi_qla_host_t *vha) + { + struct qla_hw_data *ha = vha->hw; + +--- a/drivers/scsi/qla2xxx/qla_nx2.c ++++ b/drivers/scsi/qla2xxx/qla_nx2.c +@@ -4070,7 +4070,7 @@ qla8044_abort_isp(scsi_qla_host_t *vha) + } + + void +-qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked) ++qla8044_fw_dump(scsi_qla_host_t *vha) + { + struct qla_hw_data *ha = vha->hw; + +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -7586,7 +7586,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev + if (risc_paused) { + ql_log(ql_log_info, base_vha, 0x9003, + "RISC paused -- mmio_enabled, Dumping firmware.\n"); +- ha->isp_ops->fw_dump(base_vha, 0); ++ qla2xxx_dump_fw(base_vha); + + return PCI_ERS_RESULT_NEED_RESET; + } else +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -5679,9 +5679,9 @@ static int qlt_chk_unresolv_exchg(struct + vha, 0xffff, (uint8_t *)entry, sizeof(*entry)); + + if (qpair == ha->base_qpair) +- ha->isp_ops->fw_dump(vha, 1); ++ ha->isp_ops->fw_dump(vha); + else +- ha->isp_ops->fw_dump(vha, 0); ++ qla2xxx_dump_fw(vha); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -1081,14 +1081,9 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, + } + + void +-qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) ++qla27xx_fwdump(scsi_qla_host_t *vha) + { +- ulong flags = 0; +- 
+-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_lock_irqsave(&vha->hw->hardware_lock, flags); +-#endif ++ lockdep_assert_held(&vha->hw->hardware_lock); + + if (!vha->hw->fw_dump) { + ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n"); +@@ -1105,11 +1100,11 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0xd012, + "-> fwdt0 no template\n"); +- goto bailout; ++ return; + } + len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); + if (len == 0) { +- goto bailout; ++ return; + } else if (len != fwdt->dump_size) { + ql_log(ql_log_warn, vha, 0xd013, + "-> fwdt0 fwdump residual=%+ld\n", +@@ -1124,10 +1119,4 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int + vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags); + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + } +- +-bailout: +-#ifndef __CHECKER__ +- if (!hardware_locked) +- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +-#endif + } diff --git a/patches.suse/scsi-qla2xxx-Sort-BUILD_BUG_ON-statements-alphabetic.patch b/patches.suse/scsi-qla2xxx-Sort-BUILD_BUG_ON-statements-alphabetic.patch new file mode 100644 index 0000000..fb3e059 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Sort-BUILD_BUG_ON-statements-alphabetic.patch @@ -0,0 +1,41 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:01 -0700 +Subject: scsi: qla2xxx: Sort BUILD_BUG_ON() statements alphabetically +Patch-mainline: v5.8-rc1 +Git-commit: 59d23cf3f2e4c100ac21f7e00e5817d5e8a3a2e2 +References: bsc#1171688 bsc#1174003 + +Before adding more BUILD_BUG_ON() statements, sort the existing statements +alphabetically. + +Link: https://lore.kernel.org/r/20200518211712.11395-5-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Reviewed-by: Hannes Reinecke +Reviewed-by: Arun Easi +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_os.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -7855,11 +7855,11 @@ qla2x00_module_init(void) + BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); + BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); + BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); ++ BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); ++ BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); + BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); + BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); + BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); +- BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); +- BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); + + /* Allocate cache for SRBs. */ + srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, diff --git a/patches.suse/scsi-qla2xxx-Split-qla2x00_configure_local_loop.patch b/patches.suse/scsi-qla2xxx-Split-qla2x00_configure_local_loop.patch new file mode 100644 index 0000000..49ff775 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Split-qla2x00_configure_local_loop.patch @@ -0,0 +1,142 @@ +From: Bart Van Assche +Date: Sun, 5 Apr 2020 15:59:05 -0700 +Subject: scsi: qla2xxx: Split qla2x00_configure_local_loop() +Patch-mainline: v5.8-rc1 +Git-commit: 21e855cdfeb91cf2fe92cd608c3f1d657fc73c5c +References: bsc#1171688 bsc#1174003 + +The size of the function qla2x00_configure_local_loop() hurts its +readability. Hence split that function. This patch does not change any +functionality. 
+ +Link: https://lore.kernel.org/r/20200405225905.17171-1-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Himanshu Madhani +Cc: Quinn Tran +Cc: Martin Wilck +Cc: Daniel Wagner +Cc: Roman Bolshakov +Reviewed-by: Roman Bolshakov +Reviewed-by: Himanshu Madhani +Reviewed-by: Daniel Wagner +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. Petersen +--- + drivers/scsi/qla2xxx/qla_init.c | 92 +++++++++++++++++++++------------------- + 1 file changed, 50 insertions(+), 42 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -5079,6 +5079,54 @@ qla2x00_configure_loop(scsi_qla_host_t * + return (rval); + } + ++static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha) ++{ ++ struct qla_hw_data *ha = vha->hw; ++ unsigned long flags; ++ fc_port_t *fcport; ++ int rval; ++ ++ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { ++ /* borrowing */ ++ u32 *bp, sz; ++ ++ memset(ha->init_cb, 0, ha->init_cb_size); ++ sz = min_t(int, sizeof(struct els_plogi_payload), ++ ha->init_cb_size); ++ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, ++ ha->init_cb, sz); ++ if (rval == QLA_SUCCESS) { ++ __be32 *q = &ha->plogi_els_payld.data[0]; ++ ++ bp = (uint32_t *)ha->init_cb; ++ cpu_to_be32_array(q, bp, sz / 4); ++ memcpy(bp, q, sizeof(ha->plogi_els_payld.data)); ++ } else { ++ ql_dbg(ql_dbg_init, vha, 0x00d1, ++ "PLOGI ELS param read fail.\n"); ++ goto skip_login; ++ } ++ } ++ ++ list_for_each_entry(fcport, &vha->vp_fcports, list) { ++ if (fcport->n2n_flag) { ++ qla24xx_fcport_handle_login(vha, fcport); ++ return QLA_SUCCESS; ++ } ++ } ++ ++skip_login: ++ spin_lock_irqsave(&vha->work_lock, flags); ++ vha->scan.scan_retry++; ++ spin_unlock_irqrestore(&vha->work_lock, flags); ++ ++ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { ++ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); ++ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); ++ } ++ return QLA_FUNCTION_FAILED; ++} ++ + /* + * qla2x00_configure_local_loop + * Updates Fibre Channel Device Database with local loop devices. +@@ -5096,7 +5144,6 @@ qla2x00_configure_local_loop(scsi_qla_ho + int found_devs; + int found; + fc_port_t *fcport, *new_fcport; +- + uint16_t index; + uint16_t entries; + struct gid_list_info *gid; +@@ -5106,47 +5153,8 @@ qla2x00_configure_local_loop(scsi_qla_ho + unsigned long flags; + + /* Inititae N2N login. 
*/ +- if (N2N_TOPO(ha)) { +- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { +- /* borrowing */ +- u32 *bp, sz; +- +- memset(ha->init_cb, 0, ha->init_cb_size); +- sz = min_t(int, sizeof(struct els_plogi_payload), +- ha->init_cb_size); +- rval = qla24xx_get_port_login_templ(vha, +- ha->init_cb_dma, (void *)ha->init_cb, sz); +- if (rval == QLA_SUCCESS) { +- __be32 *q = &ha->plogi_els_payld.data[0]; +- +- bp = (uint32_t *)ha->init_cb; +- cpu_to_be32_array(q, bp, sz / 4); +- +- memcpy(bp, q, sizeof(ha->plogi_els_payld.data)); +- } else { +- ql_dbg(ql_dbg_init, vha, 0x00d1, +- "PLOGI ELS param read fail.\n"); +- goto skip_login; +- } +- } +- +- list_for_each_entry(fcport, &vha->vp_fcports, list) { +- if (fcport->n2n_flag) { +- qla24xx_fcport_handle_login(vha, fcport); +- return QLA_SUCCESS; +- } +- } +-skip_login: +- spin_lock_irqsave(&vha->work_lock, flags); +- vha->scan.scan_retry++; +- spin_unlock_irqrestore(&vha->work_lock, flags); +- +- if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { +- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); +- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); +- } +- return QLA_FUNCTION_FAILED; +- } ++ if (N2N_TOPO(ha)) ++ return qla2x00_configure_n2n_loop(vha); + + found_devs = 0; + new_fcport = NULL; diff --git a/patches.suse/scsi-qla2xxx-Use-ARRAY_SIZE-instead-of-open-coding-i.patch b/patches.suse/scsi-qla2xxx-Use-ARRAY_SIZE-instead-of-open-coding-i.patch new file mode 100644 index 0000000..eb58743 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Use-ARRAY_SIZE-instead-of-open-coding-i.patch @@ -0,0 +1,176 @@ +From: Bart Van Assche +Date: Sun, 12 Apr 2020 19:13:59 -0700 +Subject: scsi: qla2xxx: Use ARRAY_SIZE() instead of open-coding it +Patch-mainline: v5.8-rc1 +Git-commit: d221aed19c80d83c2de2fa5e8a5ba045065c6d4b +References: bsc#1171688 bsc#1174003 + +This patch does not change any functionality. + +Link: https://lore.kernel.org/r/20200413021359.21725-1-bvanassche@acm.org +Cc: Nilesh Javali +Cc: Quinn Tran +Cc: Himanshu Madhani +Cc: Martin Wilck +Cc: Daniel Wagner +Cc: Roman Bolshakov +Reviewed-by: Daniel Wagner +Reviewed-by: Roman Bolshakov +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_dbg.c | 36 ++++++++++++++++++------------------ + 1 file changed, 18 insertions(+), 18 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -778,16 +778,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, in + + if (rval == QLA_SUCCESS) { + dmp_reg = ®->flash_address; +- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) + fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); + + dmp_reg = ®->u.isp2300.req_q_in; +- for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg); + cnt++, dmp_reg++) + fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); + + dmp_reg = ®->u.isp2300.mailbox0; +- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); + cnt++, dmp_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); + +@@ -799,7 +799,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, in + + WRT_REG_WORD(®->ctrl_status, 0x00); + dmp_reg = ®->risc_hw; +- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); + cnt++, dmp_reg++) + fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); + +@@ -860,12 +860,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, in + /* Get RISC SRAM. */ + if (rval == QLA_SUCCESS) + rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram, +- sizeof(fw->risc_ram) / 2, &nxt); ++ ARRAY_SIZE(fw->risc_ram), &nxt); + + /* Get stack SRAM. */ + if (rval == QLA_SUCCESS) + rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram, +- sizeof(fw->stack_ram) / 2, &nxt); ++ ARRAY_SIZE(fw->stack_ram), &nxt); + + /* Get data SRAM. */ + if (rval == QLA_SUCCESS) +@@ -944,7 +944,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, in + } + if (rval == QLA_SUCCESS) { + dmp_reg = ®->flash_address; +- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) + fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); + + dmp_reg = ®->u.isp2100.mailbox0; +@@ -956,12 +956,12 @@ qla2100_fw_dump(scsi_qla_host_t *vha, in + } + + dmp_reg = ®->u.isp2100.unused_2[0]; +- for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++) + fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); + + WRT_REG_WORD(®->ctrl_status, 0x00); + dmp_reg = ®->risc_hw; +- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++) + fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); + + WRT_REG_WORD(®->pcr, 0x2000); +@@ -1041,7 +1041,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, in + WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + } +- for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS; ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS; + cnt++, risc_address++) { + WRT_MAILBOX_REG(ha, reg, 1, risc_address); + WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); +@@ -1145,7 +1145,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, in + + /* Host interface registers. */ + dmp_reg = ®->flash_addr; +- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); + + /* Disable interrupts. */ +@@ -1178,7 +1178,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, in + + /* Mailbox registers. 
*/ + mbx_reg = ®->mailbox0; +- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); + + /* Transfer sequence registers. */ +@@ -1421,7 +1421,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, in + + /* Host interface registers. */ + dmp_reg = ®->flash_addr; +- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); + + /* Disable interrupts. */ +@@ -1470,7 +1470,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, in + + /* Mailbox registers. */ + mbx_reg = ®->mailbox0; +- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); + + /* Transfer sequence registers. */ +@@ -1745,7 +1745,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, in + + /* Host interface registers. */ + dmp_reg = ®->flash_addr; +- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); + + /* Disable interrupts. */ +@@ -1794,7 +1794,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, in + + /* Mailbox registers. */ + mbx_reg = ®->mailbox0; +- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); + + /* Transfer sequence registers. */ +@@ -2093,7 +2093,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, in + + /* Host interface registers. */ + dmp_reg = ®->flash_addr; +- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); + + /* Disable interrupts. */ +@@ -2142,7 +2142,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, in + + /* Mailbox registers. */ + mbx_reg = ®->mailbox0; +- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) ++ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); + + /* Transfer sequence registers. */ diff --git a/patches.suse/scsi-qla2xxx-Use-MBX_TOV_SECONDS-for-mailbox-command.patch b/patches.suse/scsi-qla2xxx-Use-MBX_TOV_SECONDS-for-mailbox-command.patch new file mode 100644 index 0000000..fba3344 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Use-MBX_TOV_SECONDS-for-mailbox-command.patch @@ -0,0 +1,84 @@ +From: Enzo Matsumiya +Date: Wed, 5 Aug 2020 17:05:46 -0300 +Subject: scsi: qla2xxx: Use MBX_TOV_SECONDS for mailbox command timeout values +Patch-mainline: v5.9-rc2 +Git-commit: c314a014b1802d30f1dc50db18b2e7f5a77d19c6 +References: bsc#1171688 bsc#1174003 + +Improves readability of qla_mbx.c. + +Link: https://lore.kernel.org/r/20200805200546.22497-1-ematsumiya@suse.de +Reviewed-by: Himanshu Madhani +Reviewed-by: Roman Bolshakov +Signed-off-by: Enzo Matsumiya +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_mbx.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_mbx.c ++++ b/drivers/scsi/qla2xxx/qla_mbx.c +@@ -5240,7 +5240,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *v + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_0; +- mcp->tov = 30; ++ mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { +@@ -5428,7 +5428,7 @@ qla2x00_write_ram_word(scsi_qla_host_t * + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; +- mcp->tov = 30; ++ mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { +@@ -5700,7 +5700,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vh + mcp->mb[9] = vha->vp_idx; + mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; +- mcp->tov = 30; ++ mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (mb != NULL) { +@@ -5787,7 +5787,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t + + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_0; +- mcp->tov = 30; ++ mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); +@@ -5822,7 +5822,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t + + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_0; +- mcp->tov = 30; ++ mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); +@@ -6014,7 +6014,7 @@ qla81xx_set_led_config(scsi_qla_host_t * + if (IS_QLA8031(ha)) + mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; + mcp->in_mb = MBX_0; +- mcp->tov = 30; ++ mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); +@@ -6050,7 +6050,7 @@ qla81xx_get_led_config(scsi_qla_host_t * + mcp->in_mb = MBX_2|MBX_1|MBX_0; + if (IS_QLA8031(ha)) + mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; +- mcp->tov = 30; ++ mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); diff --git a/patches.suse/scsi-qla2xxx-Use-make_handle-instead-of-open-coding-.patch b/patches.suse/scsi-qla2xxx-Use-make_handle-instead-of-open-coding-.patch new file mode 100644 index 0000000..d29948f --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Use-make_handle-instead-of-open-coding-.patch @@ -0,0 +1,47 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:10 -0700 +Subject: scsi: qla2xxx: Use make_handle() instead of open-coding it +Patch-mainline: v5.8-rc1 +Git-commit: 2a4b684ab0aa2a8f8efede77ca1525042c2c0e01 +References: bsc#1171688 bsc#1174003 + +Link: https://lore.kernel.org/r/20200518211712.11395-14-bvanassche@acm.org +Cc: Arun Easi +Cc: Nilesh Javali +Cc: Martin Wilck +Reviewed-by: Hannes Reinecke +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Reviewed-by: Roman Bolshakov +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_isr.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_isr.c ++++ b/drivers/scsi/qla2xxx/qla_isr.c +@@ -818,7 +818,7 @@ qla2x00_async_event(scsi_qla_host_t *vha + goto skip_rio; + switch (mb[0]) { + case MBA_SCSI_COMPLETION: +- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); ++ handles[0] = le32_to_cpu(make_handle(mb[2], mb[1])); + handle_cnt = 1; + break; + case MBA_CMPLT_1_16BIT: +@@ -857,10 +857,10 @@ qla2x00_async_event(scsi_qla_host_t *vha + mb[0] = MBA_SCSI_COMPLETION; + break; + case MBA_CMPLT_2_32BIT: +- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); +- handles[1] = le32_to_cpu( +- ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | +- RD_MAILBOX_REG(ha, reg, 6)); ++ handles[0] = le32_to_cpu(make_handle(mb[2], mb[1])); ++ handles[1] = ++ le32_to_cpu(make_handle(RD_MAILBOX_REG(ha, reg, 7), ++ RD_MAILBOX_REG(ha, reg, 6))); + handle_cnt = 2; + mb[0] = MBA_SCSI_COMPLETION; + break; diff --git a/patches.suse/scsi-qla2xxx-Use-register-names-instead-of-register-.patch b/patches.suse/scsi-qla2xxx-Use-register-names-instead-of-register-.patch new file mode 100644 index 0000000..17ed1da --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Use-register-names-instead-of-register-.patch @@ -0,0 +1,47 @@ +From: Bart Van Assche +Date: Mon, 18 May 2020 14:17:06 -0700 +Subject: scsi: qla2xxx: Use register names instead of register offsets +Patch-mainline: v5.8-rc1 +Git-commit: c3888416221849ed46fd35413c7a1d00ee291cbe +References: bsc#1171688 bsc#1174003 + +Make qla27xx_write_remote_reg() easier to read by using register names +instead of register offsets. The 'pahole' tool has been used to convert +register offsets into register names. See also commit cbb01c2f2f63 ("scsi: +qla2xxx: Fix MPI failure AEN (8200) handling"). + +Link: https://lore.kernel.org/r/20200518211712.11395-10-bvanassche@acm.org +Cc: Arun Easi +Cc: Nilesh Javali +Cc: Martin Wilck +Cc: Roman Bolshakov +Reviewed-by: Hannes Reinecke +Reviewed-by: Daniel Wagner +Reviewed-by: Himanshu Madhani +Signed-off-by: Bart Van Assche +Signed-off-by: Martin K. 
Petersen +--- + drivers/scsi/qla2xxx/qla_tmpl.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -17,14 +17,14 @@ static void + qla27xx_write_remote_reg(struct scsi_qla_host *vha, + u32 addr, u32 data) + { +- char *reg = (char *)ISPREG(vha); ++ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; + + ql_dbg(ql_dbg_misc, vha, 0xd300, + "%s: addr/data = %xh/%xh\n", __func__, addr, data); + +- WRT_REG_DWORD(reg + IOBASE(vha), 0x40); +- WRT_REG_DWORD(reg + 0xc4, data); +- WRT_REG_DWORD(reg + 0xc0, addr); ++ WRT_REG_DWORD(®->iobase_addr, 0x40); ++ WRT_REG_DWORD(®->iobase_c4, data); ++ WRT_REG_DWORD(®->iobase_window, addr); + } + + void diff --git a/patches.suse/scsi-qla2xxx-Use-true-false-for-ha-fw_dumped.patch b/patches.suse/scsi-qla2xxx-Use-true-false-for-ha-fw_dumped.patch new file mode 100644 index 0000000..c2ae3d3 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Use-true-false-for-ha-fw_dumped.patch @@ -0,0 +1,133 @@ +From: Jason Yan +Date: Thu, 30 Apr 2020 20:18:00 +0800 +Subject: scsi: qla2xxx: Use true, false for ha->fw_dumped +Patch-mainline: v5.8-rc1 +Git-commit: dbe6f49259dacc073c1ae602f383c177f57b1b8a +References: bsc#1171688 bsc#1174003 + +Fix the following coccicheck warning: + +drivers/scsi/qla2xxx/qla_tmpl.c:1120:2-20: WARNING: Assignment of 0/1 to +bool variable + +Link: https://lore.kernel.org/r/20200430121800.15323-1-yanaijie@huawei.com +Signed-off-by: Jason Yan +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_attr.c | 2 +- + drivers/scsi/qla2xxx/qla_dbg.c | 4 ++-- + drivers/scsi/qla2xxx/qla_nx.c | 4 ++-- + drivers/scsi/qla2xxx/qla_nx2.c | 8 ++++---- + drivers/scsi/qla2xxx/qla_os.c | 2 +- + drivers/scsi/qla2xxx/qla_tmpl.c | 2 +- + 6 files changed, 11 insertions(+), 11 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -84,7 +84,7 @@ qla2x00_sysfs_write_fw_dump(struct file + qla82xx_md_prep(vha); + } + ha->fw_dump_reading = 0; +- ha->fw_dumped = 0; ++ ha->fw_dumped = false; + break; + case 1: + if (ha->fw_dumped && !ha->fw_dump_reading) { +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -706,12 +706,12 @@ qla2xxx_dump_post_process(scsi_qla_host_ + ql_log(ql_log_warn, vha, 0xd000, + "Failed to dump firmware (%x), dump status flags (0x%lx).\n", + rval, ha->fw_dump_cap_flags); +- ha->fw_dumped = 0; ++ ha->fw_dumped = false; + } else { + ql_log(ql_log_info, vha, 0xd001, + "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", + vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); +- ha->fw_dumped = 1; ++ ha->fw_dumped = true; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + } + } +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -4177,7 +4177,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha) + goto md_failed; + } + +- ha->fw_dumped = 0; ++ ha->fw_dumped = false; + + if (!ha->md_tmplt_hdr || !ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb038, +@@ -4357,7 +4357,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha) + ql_log(ql_log_info, vha, 0xb044, + "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", + vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); +- ha->fw_dumped = 1; ++ ha->fw_dumped = true; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + + md_failed: +--- a/drivers/scsi/qla2xxx/qla_nx2.c ++++ b/drivers/scsi/qla2xxx/qla_nx2.c +@@ -1441,7 +1441,7 @@ qla8044_device_bootstrap(struct 
scsi_qla + if (idc_ctrl & GRACEFUL_RESET_BIT1) { + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_ctrl & ~GRACEFUL_RESET_BIT1)); +- ha->fw_dumped = 0; ++ ha->fw_dumped = false; + } + + dev_ready: +@@ -3249,7 +3249,7 @@ qla8044_collect_md_data(struct scsi_qla_ + goto md_failed; + } + +- ha->fw_dumped = 0; ++ ha->fw_dumped = false; + + if (!ha->md_tmplt_hdr || !ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb10e, +@@ -3470,7 +3470,7 @@ qla8044_collect_md_data(struct scsi_qla_ + ql_log(ql_log_info, vha, 0xb110, + "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", + vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); +- ha->fw_dumped = 1; ++ ha->fw_dumped = true; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + + +@@ -3487,7 +3487,7 @@ qla8044_get_minidump(struct scsi_qla_hos + struct qla_hw_data *ha = vha->hw; + + if (!qla8044_collect_md_data(vha)) { +- ha->fw_dumped = 1; ++ ha->fw_dumped = true; + ha->prev_minidump_failed = 0; + } else { + ql_log(ql_log_fatal, vha, 0xb0db, +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -4633,7 +4633,7 @@ qla2x00_free_fw_dump(struct qla_hw_data + ha->flags.fce_enabled = 0; + ha->eft = NULL; + ha->eft_dma = 0; +- ha->fw_dumped = 0; ++ ha->fw_dumped = false; + ha->fw_dump_cap_flags = 0; + ha->fw_dump_reading = 0; + ha->fw_dump = NULL; +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -1117,7 +1117,7 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int + } + + vha->hw->fw_dump_len = len; +- vha->hw->fw_dumped = 1; ++ vha->hw->fw_dumped = true; + + ql_log(ql_log_warn, vha, 0xd015, + "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n", diff --git a/patches.suse/scsi-qla2xxx-Use-true-false-for-need_mpi_reset.patch b/patches.suse/scsi-qla2xxx-Use-true-false-for-need_mpi_reset.patch new file mode 100644 index 0000000..bc3299c --- /dev/null +++ b/patches.suse/scsi-qla2xxx-Use-true-false-for-need_mpi_reset.patch @@ -0,0 +1,43 @@ +From: Jason Yan +Date: Thu, 30 Apr 2020 20:17:51 +0800 +Subject: scsi: qla2xxx: Use true, false for need_mpi_reset +Patch-mainline: v5.8-rc1 +Git-commit: bda552a7741a23708823c6e87a39d9a956087ac0 +References: bsc#1171688 bsc#1174003 + +Fix the following coccicheck warning: + +drivers/scsi/qla2xxx/qla_tmpl.c:1031:6-20: WARNING: Assignment of 0/1 to +bool variable +drivers/scsi/qla2xxx/qla_tmpl.c:1062:3-17: WARNING: Assignment of 0/1 to +bool variable + +Link: https://lore.kernel.org/r/20200430121751.15232-1-yanaijie@huawei.com +Reviewed-by: Himanshu Madhani +Signed-off-by: Jason Yan +Signed-off-by: Martin K. 
Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_tmpl.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -1028,7 +1028,7 @@ void + qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked) + { + ulong flags = 0; +- bool need_mpi_reset = 1; ++ bool need_mpi_reset = true; + + #ifndef __CHECKER__ + if (!hardware_locked) +@@ -1059,7 +1059,7 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, + "-> fwdt1 fwdump residual=%+ld\n", + fwdt->dump_size - len); + } else { +- need_mpi_reset = 0; ++ need_mpi_reset = false; + } + + vha->hw->mpi_fw_dump_len = len; diff --git a/patches.suse/scsi-qla2xxx-make-1-bit-bit-fields-unsigned-int.patch b/patches.suse/scsi-qla2xxx-make-1-bit-bit-fields-unsigned-int.patch new file mode 100644 index 0000000..602a554 --- /dev/null +++ b/patches.suse/scsi-qla2xxx-make-1-bit-bit-fields-unsigned-int.patch @@ -0,0 +1,35 @@ +From: Colin Ian King +Date: Tue, 28 Apr 2020 11:20:13 +0100 +Subject: scsi: qla2xxx: make 1-bit bit-fields unsigned int +Patch-mainline: v5.8-rc1 +Git-commit: 78b874b7cbf09fbfadfa5f18a347ebef7bbb49fe +References: bsc#1171688 bsc#1174003 + +The bitfields mpi_fw_dump_reading and mpi_fw_dumped are currently signed +which is not recommended as the representation is an implementation defined +behaviour. Fix this by making the bit-fields unsigned ints. + +Link: https://lore.kernel.org/r/20200428102013.1040598-1-colin.king@canonical.com +Fixes: cbb01c2f2f63 ("scsi: qla2xxx: Fix MPI failure AEN (8200) handling") +Reviewed-by: Bart Van Assche +Reviewed-by: Himanshu Madhani +Signed-off-by: Colin Ian King +Signed-off-by: Martin K. Petersen +Acked-by: Daniel Wagner +--- + drivers/scsi/qla2xxx/qla_def.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_def.h ++++ b/drivers/scsi/qla2xxx/qla_def.h +@@ -4248,8 +4248,8 @@ struct qla_hw_data { + int fw_dump_reading; + void *mpi_fw_dump; + u32 mpi_fw_dump_len; +- int mpi_fw_dump_reading:1; +- int mpi_fw_dumped:1; ++ unsigned int mpi_fw_dump_reading:1; ++ unsigned int mpi_fw_dumped:1; + int prev_minidump_failed; + dma_addr_t eft_dma; + void *eft; diff --git a/patches.suse/scsi-target-iblock-fix-WRITE-SAME-zeroing.patch b/patches.suse/scsi-target-iblock-fix-WRITE-SAME-zeroing.patch new file mode 100644 index 0000000..9a301db --- /dev/null +++ b/patches.suse/scsi-target-iblock-fix-WRITE-SAME-zeroing.patch @@ -0,0 +1,46 @@ +From 1d2ff149b263c9325875726a7804a0c75ef7112e Mon Sep 17 00:00:00 2001 +From: David Disseldorp +Date: Sun, 19 Apr 2020 18:31:09 +0200 +Subject: [PATCH] scsi: target/iblock: fix WRITE SAME zeroing +Patch-mainline: v5.7-rc4 +Git-commit: 1d2ff149b263c9325875726a7804a0c75ef7112e +References: bsc#1169790 + +SBC4 specifies that WRITE SAME requests with the UNMAP bit set to zero +"shall perform the specified write operation to each LBA specified by the +command". Commit 2237498f0b5c ("target/iblock: Convert WRITE_SAME to +blkdev_issue_zeroout") modified the iblock backend to call +blkdev_issue_zeroout() when handling WRITE SAME requests with UNMAP=0 and a +zero data segment. + +The iblock blkdev_issue_zeroout() call incorrectly provides a flags +parameter of 0 (bool false), instead of BLKDEV_ZERO_NOUNMAP. The bool +false parameter reflects the blkdev_issue_zeroout() API prior to commit +ee472d835c26 ("block: add a flags argument to (__)blkdev_issue_zeroout") +which was merged shortly before 2237498f0b5c. 
+ +Link: https://lore.kernel.org/r/20200419163109.11689-1-ddiss@suse.de +Fixes: 2237498f0b5c ("target/iblock: Convert WRITE_SAME to blkdev_issue_zeroout") +Reviewed-by: Bart Van Assche +Signed-off-by: David Disseldorp +Signed-off-by: Martin K. Petersen +--- + drivers/target/target_core_iblock.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c +index 51ffd5c002de..1c181d31f4c8 100644 +--- a/drivers/target/target_core_iblock.c ++++ b/drivers/target/target_core_iblock.c +@@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) + target_to_linux_sector(dev, cmd->t_task_lba), + target_to_linux_sector(dev, + sbc_get_write_same_sectors(cmd)), +- GFP_KERNEL, false); ++ GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); + if (ret) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + +-- +2.26.2 + diff --git a/patches.suse/sctp-fix-possibly-using-a-bad-saddr-with-a-given-dst.patch b/patches.suse/sctp-fix-possibly-using-a-bad-saddr-with-a-given-dst.patch index f341d6a..c0985a9 100644 --- a/patches.suse/sctp-fix-possibly-using-a-bad-saddr-with-a-given-dst.patch +++ b/patches.suse/sctp-fix-possibly-using-a-bad-saddr-with-a-given-dst.patch @@ -29,13 +29,13 @@ Tested-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Jiri Slaby --- - net/sctp/ipv6.c | 20 ++++++++++++++------ - net/sctp/protocol.c | 28 +++++++++++++++++++--------- + net/sctp/ipv6.c | 20 ++++++++++++++------ + net/sctp/protocol.c | 28 +++++++++++++++++++--------- 2 files changed, 33 insertions(+), 15 deletions(-) --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c -@@ -237,7 +237,8 @@ static void sctp_v6_get_dst(struct sctp_ +@@ -237,7 +237,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, { struct sctp_association *asoc = t->asoc; struct dst_entry *dst = NULL; @@ -45,7 +45,7 @@ Signed-off-by: Jiri Slaby struct sctp_bind_addr *bp; struct ipv6_pinfo *np = inet6_sk(sk); struct sctp_sockaddr_entry *laddr; -@@ -247,7 +248,7 @@ static void sctp_v6_get_dst(struct sctp_ +@@ -247,7 +248,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, __u8 matchlen = 0; sctp_scope_t scope; @@ -54,10 +54,10 @@ Signed-off-by: Jiri Slaby fl6->daddr = daddr->v6.sin6_addr; fl6->fl6_dport = daddr->v6.sin6_port; fl6->flowi6_proto = IPPROTO_SCTP; -@@ -271,8 +272,11 @@ static void sctp_v6_get_dst(struct sctp_ +@@ -271,8 +272,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, rcu_read_unlock(); - dst = ip6_dst_lookup_flow(sk, fl6, final_p); + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); - if (!asoc || saddr) + if (!asoc || saddr) { + t->dst = dst; @@ -67,7 +67,7 @@ Signed-off-by: Jiri Slaby bp = &asoc->base.bind_addr; scope = sctp_scope(daddr); -@@ -295,6 +299,8 @@ static void sctp_v6_get_dst(struct sctp_ +@@ -295,6 +299,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if ((laddr->a.sa.sa_family == AF_INET6) && (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) { rcu_read_unlock(); @@ -76,7 +76,7 @@ Signed-off-by: Jiri Slaby goto out; } } -@@ -333,6 +339,8 @@ static void sctp_v6_get_dst(struct sctp_ +@@ -333,6 +339,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (!IS_ERR_OR_NULL(dst)) dst_release(dst); dst = bdst; @@ -85,7 +85,7 @@ Signed-off-by: Jiri Slaby break; } -@@ -346,6 +354,8 @@ static void sctp_v6_get_dst(struct sctp_ +@@ -346,6 +354,8 @@ static void sctp_v6_get_dst(struct 
sctp_transport *t, union sctp_addr *saddr, dst_release(dst); dst = bdst; matchlen = bmatchlen; @@ -94,7 +94,7 @@ Signed-off-by: Jiri Slaby } rcu_read_unlock(); -@@ -354,14 +364,12 @@ out: +@@ -354,14 +364,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, struct rt6_info *rt; rt = (struct rt6_info *)dst; @@ -112,7 +112,7 @@ Signed-off-by: Jiri Slaby } --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c -@@ -436,14 +436,15 @@ static void sctp_v4_get_dst(struct sctp_ +@@ -436,14 +436,15 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, { struct sctp_association *asoc = t->asoc; struct rtable *rt; @@ -130,7 +130,7 @@ Signed-off-by: Jiri Slaby fl4->daddr = daddr->v4.sin_addr.s_addr; fl4->fl4_dport = daddr->v4.sin_port; fl4->flowi4_proto = IPPROTO_SCTP; -@@ -461,8 +462,11 @@ static void sctp_v4_get_dst(struct sctp_ +@@ -461,8 +462,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, &fl4->saddr); rt = ip_route_output_key(sock_net(sk), fl4); @@ -143,7 +143,7 @@ Signed-off-by: Jiri Slaby /* If there is no association or if a source address is passed, no * more validation is required. -@@ -525,27 +529,33 @@ static void sctp_v4_get_dst(struct sctp_ +@@ -525,27 +529,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr, false); if (!odev || odev->ifindex != fl4->flowi4_oif) { diff --git a/patches.suse/serial-8250-change-lock-order-in-serial8250_do_start.patch b/patches.suse/serial-8250-change-lock-order-in-serial8250_do_start.patch new file mode 100644 index 0000000..95a26c5 --- /dev/null +++ b/patches.suse/serial-8250-change-lock-order-in-serial8250_do_start.patch @@ -0,0 +1,215 @@ +From 205d300aea75623e1ae4aa43e0d265ab9cf195fd Mon Sep 17 00:00:00 2001 +From: Sergey Senozhatsky +Date: Mon, 17 Aug 2020 11:26:46 +0900 +Subject: [PATCH] serial: 8250: change lock order in serial8250_do_startup() +Git-commit: 205d300aea75623e1ae4aa43e0d265ab9cf195fd +Patch-mainline: v5.9-rc3 +References: git-fixes + +We have a number of "uart.port->desc.lock vs desc.lock->uart.port" +lockdep reports coming from 8250 driver; this causes a bit of trouble +to people, so let's fix it. + +The problem is reverse lock order in two different call paths: + +chain #1: + + serial8250_do_startup() + spin_lock_irqsave(&port->lock); + disable_irq_nosync(port->irq); + raw_spin_lock_irqsave(&desc->lock) + +chain #2: + + __report_bad_irq() + raw_spin_lock_irqsave(&desc->lock) + for_each_action_of_desc() + printk() + spin_lock_irqsave(&port->lock); + +Fix this by changing the order of locks in serial8250_do_startup(): + do disable_irq_nosync() first, which grabs desc->lock, and grab + uart->port after that, so that chain #1 and chain #2 have same lock + order. + +Full lockdep splat: + + ====================================================== + WARNING: possible circular locking dependency detected + 5.4.39 #55 Not tainted + ====================================================== + + swapper/0/0 is trying to acquire lock: + ffffffffab65b6c0 (console_owner){-...}, at: console_lock_spinning_enable+0x31/0x57 + + but task is already holding lock: + ffff88810a8e34c0 (&irq_desc_lock_class){-.-.}, at: __report_bad_irq+0x5b/0xba + + which lock already depends on the new lock. 
+ + the existing dependency chain (in reverse order) is: + + -> #2 (&irq_desc_lock_class){-.-.}: + _raw_spin_lock_irqsave+0x61/0x8d + __irq_get_desc_lock+0x65/0x89 + __disable_irq_nosync+0x3b/0x93 + serial8250_do_startup+0x451/0x75c + uart_startup+0x1b4/0x2ff + uart_port_activate+0x73/0xa0 + tty_port_open+0xae/0x10a + uart_open+0x1b/0x26 + tty_open+0x24d/0x3a0 + chrdev_open+0xd5/0x1cc + do_dentry_open+0x299/0x3c8 + path_openat+0x434/0x1100 + do_filp_open+0x9b/0x10a + do_sys_open+0x15f/0x3d7 + kernel_init_freeable+0x157/0x1dd + kernel_init+0xe/0x105 + ret_from_fork+0x27/0x50 + + -> #1 (&port_lock_key){-.-.}: + _raw_spin_lock_irqsave+0x61/0x8d + serial8250_console_write+0xa7/0x2a0 + console_unlock+0x3b7/0x528 + vprintk_emit+0x111/0x17f + printk+0x59/0x73 + register_console+0x336/0x3a4 + uart_add_one_port+0x51b/0x5be + serial8250_register_8250_port+0x454/0x55e + dw8250_probe+0x4dc/0x5b9 + platform_drv_probe+0x67/0x8b + really_probe+0x14a/0x422 + driver_probe_device+0x66/0x130 + device_driver_attach+0x42/0x5b + __driver_attach+0xca/0x139 + bus_for_each_dev+0x97/0xc9 + bus_add_driver+0x12b/0x228 + driver_register+0x64/0xed + do_one_initcall+0x20c/0x4a6 + do_initcall_level+0xb5/0xc5 + do_basic_setup+0x4c/0x58 + kernel_init_freeable+0x13f/0x1dd + kernel_init+0xe/0x105 + ret_from_fork+0x27/0x50 + + -> #0 (console_owner){-...}: + __lock_acquire+0x118d/0x2714 + lock_acquire+0x203/0x258 + console_lock_spinning_enable+0x51/0x57 + console_unlock+0x25d/0x528 + vprintk_emit+0x111/0x17f + printk+0x59/0x73 + __report_bad_irq+0xa3/0xba + note_interrupt+0x19a/0x1d6 + handle_irq_event_percpu+0x57/0x79 + handle_irq_event+0x36/0x55 + handle_fasteoi_irq+0xc2/0x18a + do_IRQ+0xb3/0x157 + ret_from_intr+0x0/0x1d + cpuidle_enter_state+0x12f/0x1fd + cpuidle_enter+0x2e/0x3d + do_idle+0x1ce/0x2ce + cpu_startup_entry+0x1d/0x1f + start_kernel+0x406/0x46a + secondary_startup_64+0xa4/0xb0 + + other info that might help us debug this: + + Chain exists of: + console_owner --> &port_lock_key --> &irq_desc_lock_class + + Possible unsafe locking scenario: + + CPU0 CPU1 + ---- ---- + lock(&irq_desc_lock_class); + lock(&port_lock_key); + lock(&irq_desc_lock_class); + lock(console_owner); + + *** DEADLOCK *** + + 2 locks held by swapper/0/0: + #0: ffff88810a8e34c0 (&irq_desc_lock_class){-.-.}, at: __report_bad_irq+0x5b/0xba + #1: ffffffffab65b5c0 (console_lock){+.+.}, at: console_trylock_spinning+0x20/0x181 + + stack backtrace: + CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.4.39 #55 + Hardware name: XXXXXX + Call Trace: + + dump_stack+0xbf/0x133 + ? print_circular_bug+0xd6/0xe9 + check_noncircular+0x1b9/0x1c3 + __lock_acquire+0x118d/0x2714 + lock_acquire+0x203/0x258 + ? console_lock_spinning_enable+0x31/0x57 + console_lock_spinning_enable+0x51/0x57 + ? console_lock_spinning_enable+0x31/0x57 + console_unlock+0x25d/0x528 + ? console_trylock+0x18/0x4e + vprintk_emit+0x111/0x17f + ? 
lock_acquire+0x203/0x258 + printk+0x59/0x73 + __report_bad_irq+0xa3/0xba + note_interrupt+0x19a/0x1d6 + handle_irq_event_percpu+0x57/0x79 + handle_irq_event+0x36/0x55 + handle_fasteoi_irq+0xc2/0x18a + do_IRQ+0xb3/0x157 + common_interrupt+0xf/0xf + + +Signed-off-by: Sergey Senozhatsky +Fixes: 768aec0b5bcc ("serial: 8250: fix shared interrupts issues with SMP and RT kernels") +Reported-by: Guenter Roeck +Reported-by: Raul Rangel +Buglink: https://bugs.chromium.org/p/chromium/issues/detail?id=1114800 +Link: https://lore.kernel.org/lkml/CAHQZ30BnfX+gxjPm1DUd5psOTqbyDh4EJE=2=VAMW_VDafctkA@mail.gmail.com/T/#u +Reviewed-by: Andy Shevchenko +Reviewed-by: Guenter Roeck +Tested-by: Guenter Roeck +Cc: stable +Link: https://lore.kernel.org/r/20200817022646.1484638-1-sergey.senozhatsky@gmail.com +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/tty/serial/8250/8250_port.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -2209,6 +2209,10 @@ int serial8250_do_startup(struct uart_po + + if (port->irq) { + unsigned char iir1; ++ ++ if (port->irqflags & IRQF_SHARED) ++ disable_irq_nosync(port->irq); ++ + /* + * Test for UARTs that do not reassert THRE when the + * transmitter is idle and the interrupt has already +@@ -2218,8 +2222,6 @@ int serial8250_do_startup(struct uart_po + * allow register changes to become visible. + */ + spin_lock_irqsave(&port->lock, flags); +- if (up->port.irqflags & IRQF_SHARED) +- disable_irq_nosync(port->irq); + + wait_for_xmitr(up, UART_LSR_THRE); + serial_port_out_sync(port, UART_IER, UART_IER_THRI); +@@ -2231,9 +2233,10 @@ int serial8250_do_startup(struct uart_po + iir = serial_port_in(port, UART_IIR); + serial_port_out(port, UART_IER, 0); + ++ spin_unlock_irqrestore(&port->lock, flags); ++ + if (port->irqflags & IRQF_SHARED) + enable_irq(port->irq); +- spin_unlock_irqrestore(&port->lock, flags); + + /* + * If the interrupt is not reasserted, or we otherwise diff --git a/patches.suse/serial-pl011-Don-t-leak-amba_ports-entry-on-driver-r.patch b/patches.suse/serial-pl011-Don-t-leak-amba_ports-entry-on-driver-r.patch new file mode 100644 index 0000000..9472973 --- /dev/null +++ b/patches.suse/serial-pl011-Don-t-leak-amba_ports-entry-on-driver-r.patch @@ -0,0 +1,57 @@ +From 89efbe70b27dd325d8a8c177743a26b885f7faec Mon Sep 17 00:00:00 2001 +From: Lukas Wunner +Date: Thu, 13 Aug 2020 12:59:54 +0200 +Subject: [PATCH] serial: pl011: Don't leak amba_ports entry on driver register error +Git-commit: 89efbe70b27dd325d8a8c177743a26b885f7faec +Patch-mainline: v5.9-rc3 +References: git-fixes + +pl011_probe() calls pl011_setup_port() to reserve an amba_ports[] entry, +then calls pl011_register_port() to register the uart driver with the +tty layer. + +If registration of the uart driver fails, the amba_ports[] entry is not +released. If this happens 14 times (value of UART_NR macro), then all +amba_ports[] entries will have been leaked and driver probing is no +longer possible. (To be fair, that can only happen if the DeviceTree +doesn't contain alias IDs since they cause the same entry to be used for +a given port.) Fix it. 
+ +Fixes: ef2889f7ffee ("serial: pl011: Move uart_register_driver call to device") +Signed-off-by: Lukas Wunner +Cc: stable@vger.kernel.org # v3.15+ +Cc: Tushar Behera +Link: https://lore.kernel.org/r/138f8c15afb2f184d8102583f8301575566064a6.1597316167.git.lukas@wunner.de +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/tty/serial/amba-pl011.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index c010f639298d..7893048bea2d 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2615,7 +2615,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, + + static int pl011_register_port(struct uart_amba_port *uap) + { +- int ret; ++ int ret, i; + + /* Ensure interrupts from this UART are masked and cleared */ + pl011_write(0, uap, REG_IMSC); +@@ -2626,6 +2626,9 @@ static int pl011_register_port(struct uart_amba_port *uap) + if (ret < 0) { + dev_err(uap->port.dev, + "Failed to register AMBA-PL011 driver\n"); ++ for (i = 0; i < ARRAY_SIZE(amba_ports); i++) ++ if (amba_ports[i] == uap) ++ amba_ports[i] = NULL; + return ret; + } + } +-- +2.16.4 + diff --git a/patches.suse/serial-pl011-Fix-oops-on-EPROBE_DEFER.patch b/patches.suse/serial-pl011-Fix-oops-on-EPROBE_DEFER.patch new file mode 100644 index 0000000..e29092a --- /dev/null +++ b/patches.suse/serial-pl011-Fix-oops-on-EPROBE_DEFER.patch @@ -0,0 +1,98 @@ +From 27afac93e3bd7fa89749cf11da5d86ac9cde4dba Mon Sep 17 00:00:00 2001 +From: Lukas Wunner +Date: Thu, 13 Aug 2020 12:52:40 +0200 +Subject: [PATCH] serial: pl011: Fix oops on -EPROBE_DEFER +Git-commit: 27afac93e3bd7fa89749cf11da5d86ac9cde4dba +Patch-mainline: v5.9-rc3 +References: git-fixes + +If probing of a pl011 gets deferred until after free_initmem(), an oops +ensues because pl011_console_match() is called which has been freed. + +Fix by removing the __init attribute from the function and those it +calls. + +Commit 10879ae5f12e ("serial: pl011: add console matching function") +introduced pl011_console_match() not just for early consoles but +regular preferred consoles, such as those added by acpi_parse_spcr(). +Regular consoles may be registered after free_initmem() for various +reasons, one being deferred probing, another being dynamic enablement +of serial ports using a DeviceTree overlay. + +Thus, pl011_console_match() must not be declared __init and the +functions it calls mustn't either. 
+ +Stack trace for posterity: + +Unable to handle kernel paging request at virtual address 80c38b58 +Internal error: Oops: 8000000d [#1] PREEMPT SMP ARM +PC is at pl011_console_match+0x0/0xfc +LR is at register_console+0x150/0x468 +[<80187004>] (register_console) +[<805a8184>] (uart_add_one_port) +[<805b2b68>] (pl011_register_port) +[<805b3ce4>] (pl011_probe) +[<80569214>] (amba_probe) +[<805ca088>] (really_probe) +[<805ca2ec>] (driver_probe_device) +[<805ca5b0>] (__device_attach_driver) +[<805c8060>] (bus_for_each_drv) +[<805c9dfc>] (__device_attach) +[<805ca630>] (device_initial_probe) +[<805c90a8>] (bus_probe_device) +[<805c95a8>] (deferred_probe_work_func) + +Fixes: 10879ae5f12e ("serial: pl011: add console matching function") +Signed-off-by: Lukas Wunner +Cc: stable@vger.kernel.org # v4.10+ +Cc: Aleksey Makarov +Cc: Peter Hurley +Cc: Russell King +Cc: Christopher Covington +Link: https://lore.kernel.org/r/f827ff09da55b8c57d316a1b008a137677b58921.1597315557.git.lukas@wunner.de +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/tty/serial/amba-pl011.c | 11 +++++------ + 1 file changed, 5 insertions(+), 6 deletions(-) + +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index 7893048bea2d..67498594d7d7 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2241,9 +2241,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) + clk_disable(uap->clk); + } + +-static void __init +-pl011_console_get_options(struct uart_amba_port *uap, int *baud, +- int *parity, int *bits) ++static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, ++ int *parity, int *bits) + { + if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { + unsigned int lcr_h, ibrd, fbrd; +@@ -2276,7 +2275,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, + } + } + +-static int __init pl011_console_setup(struct console *co, char *options) ++static int pl011_console_setup(struct console *co, char *options) + { + struct uart_amba_port *uap; + int baud = 38400; +@@ -2344,8 +2343,8 @@ static int __init pl011_console_setup(struct console *co, char *options) + * + * Returns 0 if console matches; otherwise non-zero to use default matching + */ +-static int __init pl011_console_match(struct console *co, char *name, int idx, +- char *options) ++static int pl011_console_match(struct console *co, char *name, int idx, ++ char *options) + { + unsigned char iotype; + resource_size_t addr; +-- +2.16.4 + diff --git a/patches.suse/tipc-pass-tunnel-dev-as-NULL-to-udp_tunnel-6-_xmit_s.patch b/patches.suse/tipc-pass-tunnel-dev-as-NULL-to-udp_tunnel-6-_xmit_s.patch index 948939f..2d59dc2 100644 --- a/patches.suse/tipc-pass-tunnel-dev-as-NULL-to-udp_tunnel-6-_xmit_s.patch +++ b/patches.suse/tipc-pass-tunnel-dev-as-NULL-to-udp_tunnel-6-_xmit_s.patch @@ -4,7 +4,7 @@ Date: Mon, 17 Jun 2019 21:34:15 +0800 Subject: [PATCH] tipc: pass tunnel dev as NULL to udp_tunnel(6)_xmit_skb Git-commit: c3bcde026684c62d7a2b6f626dc7cf763833875c Patch-mainline: v5.2-rc6 -References: bsc#1051510 +References: bsc#1051510 bsc#1175515 udp_tunnel(6)_xmit_skb() called by tipc_udp_xmit() expects a tunnel device to count packets on dev->tstats, a perpcu variable. 
However, TIPC is using diff --git a/patches.suse/usb-gadget-f_tcm-Fix-some-resource-leaks-in-some-err.patch b/patches.suse/usb-gadget-f_tcm-Fix-some-resource-leaks-in-some-err.patch new file mode 100644 index 0000000..00b1e34 --- /dev/null +++ b/patches.suse/usb-gadget-f_tcm-Fix-some-resource-leaks-in-some-err.patch @@ -0,0 +1,46 @@ +From 07c8434150f4eb0b65cae288721c8af1080fde17 Mon Sep 17 00:00:00 2001 +From: Christophe JAILLET +Date: Fri, 14 Aug 2020 07:55:01 +0200 +Subject: [PATCH] usb: gadget: f_tcm: Fix some resource leaks in some error paths +Git-commit: 07c8434150f4eb0b65cae288721c8af1080fde17 +Patch-mainline: v5.9-rc3 +References: git-fixes + +If a memory allocation fails within a 'usb_ep_alloc_request()' call, the +already allocated memory must be released. + +Fix a mix-up in the code and free the correct requests. + +Fixes: c52661d60f63 ("usb-gadget: Initial merge of target module for UASP + BOT") +Signed-off-by: Christophe JAILLET +Signed-off-by: Felipe Balbi +Acked-by: Takashi Iwai + +--- + drivers/usb/gadget/function/f_tcm.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c +index d94b814328c8..184165e27908 100644 +--- a/drivers/usb/gadget/function/f_tcm.c ++++ b/drivers/usb/gadget/function/f_tcm.c +@@ -753,12 +753,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream) + goto err_sts; + + return 0; ++ + err_sts: +- usb_ep_free_request(fu->ep_status, stream->req_status); +- stream->req_status = NULL; +-err_out: + usb_ep_free_request(fu->ep_out, stream->req_out); + stream->req_out = NULL; ++err_out: ++ usb_ep_free_request(fu->ep_in, stream->req_in); ++ stream->req_in = NULL; + out: + return -ENOMEM; + } +-- +2.16.4 + diff --git a/patches.suse/usb-host-ohci-exynos-Fix-error-handling-in-exynos_oh.patch b/patches.suse/usb-host-ohci-exynos-Fix-error-handling-in-exynos_oh.patch new file mode 100644 index 0000000..3c03967 --- /dev/null +++ b/patches.suse/usb-host-ohci-exynos-Fix-error-handling-in-exynos_oh.patch @@ -0,0 +1,46 @@ +From 1d4169834628d18b2392a2da92b7fbf5e8e2ce89 Mon Sep 17 00:00:00 2001 +From: Tang Bin +Date: Wed, 26 Aug 2020 22:49:31 +0800 +Subject: [PATCH] usb: host: ohci-exynos: Fix error handling in exynos_ohci_probe() +Git-commit: 1d4169834628d18b2392a2da92b7fbf5e8e2ce89 +Patch-mainline: v5.9-rc3 +References: git-fixes + +If the function platform_get_irq() failed, the negative value +returned will not be detected here. So fix error handling in +exynos_ohci_probe(). And when get irq failed, the function +platform_get_irq() logs an error message, so remove redundant +message here. 
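+
+The recommended pattern is a sketch like the following (generic,
+illustrative names, not the exynos code itself):
+
+  #include <linux/platform_device.h>
+
+  static int example_probe(struct platform_device *pdev)
+  {
+          int irq = platform_get_irq(pdev, 0);
+
+          /* platform_get_irq() returns a negative errno on failure and
+           * already logs an error, so just propagate the value. */
+          if (irq < 0)
+                  return irq;
+
+          /* ... request_irq(irq, ...) and the rest of probe ... */
+          return 0;
+  }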
+ +Fixes: 62194244cf87 ("USB: Add Samsung Exynos OHCI diver") +Signed-off-by: Zhang Shengju +Cc: stable +Signed-off-by: Tang Bin +Reviewed-by: Krzysztof Kozlowski +Link: https://lore.kernel.org/r/20200826144931.1828-1-tangbin@cmss.chinamobile.com +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/usb/host/ohci-exynos.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c +index bd40e597f256..5f5e8a64c8e2 100644 +--- a/drivers/usb/host/ohci-exynos.c ++++ b/drivers/usb/host/ohci-exynos.c +@@ -171,9 +171,8 @@ static int exynos_ohci_probe(struct platform_device *pdev) + hcd->rsrc_len = resource_size(res); + + irq = platform_get_irq(pdev, 0); +- if (!irq) { +- dev_err(&pdev->dev, "Failed to get IRQ\n"); +- err = -ENODEV; ++ if (irq < 0) { ++ err = irq; + goto fail_io; + } + +-- +2.16.4 + diff --git a/patches.suse/usb-xhci-Fix-ASMedia-ASM1142-DMA-addressing.patch b/patches.suse/usb-xhci-Fix-ASMedia-ASM1142-DMA-addressing.patch new file mode 100644 index 0000000..41ecfd8 --- /dev/null +++ b/patches.suse/usb-xhci-Fix-ASMedia-ASM1142-DMA-addressing.patch @@ -0,0 +1,50 @@ +From ec37198acca7b4c17b96247697406e47aafe0605 Mon Sep 17 00:00:00 2001 +From: Forest Crossman +Date: Mon, 27 Jul 2020 23:24:08 -0500 +Subject: [PATCH] usb: xhci: Fix ASMedia ASM1142 DMA addressing +Git-commit: ec37198acca7b4c17b96247697406e47aafe0605 +Patch-mainline: v5.9-rc1 +References: git-fixes + +I've confirmed that the ASMedia ASM1142 has the same problem as the +ASM2142/ASM3142, in that it too reports that it supports 64-bit DMA +addresses when in fact it does not. As with the ASM2142/ASM3142, this +can cause problems on systems where the upper bits matter, and adding +the XHCI_NO_64BIT_SUPPORT quirk completely fixes the issue. 
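+
+In effect the quirk makes the driver ignore the controller's 64-bit
+claim and fall back to 32-bit DMA addressing. A rough sketch of that
+fallback (illustrative only, not the xhci implementation):
+
+  #include <linux/device.h>
+  #include <linux/dma-mapping.h>
+
+  static int example_setup_dma(struct device *dev, bool no_64bit_quirk)
+  {
+          /* Prefer 64-bit DMA unless the hardware is known to lie. */
+          if (!no_64bit_quirk &&
+              !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+                  return 0;
+
+          /* Quirky controller: restrict all DMA to the low 4 GiB. */
+          return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+  }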
+ +Acked-by: Mathias Nyman +Signed-off-by: Forest Crossman +Cc: stable +Link: https://lore.kernel.org/r/20200728042408.180529-3-cyrozap@gmail.com +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/usb/host/xhci-pci.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index baa5af88ca67..3feaafebfe58 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -59,6 +59,7 @@ + #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc + #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 + #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 ++#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 + #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 + + static const char hcd_name[] = "xhci_hcd"; +@@ -268,7 +269,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && +- pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI) ++ (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI || ++ pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI)) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && +-- +2.16.4 + diff --git a/patches.suse/usb-xhci-define-IDs-for-various-ASMedia-host-control.patch b/patches.suse/usb-xhci-define-IDs-for-various-ASMedia-host-control.patch new file mode 100644 index 0000000..f61f8db --- /dev/null +++ b/patches.suse/usb-xhci-define-IDs-for-various-ASMedia-host-control.patch @@ -0,0 +1,52 @@ +From 1841cb255da41e87bed9573915891d056f80e2e7 Mon Sep 17 00:00:00 2001 +From: Forest Crossman +Date: Mon, 27 Jul 2020 23:24:07 -0500 +Subject: [PATCH] usb: xhci: define IDs for various ASMedia host controllers +Git-commit: 1841cb255da41e87bed9573915891d056f80e2e7 +Patch-mainline: v5.9-rc1 +References: git-fixes + +Not all ASMedia host controllers have a device ID that matches its part +number. #define some of these IDs to make it clearer at a glance which +chips require what quirks. 
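+
+The readability win is simply a named constant instead of bare hex in
+the quirk checks; schematically (the ID values match the defines added
+below, the helper itself is illustrative):
+
+  #include <linux/pci.h>
+
+  #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
+  #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI  0x2142
+
+  static bool example_needs_trust_tx_length(struct pci_dev *pdev)
+  {
+          /* "0x1142" says nothing; the name documents which chip it is. */
+          return pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+                 pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI;
+  }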
+ +Acked-by: Mathias Nyman +Signed-off-by: Forest Crossman +Link: https://lore.kernel.org/r/20200728042408.180529-2-cyrozap@gmail.com +Cc: stable +Signed-off-by: Greg Kroah-Hartman +Acked-by: Takashi Iwai + +--- + drivers/usb/host/xhci-pci.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -59,7 +59,9 @@ + #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba + #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb + #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc ++#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 + #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 ++#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 + + static const char hcd_name[] = "xhci_hcd"; + +@@ -234,13 +236,13 @@ static void xhci_pci_quirks(struct devic + xhci->quirks |= XHCI_BROKEN_STREAMS; + + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && +- pdev->device == 0x1042) ++ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) + xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && +- pdev->device == 0x1142) ++ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && +- pdev->device == 0x2142) ++ pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && diff --git a/rpm/check-for-config-changes b/rpm/check-for-config-changes index c409f20..a751e41 100755 --- a/rpm/check-for-config-changes +++ b/rpm/check-for-config-changes @@ -2,8 +2,8 @@ # lines 4 contains a timestamp... differences="$( - diff -bU0 <(sed -e '/CONFIG_GCC_VERSION/ d' -e '/^# .* is not set$/p' -e '/^$\|^#/d' "$1" | sort) \ - <(sed -e '/CONFIG_GCC_VERSION/ d' -e '/^# .* is not set$/p' -e '/^$\|^#/d' "$2" | sort) \ + diff -bU0 <(sed -e '/CONFIG_GCC_VERSION/ d' -e '/CONFIG_LD_VERSION/ d' -e '/CONFIG_CC_VERSION_TEXT/ d' -e '/^# .* is not set$/p' -e '/^$\|^#/d' "$1" | sort) \ + <(sed -e '/CONFIG_GCC_VERSION/ d' -e '/CONFIG_LD_VERSION/ d' -e '/CONFIG_CC_VERSION_TEXT/ d' -e '/^# .* is not set$/p' -e '/^$\|^#/d' "$2" | sort) \ | grep '^[-+][^-+]' )" || true if [ -n "$differences" ]; then diff --git a/rpm/constraints.in b/rpm/constraints.in index 736525f..dd0615e 100644 --- a/rpm/constraints.in +++ b/rpm/constraints.in @@ -42,6 +42,46 @@ + + + + kernel-docs + kernel-docs-rt + + + + 4 + + + + + + + + aarch64 +@BINARY_PACKAGES_XML@ + + + + 30 + + + + + + + + armv7l + armv6l +@BINARY_PACKAGES_XML@ + + + + 20 + + + + diff --git a/rpm/kabi.pl b/rpm/kabi.pl index ffe73be..6176aa5 100755 --- a/rpm/kabi.pl +++ b/rpm/kabi.pl @@ -63,11 +63,16 @@ sub symvers_uses_namespaces { my $line = <$fh>; chomp $line; - # If there are 5 tab delimited fields, then it's a newer (>=5.4) - # Module.symvers format with namespaces. The older Module.symvers - # format only has 4 fields (crc, symbol, module, export type). 
- my @l = split(/\t/, $line); - if (@l > 4) { + # The new (>=5.4) Module.symvers format has 4 tabs (5 fields): + # + # crc\tsymbol\tmodule\texport_type\tnamespace + # + # The older Module.symvers format only has 3 tabs (4 fields): + # + # crc\tsymbol\tmodule\texport_type + + my $num_tabs = $line =~ tr/\t//; + if ($num_tabs > 3) { return 1; } else { return 0; @@ -83,14 +88,14 @@ sub load_symvers { xopen(my $fh, '<', $file); while (<$fh>) { chomp; - my @l = split(/\t/); + my @l = split(/\t/, $_, -1); if (@l < 4) { print STDERR "$file:$.: unknown line\n"; $errors++; next; } if ($use_namespaces) { - $new = { crc => $l[0], namespace => $l[2], mod => $l[3], type => $l[4] }; + $new = { crc => $l[0], mod => $l[2], type => $l[3], namespace => $l[4] }; } else { $new = { crc => $l[0], mod => $l[2], type => $l[3] }; } diff --git a/rpm/kernel-binary.spec.in b/rpm/kernel-binary.spec.in index b2f7e31..7ad0926 100644 --- a/rpm/kernel-binary.spec.in +++ b/rpm/kernel-binary.spec.in @@ -54,6 +54,9 @@ %define config_vars CONFIG_MODULES CONFIG_MODULE_SIG CONFIG_KMSG_IDS CONFIG_SUSE_KERNEL_SUPPORTED CONFIG_EFI_STUB %{expand:%(eval "$(test -n "%cpu_arch_flavor" && tar -xjf %_sourcedir/config.tar.bz2 --to-stdout config/%cpu_arch_flavor)"; for config in %config_vars; do echo "%%global $config ${!config:-n}"; done)} %define split_extra (%CONFIG_MODULES == "y" && %CONFIG_SUSE_KERNEL_SUPPORTED == "y") +%if %CONFIG_MODULES != "y" + %define klp_symbols 0 +%endif %ifarch %ix86 x86_64 %define install_vdso 1 @@ -207,8 +210,8 @@ BuildArch: i686 # Note that KMPs embed the version of the kernel built against, that's why # the _3 suffix for 2.6.x-based KMPs %define obsolete_kmp() Obsoletes: %1-kmp-%build_flavor <= %2 \ -Provides: %1-kmp = %2 \ -Provides: %1-kmp-%build_flavor = %2 +Provides: %1-kmp = %2.1 \ +Provides: %1-kmp-%build_flavor = %2.1 # sles10 / 10.3 %obsolete_kmp iwlwifi 1.3.27_3 %obsolete_kmp ipw3945 1.2.2_3 @@ -1084,7 +1087,7 @@ kernel module packages) against the %build_flavor flavor of the kernel. /usr/src/linux-obj/%kmp_target_cpu %endif -%if "%livepatch" != "" && %CONFIG_SUSE_KERNEL_SUPPORTED == "y" && "%variant" == "" +%if "%livepatch" != "" && %CONFIG_SUSE_KERNEL_SUPPORTED == "y" && "%variant" == "" && %build_default %if %livepatch == kgraft %define patch_package %{livepatch}-patch %else diff --git a/rpm/kernel-docs.spec.in b/rpm/kernel-docs.spec.in index 9a80554..1bc710d 100644 --- a/rpm/kernel-docs.spec.in +++ b/rpm/kernel-docs.spec.in @@ -46,7 +46,7 @@ BuildRequires: graphviz BuildRequires: graphviz-gd BuildRequires: graphviz-gnome BuildRequires: python-packaging -BuildRequires: python-Sphinx +BuildRequires: python3-Sphinx < 3 BuildRequires: python-six BuildRequires: texlive-anyfontsize %if %build_pdf diff --git a/rpm/kernel-obs-build.spec.in b/rpm/kernel-obs-build.spec.in index 9d68deb..b1daeb2 100644 --- a/rpm/kernel-obs-build.spec.in +++ b/rpm/kernel-obs-build.spec.in @@ -54,7 +54,7 @@ BuildRequires: kernel ExclusiveArch: @ARCHS@ %if 0%{?suse_version} < 1315 -# For SLE 11 +# For SLE 11 BuildRequires: mkinitrd BuildRequires: perl-Bootloader BuildRoot: %{_tmppath}/%{name}-%{version}-build @@ -74,7 +74,7 @@ Release: @RELEASE@ %description This package is repackaging already compiled kernels to make them usable inside of Open Build Service (OBS) VM builds. An initrd with some basic -kernel modules is generated as well, but further kernel modules can be +kernel modules is generated as well, but further kernel modules can be loaded during build when installing the kernel package. 
%prep @@ -116,7 +116,7 @@ echo "DefaultTasksAccounting=no" >> /etc/systemd/system.conf # a longer list to have them also available for qemu cross builds where x86_64 kernel runs in eg. arm env. # this list of modules where available on build workers of build.opensuse.org, so we stay compatible. -export KERNEL_MODULES="loop dm-mod dm-snapshot binfmt-misc fuse kqemu squashfs ext2 ext3 ext4 reiserfs btrfs xfs nf_conntrack_ipv6 binfmt_misc virtio_pci virtio_mmio virtio_blk virtio_rng fat vfat nls_cp437 nls_iso8859-1 ibmvscsi sd_mod e1000 ibmveth" +export KERNEL_MODULES="loop dm-crypt dm-mod dm-snapshot binfmt-misc fuse kqemu squashfs ext2 ext3 ext4 reiserfs btrfs xfs nf_conntrack_ipv6 binfmt_misc virtio_pci virtio_mmio virtio_blk virtio_rng fat vfat nls_cp437 nls_iso8859-1 ibmvscsi sd_mod e1000 ibmveth overlay" # manually load all modules to make sure they're available for i in $KERNEL_MODULES; do @@ -141,7 +141,7 @@ ROOT="" %endif %if 0%{?suse_version} && 0%{?suse_version} < 1315 -# For SLE 11 +# For SLE 11 /sbin/mkinitrd $ROOT \ -m "$KERNEL_MODULES" \ -k /boot/%{kernel_name}-*-default -M /boot/System.map-*-default -i /tmp/initrd.kvm -B diff --git a/rpm/kernel-source.spec.in b/rpm/kernel-source.spec.in index e6f53b3..0d89d56 100644 --- a/rpm/kernel-source.spec.in +++ b/rpm/kernel-source.spec.in @@ -132,6 +132,7 @@ Recommends: bison Recommends: flex Recommends: libelf-devel Recommends: openssl-devel +%obsolete_rebuilds %name %(chmod +x %_sourcedir/{@SCRIPTS@}) @@ -146,6 +147,7 @@ Linux kernel sources with many fixes and improvements. %source_timestamp %package -n kernel-devel%variant +%obsolete_rebuilds kernel-devel%variant Summary: Development files needed for building kernel modules Group: Development/Sources AutoReqProv: off @@ -173,6 +175,7 @@ This package provides the rpm macros and templates for Kernel Module Pakcages %source_timestamp %package vanilla +%obsolete_rebuilds %name-vanilla Summary: Vanilla Linux kernel sources with minor build fixes Group: Development/Sources AutoReqProv: off @@ -261,6 +264,7 @@ perl "%_sourcedir/group-source-files.pl" \ -L "%src_install_dir" popd +find %{buildroot}/usr/src/linux* -type f -name '*.[ch]' -perm /0111 -exec chmod -v a-x {} + # kernel-source and kernel-$flavor-devel are built independently, but the # shipped sources (/usr/src/linux/) need to be older than generated files # (/usr/src/linux-obj). We rely on the git commit timestamp to not point into diff --git a/rpm/mkspec-dtb b/rpm/mkspec-dtb index 92ca10a..39501c6 100755 --- a/rpm/mkspec-dtb +++ b/rpm/mkspec-dtb @@ -27,6 +27,7 @@ my @armv7l_package_list = ( ['dtb-meson6', 'meson6-*.dts', "Amlogic Meson 6 based systems"], ['dtb-meson8', 'meson8-*.dts', "Amlogic Meson 8 based systems"], ['dtb-meson8b', 'meson8b-*.dts', "Amlogic Meson 8b based systems"], + ['dtb-mt76', 'mt76*.dts', "MediaTek mt76 based systems"], ['dtb-omap3', 'omap3*.dts', "TI OMAP3 based systems"], ['dtb-omap4', 'omap4*.dts', "TI OMAP4 based systems"], ['dtb-omap5', 'omap5*.dts', "TI OMAP5 based systems"], diff --git a/rpm/package-descriptions b/rpm/package-descriptions index 763df71..426ded2 100644 --- a/rpm/package-descriptions +++ b/rpm/package-descriptions @@ -57,22 +57,6 @@ The Standard Kernel - without any SUSE patches The standard kernel - without any SUSE patches -=== kernel-xen === -The Xen Kernel - -The Linux kernel for Xen paravirtualization. - -This kernel can be used both as the domain0 ("xen0") and as an -unprivileged ("xenU") kernel. 
- -=== kernel-ec2 === -The Amazon EC2 Xen Kernel - -The Linux kernel for Xen paravirtualization. - -This kernel can only be used as an unprivileged ("xenU") kernel -(mainly, but not exclusively, for Amazon EC2). - === kernel-64kb === Kernel with 64kb PAGE_SIZE @@ -87,16 +71,12 @@ Kernel for LPAE enabled systems The kernel for all 32-bit ARM platforms that support LPAE. This includes all Cortex A15 based SoCs, like the Exynos5, OMAP5 or Calxeda ECX-2000. -=== kernel-cubox === -Kernel for SolidRun Cubox - -The standard kernel for Marvell Dove SoC, as found in -the SolidRun Cubox. - -=== kernel-exynos === -Kernel for Samsung's Exynos SoC +=== kernel-preempt === +Kernel with PREEMPT support -The standard kernel for Samsung's Exynos 4 & 5 SoC, as found in the Origen board. +The kernel for arm64 and x86_64 architectures that supports CONFIG_PREEMPT. Its +main purpose is to serve workloads with a higher demand on smaller latencies +than the default kernel in average. === kernel-syzkaller === Kernel used for fuzzing by syzkaller @@ -105,11 +85,6 @@ The syzkaller kernel should be used solely in a virtual machine by syzkaller only. It supports kernel coverage and enables a lot of slow debugging options. -=== kernel-versatile === -Kernel for Versatile SoC - -The standard kernel for Versatile SoC, or for emulation with QEMU. - === kernel-zfcpdump === The IBM System Z zfcpdump Kernel diff --git a/series.conf b/series.conf index daa53c8..46a12f4 100644 --- a/series.conf +++ b/series.conf @@ -43721,6 +43721,7 @@ patches.suse/qed-Fix-blocking-unlimited-SPQ-entries-leak.patch patches.suse/qed-Fix-SPQ-entries-not-returned-to-pool-in-error-fl.patch patches.suse/qed-Fix-potential-memory-corruption.patch + patches.suse/0003-net-stmmac-Fix-RX-packet-size-8191.patch patches.suse/net-smsc95xx-Fix-MTU-range.patch patches.suse/flow_dissector-do-not-dissect-l4-ports-for-fragments.patch patches.suse/net-sched-cls_flower-validate-nested-enc_opts_policy.patch @@ -49877,6 +49878,8 @@ patches.suse/IB-hfi1-Create-inline-to-get-extended-headers.patch patches.suse/net-remove-duplicate-fetch-in-sock_getsockopt.patch patches.suse/tun-wake-up-waitqueues-after-IFF_UP-is-set.patch + patches.suse/ip_tunnel-allow-not-to-count-pkts-on-tstats-by-setti.patch + patches.suse/ip6_tunnel-allow-not-to-count-pkts-on-tstats-by-pass.patch patches.suse/tipc-pass-tunnel-dev-as-NULL-to-udp_tunnel-6-_xmit_s.patch patches.suse/net-udp_gso-Allow-TX-timestamp-with-UDP-GSO.patch patches.suse/net-af_iucv-remove-gfp_dma-restriction-for-hipertransport @@ -51086,6 +51089,7 @@ patches.suse/ixgbe-fix-possible-deadlock-in-ixgbe_service_task.patch patches.suse/net-mlx5e-Use-flow-keys-dissector-to-parse-packets-f.patch patches.suse/net-mlx5e-Only-support-tx-rx-pause-setting-for-port-.patch + patches.suse/msft-hv-1910-hv_netvsc-Fix-a-warning-of-suspicious-RCU-usage.patch patches.suse/tools-bpftool-fix-error-message-prog-object.patch patches.suse/sctp-fix-the-transport-error_count-check.patch patches.suse/ibmveth-Convert-multicast-list-size-for-little-endia.patch @@ -51315,6 +51319,7 @@ patches.suse/x86-intel-aggregate-big-core-graphics-naming.patch patches.suse/x86-intel-aggregate-microserver-naming.patch patches.suse/x86-kconfig-remove-x86_direct_gbpages-dependency-on-debug_pagealloc.patch + patches.suse/msft-hv-1903-x86-hyperv-Create-and-use-Hyper-V-page-definitions.patch patches.suse/platform-x86-pmc_atom-Add-Siemens-SIMATIC-IPC227E-to.patch patches.suse/tools-power-x86-intel-speed-select-Fix-a-read-overfl.patch 
patches.suse/alarmtimer-Use-EOPNOTSUPP-instead-of-ENOTSUPP.patch @@ -51502,6 +51507,7 @@ patches.suse/rtlwifi-Fix-file-release-memory-leak.patch patches.suse/netfilter-not-mark-a-spinlock-as-__read_mostly.patch patches.suse/net-mlx5e-Remove-unnecessary-clear_bit-s.patch + patches.suse/msft-hv-1945-hv_netvsc-Allow-scatter-gather-feature-to-be-tunable.patch patches.suse/ixgbe-sync-the-first-fragment-unconditionally.patch patches.suse/i40e-Add-support-for-X710-device.patch patches.suse/net-hns3-fix-port-setting-handle-for-fibre-port.patch @@ -51883,6 +51889,7 @@ patches.suse/msft-hv-1947-PCI-hv-Use-bytes-4-and-5-from-instance-ID-as-the-PCI.patch patches.suse/mfd-intel-lpss-Remove-D3cold-delay.patch patches.suse/hypfs-fix-error-number-left-in-struct-pointer-member + patches.suse/msft-hv-1907-hv_balloon-Use-a-static-page-for-the-balloon_up-send.patch patches.suse/platform-x86-pmc_atom-Add-Siemens-SIMATIC-IPC277E-to.patch patches.suse/platform-x86-i2c-multi-instantiate-Derive-the-device.patch patches.suse/fat-work-around-race-with-userspace-s-read-via-block.patch @@ -52546,6 +52553,7 @@ patches.suse/uaccess-Add-non-pagefault-user-space-write-function.patch patches.suse/bpf-Make-use-of-probe_user_write-in-probe-write-help.patch patches.suse/i40e-enable-X710-support.patch + patches.suse/msft-hv-1961-hv_netvsc-flag-software-created-hash-value.patch patches.suse/mt7601u-fix-bbp-version-check-in-mt7601u_wait_bbp_re.patch patches.suse/rtlwifi-prevent-memory-leak-in-rtl_usb_probe.patch patches.suse/libertas-fix-a-potential-NULL-pointer-dereference.patch @@ -52797,6 +52805,7 @@ patches.suse/powerpc-fadump-when-fadump-is-supported-register-the.patch patches.suse/powerpc-mm-drop-ifdef-CONFIG_MMU-in-is_ioremap_addr.patch patches.suse/powerpc-powernv-Disable-native-PCIe-port-management.patch + patches.suse/msft-hv-1980-Drivers-hv-balloon-Remove-dependencies-on-guest-page.patch patches.suse/compat_ioctl-handle-SIOCOUTQNSD.patch patches.suse/clk-samsung-exynos5420-Preserve-CPU-clocks-configura.patch patches.suse/clk-pxa-fix-one-of-the-pxa-RTC-clocks.patch @@ -53013,6 +53022,8 @@ patches.suse/can-slcan-Fix-use-after-free-Read-in-slcan_open.patch patches.suse/net-bridge-deny-dev_set_mac_address-when-unregisteri.patch patches.suse/net-sched-fix-dump-qlen-for-sch_mq-sch_mqprio-with-N.patch + patches.suse/net-ipv6-add-net-argument-to-ip6_dst_lookup_flow.patch + patches.suse/net-ipv6_stub-use-ip6_dst_lookup_flow-instead-of-ip6.patch patches.suse/openvswitch-support-asymmetric-conntrack.patch patches.suse/s390-qeth-guard-against-runt-packets patches.suse/s390-qeth-ensure-linear-access-to-packet-headers @@ -53637,6 +53648,7 @@ patches.suse/PCI-IOV-Fix-memory-leak-in-pci_iov_add_virtfn.patch patches.suse/vfs-fix-do_last-regression.patch patches.suse/cifs-fix-soft-mounts-hanging-in-the-reconnect-code.patch + patches.suse/msft-hv-2011-hv_balloon-Balloon-up-according-to-request-page-numb.patch patches.suse/mfd-dln2-More-sanity-checking-for-endpoints.patch patches.suse/mfd-da9062-Fix-watchdog-compatible-string.patch patches.suse/mfd-rn5t618-Mark-ADC-control-register-volatile.patch @@ -54585,6 +54597,7 @@ patches.suse/0020-dm-verity-fec-fix-hash-block-number-in-verity_fec_de.patch patches.suse/scsi-qla2xxx-set-UNLOADING-before-waiting-for-sessio.patch patches.suse/scsi-qla2xxx-check-UNLOADING-before-posting-async-wo.patch + patches.suse/scsi-target-iblock-fix-WRITE-SAME-zeroing.patch patches.suse/0001-drm-edid-Fix-off-by-one-in-DispID-DTD-pixel-clock.patch patches.suse/drm-qxl-qxl_release-leak-in-qxl_draw_dirty_fb.patch 
patches.suse/drm-qxl-qxl_release-leak-in-qxl_hw_surface_alloc.patch @@ -54915,7 +54928,18 @@ patches.suse/vfio-pci-invalidate-mmaps-and-block-mmio-access-on-disabled-memory patches.suse/gpiolib-Document-that-GPIO-line-names-are-not-global.patch patches.suse/RDMA-efa-Fix-setting-of-wrong-bit-in-get-set_feature.patch + patches.suse/scsi-qla2xxx-Split-qla2x00_configure_local_loop.patch + patches.suse/scsi-qla2xxx-Use-ARRAY_SIZE-instead-of-open-coding-i.patch patches.suse/scsi-lpfc-remove-duplicate-unloading-checks.patch + patches.suse/scsi-qla2xxx-Fix-MPI-failure-AEN-8200-handling.patch + patches.suse/scsi-qla2xxx-make-1-bit-bit-fields-unsigned-int.patch + patches.suse/scsi-qla2xxx-Fix-warning-after-FC-target-reset.patch + patches.suse/scsi-qla2xxx-Fix-failure-message-in-qlt_disable_vha.patch + patches.suse/scsi-qla2xxx-Fix-issue-with-adapter-s-stopping-state.patch + patches.suse/scsi-qla2xxx-Make-qla_set_ini_mode-return-void.patch + patches.suse/scsi-qla2xxx-Use-true-false-for-need_mpi_reset.patch + patches.suse/scsi-qla2xxx-Use-true-false-for-ha-fw_dumped.patch + patches.suse/scsi-qla2xxx-Make-qlafx00_process_aen-return-void.patch patches.suse/scsi-lpfc-Maintain-atomic-consistency-of-queue_claim.patch patches.suse/scsi-lpfc-Remove-re-binding-of-nvme-rport-during-reg.patch patches.suse/scsi-lpfc-Fix-negation-of-else-clause-in-lpfc_prep_n.patch @@ -54925,6 +54949,22 @@ patches.suse/scsi-lpfc-Fix-MDS-Diagnostic-Enablement-definition.patch patches.suse/scsi-lpfc-Update-lpfc-version-to-12.8.0.1.patch patches.suse/scsi-lpfc-Remove-redundant-initialization-to-variabl.patch + patches.suse/scsi-qla2xxx-Fix-spelling-of-a-variable-name.patch + patches.suse/scsi-qla2xxx-Simplify-the-functions-for-dumping-firm.patch + patches.suse/scsi-qla2xxx-Sort-BUILD_BUG_ON-statements-alphabetic.patch + patches.suse/scsi-qla2xxx-Add-more-BUILD_BUG_ON-statements.patch + patches.suse/scsi-qla2xxx-Make-a-gap-in-struct-qla2xxx_offld_chai.patch + patches.suse/scsi-qla2xxx-Increase-the-size-of-struct-qla_fcp_pri.patch + patches.suse/scsi-qla2xxx-Change-two-hardcoded-constants-into-off.patch + patches.suse/scsi-qla2xxx-Use-register-names-instead-of-register-.patch + patches.suse/scsi-qla2xxx-Fix-the-code-that-reads-from-mailbox-re.patch + patches.suse/scsi-qla2xxx-Change-RD-WRT-_REG_-function-names-from.patch + patches.suse/scsi-qla2xxx-Cast-explicitly-to-uint16_t-uint32_t.patch + patches.suse/scsi-qla2xxx-Use-make_handle-instead-of-open-coding-.patch + patches.suse/scsi-qla2xxx-Fix-endianness-annotations-in-header-fi.patch + patches.suse/scsi-qla2xxx-Fix-endianness-annotations-in-source-fi.patch + patches.suse/scsi-qla2xxx-Remove-an-unused-function.patch + patches.suse/scsi-qla2xxx-Remove-return-value-from-qla_nvme_ls.patch patches.suse/scsi-lpfc-Fix-lpfc_nodelist-leak-when-processing-uns.patch patches.suse/ext4-fix-EXT_MAX_EXTENT-INDEX-to-check-for-zeroed-eh.patch patches.suse/ext4-fix-race-between-ext4_sync_parent-and-rename.patch @@ -55089,6 +55129,7 @@ patches.suse/Staging-rtl8723bs-prevent-buffer-overflow-in-update_.patch patches.suse/vfio-pci-fix-sr-iov-vf-handling-with-mmio-blocking patches.suse/scsi-qla2xxx-Set-NVMe-status-code-for-failed-NVMe-FC.patch + patches.suse/scsi-qla2xxx-Keep-initiator-ports-after-RSCN.patch patches.suse/scsi-lpfc-Avoid-another-null-dereference-in-lpfc_sli.patch patches.suse/msft-hv-2106-Drivers-hv-Change-flag-to-write-log-level-in-panic-m.patch patches.suse/nfsd-apply-umask-on-fs-without-ACL-support.patch @@ -55231,6 +55272,7 @@ 
patches.suse/crypto-cpt-don-t-sleep-of-CRYPTO_TFM_REQ_MAY_SLEEP-w.patch patches.suse/crypto-ccp-Fix-use-of-merged-scatterlists.patch patches.suse/crypto-qat-fix-double-free-in-qat_uclo_create_batch_.patch + patches.suse/0001-block-improve-discard-bio-alignment-in-__blkdev_issu.patch patches.suse/platform-x86-intel-hid-Fix-return-value-check-in-che.patch patches.suse/platform-x86-intel-vbtn-Fix-return-value-check-in-ch.patch patches.suse/regulator-gpio-Honor-regulator-boot-on-property.patch @@ -55244,16 +55286,24 @@ patches.suse/clk-st-Remove-uninitialized_var-usage.patch patches.suse/clk-spear-Remove-uninitialized_var-usage.patch patches.suse/KVM-PPC-Book3S-PR-Remove-uninitialized_var-usage.patch + patches.suse/0001-bcache-allocate-meta-data-pages-as-compound-pages.patch patches.suse/nvme-multipath-fix-logic-for-non-optimized-paths.patch patches.suse/nvme-multipath-do-not-fall-back-to-__nvme_find_path-.patch patches.suse/md-raid5-Fix-Force-reconstruct-write-io-stuck-in-deg.patch patches.suse/devres-keep-both-device-name-and-resource-name-in-pr.patch patches.suse/driver-core-Avoid-binding-drivers-to-dead-devices.patch + patches.suse/USB-rename-USB-quirk-to-USB_QUIRK_ENDPOINT_IGNORE.patch patches.suse/usbip-tools-fix-module-name-in-man-page.patch patches.suse/USB-iowarrior-fix-up-report-size-handling-for-some-d.patch patches.suse/USB-serial-cp210x-enable-usb-generic-throttle-unthro.patch patches.suse/USB-serial-cp210x-re-enable-auto-RTS-on-open.patch + patches.suse/USB-serial-ftdi_sio-make-process-packet-buffer-unsig.patch + patches.suse/USB-serial-ftdi_sio-clean-up-receive-processing.patch + patches.suse/USB-serial-ftdi_sio-fix-break-and-sysrq-handling.patch patches.suse/USB-serial-iuu_phoenix-fix-led-activity-helpers.patch + patches.suse/USB-serial-qcserial-add-EM7305-QDL-product-ID.patch + patches.suse/usb-xhci-define-IDs-for-various-ASMedia-host-control.patch + patches.suse/usb-xhci-Fix-ASMedia-ASM1142-DMA-addressing.patch patches.suse/leds-lm355x-avoid-enum-conversion-warning.patch patches.suse/leds-88pm860x-fix-use-after-free-on-unbind.patch patches.suse/leds-da903x-fix-use-after-free-on-unbind.patch @@ -55286,6 +55336,7 @@ patches.suse/drm-nouveau-fix-multiple-instances-of-reference-coun.patch patches.suse/drm-ttm-nouveau-don-t-call-tt-destroy-callback-on-al.patch patches.suse/drm-msm-ratelimit-crtc-event-overflow-error.patch + patches.suse/0014-net-mlx5e-vxlan-Use-RCU-for-vxlan-table-lookup.patch patches.suse/0005-ipvs-allow-connection-reuse-for-unconfirmed-conntrac.patch patches.suse/0004-net-make-symbol-flush_works-static.patch patches.suse/net-ena-Fix-using-plain-integer-as-NULL-pointer-in-e.patch @@ -55309,10 +55360,20 @@ patches.suse/0003-Bluetooth-Prevent-out-of-bounds-read-in-hci_inquiry_.patch patches.suse/Bluetooth-Fix-update-of-connection-state-in-hci_encr.patch patches.suse/0003-openvswitch-Prevent-kernel-infoleak-in-ovs_ct_put_ke.patch + patches.suse/0013-net-mvpp2-fix-memory-leak-in-mvpp2_rx.patch patches.suse/0002-net-gre-recompute-gre-csum-for-sctp-over-gre-tunnels.patch + patches.suse/0012-net-ethernet-aquantia-Fix-wrong-return-value.patch patches.suse/liquidio-Fix-wrong-return-value-in-cn23xx_get_pf_num.patch + patches.suse/0011-net-spider_net-Fix-the-size-used-in-a-dma_free_coher.patch + patches.suse/0010-fsl-fman-use-32-bit-unsigned-integer.patch + patches.suse/0009-fsl-fman-fix-dereference-null-return-value.patch + patches.suse/0008-fsl-fman-fix-unreachable-code.patch + patches.suse/0007-fsl-fman-check-dereferencing-null-pointer.patch + 
patches.suse/0006-fsl-fman-fix-eth-hash-table-allocation.patch + patches.suse/0005-net-mlx5-Delete-extra-dump-stack-that-gives-nothing.patch patches.suse/wl1251-fix-always-return-0-error.patch patches.suse/dpaa2-eth-Fix-passing-zero-to-PTR_ERR-warning.patch + patches.suse/msft-hv-2126-hv_netvsc-do-not-use-VF-device-if-link-is-down.patch patches.suse/selftests-livepatch-simplify-test-klp-callbacks-busy-target-tests.patch patches.suse/selftests-livepatch-rework-test-klp-shadow-vars.patch patches.suse/selftests-livepatch-more-verification-in-test-klp-shadow-vars.patch @@ -55335,6 +55396,18 @@ patches.suse/ALSA-pci-delete-repeated-words-in-comments.patch patches.suse/staging-rtl8192u-fix-a-dubious-looking-mask-before-a.patch patches.suse/vgacon-fix-out-of-bounds-write-to-the-scrollback-buf.patch + patches.suse/scsi-Fix-trivial-spelling.patch + patches.suse/scsi-qla2xxx-Check-the-size-of-struct-fcp_hdr-at-com.patch + patches.suse/scsi-qla2xxx-Remove-the-__packed-annotation-from-str.patch + patches.suse/scsi-qla2xxx-Make-qla82xx_flash_wait_write_finish-ea.patch + patches.suse/scsi-qla2xxx-Initialize-n-before-using-it.patch + patches.suse/scsi-qla2xxx-Remove-a-superfluous-cast.patch + patches.suse/scsi-qla2xxx-Make-__qla2x00_alloc_iocbs-initialize-3.patch + patches.suse/scsi-qla2xxx-Fix-a-Coverity-complaint-in-qla2100_fw_.patch + patches.suse/scsi-qla2xxx-Make-qla2x00_restart_isp-easier-to-read.patch + patches.suse/scsi-qla2xxx-Introduce-a-function-for-computing-the-.patch + patches.suse/scsi-qla2xxx-Change-in-PUREX-to-handle-FPIN-ELS-requ.patch + patches.suse/scsi-qla2xxx-SAN-congestion-management-implementatio.patch patches.suse/scsi-lpfc-Fix-unused-assignment-in-lpfc_sli4_bsg_lin.patch patches.suse/scsi-lpfc-Fix-missing-MDS-functionality.patch patches.suse/scsi-lpfc-Fix-NVMe-rport-deregister-and-registration.patch @@ -55351,6 +55424,7 @@ patches.suse/scsi-lpfc-Fix-inconsistent-indenting.patch patches.suse/scsi-lpfc-Fix-interrupt-assignments-when-multiple-ve.patch patches.suse/scsi-lpfc-Fix-less-than-zero-comparison-of-unsigned-.patch + patches.suse/scsi-qla2xxx-Address-a-set-of-sparse-warnings.patch patches.suse/cifs-merge-__-cifs-smb2-_reconnect-into-cifs_tree_connect-.patch patches.suse/cifs-reduce-number-of-referral-requests-in-DFS-link-lookups.patch patches.suse/cifs-rename-reconn_inval_dfs_target-.patch @@ -55373,15 +55447,20 @@ patches.suse/powerpc-Allow-4224-bytes-of-stack-expansion-for-the-.patch patches.suse/powerpc-boot-Fix-CONFIG_PPC_MPC52XX-references.patch patches.suse/powerpc-pseries-hotplug-cpu-Remove-double-free-in-er.patch + patches.suse/0001-xen-balloon-fix-accounting-in-alloc_xenballooned_pag.patch + patches.suse/0001-xen-balloon-make-the-balloon-wait-interruptible.patch + patches.suse/0001-Revert-xen-balloon-Fix-crash-when-ballooning-on-x86-.patch patches.suse/xfs-fix-reflink-quota-reservation-accounting-error.patch patches.suse/xfs-fix-inode-allocation-block-res-calculation-prece.patch patches.suse/ocfs2-fix-remounting-needed-after-setfacl-command.patch patches.suse/ocfs2-change-slot-number-type-s16-to-u16.patch patches.suse/mm-filemap-clear-idle-flag-for-writes.patch + patches.suse/media-budget-core-Improve-exception-handling-in-budg.patch patches.suse/media-omap3isp-Add-missed-v4l2_ctrl_handler_free-for.patch patches.suse/media-firewire-Using-uninitialized-values-in-node_pr.patch patches.suse/media-exynos4-is-Add-missed-check-for-pinctrl_lookup.patch patches.suse/go7007-add-sanity-checking-for-endpoints.patch + patches.suse/media-vpss-clean-up-resources-in-init.patch 
patches.suse/tracepoint-mark-_tracepoint_string-s-_used.patch patches.suse/tracing-use-trace_sched_process_free-instead-of-exit-for-pid-tracing.patch patches.suse/PCI-ASPM-Add-missing-newline-in-sysfs-policy.patch @@ -55393,6 +55472,7 @@ patches.suse/power-supply-check-if-calc_soc-succeeded-in-pm860x_i.patch patches.suse/pinctrl-single-fix-pcs_parse_pinconf-return-value.patch patches.suse/pinctrl-single-fix-function-name-in-documentation.patch + patches.suse/Input-psmouse-add-a-newline-when-printing-proto-by-s.patch patches.suse/Input-sentelic-fix-error-return-when-fsp_reg_write-f.patch patches.suse/include-linux-poison.h-remove-obsolete-comment.patch patches.suse/0006-drm-fix-drm_dp_mst_port-refcount-leaks-in-drm_dp_mst.patch @@ -55411,9 +55491,11 @@ patches.suse/i2c-rcar-slave-only-send-STOP-event-when-we-have-bee.patch patches.suse/0001-net-Set-fput_needed-iff-FDPUT_FPUT-is-set.patch patches.suse/vmxnet3-use-correct-tcp-hdr-length-when-packet-is-en.patch + patches.suse/0004-net-qcom-emac-add-missed-clk_disable_unprepare-in-er.patch patches.suse/net-stmmac-dwmac1000-provide-multicast-filter-fallba.patch patches.suse/net-ethernet-stmmac-Disable-hardware-multicast-filte.patch patches.suse/msft-hv-2127-Drivers-hv-vmbus-Only-notify-Hyper-V-for-die-events-.patch + patches.suse/0001-drm-xen-front-Fix-misused-IS_ERR_OR_NULL-checks.patch patches.suse/ALSA-usb-audio-fix-spelling-mistake-buss-bus.patch patches.suse/ALSA-usb-audio-Creative-USB-X-Fi-Pro-SB1095-volume-k.patch patches.suse/ALSA-hda-realtek-Fix-pin-default-on-Intel-NUC-8-Rugg.patch @@ -55430,6 +55512,7 @@ patches.suse/mfd-arizona-Ensure-32k-clock-is-put-on-driver-unbind.patch patches.suse/mfd-dln2-Run-event-handler-loop-under-spinlock.patch patches.suse/cifs-Fix-an-error-pointer-dereference-in-cifs_mount-.patch + patches.suse/0002-block-check-queue-s-limits.discard_granularity-in-__.patch patches.suse/md-cluster-Fix-potential-error-pointer-dereference-i.patch patches.suse/i40e-Set-RX_ONLY-mode-for-unicast-promiscuous-on-VLA.patch patches.suse/i40e-Fix-crash-during-removing-i40e-driver.patch @@ -55437,11 +55520,13 @@ patches.suse/bonding-show-saner-speed-for-broadcast-mode.patch patches.suse/bonding-fix-a-potential-double-unregister.patch patches.suse/ipvlan-fix-device-features.patch + patches.suse/drm-amd-display-fix-pow-crashing-when-given-base-0.patch patches.suse/ALSA-hda-realtek-Add-quirk-for-Samsung-Galaxy-Flex-B.patch patches.suse/ALSA-usb-audio-Add-capture-support-for-Saffire-6-USB.patch patches.suse/ALSA-usb-audio-Update-documentation-comment-for-MS21.patch patches.suse/ALSA-hda-realtek-Add-model-alc298-samsung-headphone.patch patches.suse/ALSA-usb-audio-ignore-broken-processing-extension-un.patch + patches.suse/ASoC-intel-Fix-memleak-in-sst_media_open.patch patches.suse/ALSA-hda-realtek-Add-quirk-for-Samsung-Galaxy-Book-I.patch patches.suse/ext4-fix-potential-negative-array-index-in-do_split.patch patches.suse/jbd2-add-the-missing-unlock_buffer-in-the-error-path.patch @@ -55450,15 +55535,44 @@ patches.suse/ext4-check-journal-inode-extents-more-carefully.patch patches.suse/ext4-fix-checking-of-directory-entry-validity-for-in.patch patches.suse/mm-vunmap-add-cond_resched-in-vunmap_pmd_range.patch + patches.suse/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch + patches.suse/scsi-qla2xxx-Use-MBX_TOV_SECONDS-for-mailbox-command.patch + patches.suse/scsi-qla2xxx-Flush-all-sessions-on-zone-disable.patch + patches.suse/scsi-qla2xxx-Flush-I-O-on-zone-disable.patch + 
patches.suse/scsi-qla2xxx-Indicate-correct-supported-speeds-for-M.patch + patches.suse/scsi-qla2xxx-Fix-login-timeout.patch + patches.suse/scsi-qla2xxx-Reduce-noisy-debug-message.patch + patches.suse/scsi-qla2xxx-Allow-ql2xextended_error_logging-specia.patch + patches.suse/scsi-qla2xxx-Fix-WARN_ON-in-qla_nvme_register_hba.patch + patches.suse/scsi-qla2xxx-Check-if-FW-supports-MQ-before-enabling.patch + patches.suse/scsi-qla2xxx-Fix-null-pointer-access-during-disconne.patch + patches.suse/Revert-scsi-qla2xxx-Fix-crash-on-qla2x00_mailbox_com.patch + patches.suse/Revert-scsi-qla2xxx-Disable-T10-DIF-feature-with-FC-.patch patches.suse/bonding-fix-active-backup-failover-for-current-ARP-s.patch patches.suse/net-ena-Prevent-reset-after-device-destruction.patch patches.suse/net-ena-Change-WARN_ON-expression-in-ena_del_napi_in.patch patches.suse/net-ena-Make-missed_tx-stat-incremental.patch + patches.suse/msft-hv-2130-hv_netvsc-Remove-unlikely-from-netvsc_select_queue.patch + patches.suse/msft-hv-2131-hv_netvsc-Fix-the-queue_mapping-in-netvsc_vf_xmit.patch patches.suse/net-dsa-b53-check-for-timeout.patch patches.suse/powerpc-pseries-hotplug-cpu-wait-indefinitely-for-vC.patch patches.suse/powerpc-pseries-Do-not-initiate-shutdown-when-system.patch patches.suse/nvme-multipath-round-robin-fix-single-non-optimized-path-case.patch patches.suse/nvme-multipath-round-robin-eliminate-fallback-variable.patch + patches.suse/serial-pl011-Don-t-leak-amba_ports-entry-on-driver-r.patch + patches.suse/serial-pl011-Fix-oops-on-EPROBE_DEFER.patch + patches.suse/serial-8250-change-lock-order-in-serial8250_do_start.patch + patches.suse/drm-msm-adreno-fix-updating-ring-fence.patch + patches.suse/PM-sleep-core-Fix-the-handling-of-pending-runtime-re.patch + patches.suse/device-property-Fix-the-secondary-firmware-node-hand.patch + patches.suse/i2c-rcar-in-slave-mode-clear-NACK-earlier.patch + patches.suse/USB-cdc-acm-rework-notification_buffer-resizing.patch + patches.suse/usb-gadget-f_tcm-Fix-some-resource-leaks-in-some-err.patch + patches.suse/usb-host-ohci-exynos-Fix-error-handling-in-exynos_oh.patch + patches.suse/USB-Ignore-UAS-for-JMicron-JMS567-ATA-ATAPI-Bridge.patch + + # davem/net + patches.suse/ibmvnic-fix-NULL-tx_pools-and-rx_tools-issue-at-do_r.patch # jejb/scsi for-next patches.suse/scsi-smartpqi-identify-physical-devices-without-issuing-inquiry.patch @@ -55495,6 +55609,7 @@ patches.suse/bsc1170284-ixgbe_dont_check_firmware_errors.patch patches.suse/nvdimm-Avoid-race-between-probe-and-reading-device-a.patch patches.suse/ibmveth-Fix-use-of-ibmveth-in-a-bridge.patch + patches.suse/char-virtio-Select-VIRTIO-from-VIRTIO_CONSOLE.patch ######################################################## # end of sorted patches @@ -56294,6 +56409,8 @@ patches.kabi/SUNRPC-defer-slow-parts-of-rpc_free_client-to-a-work-kabi.patch patches.kabi/sock_cgroup_data-kabi-fix.patch patches.kabi/genetlink-remove-genl_bind.patch + patches.kabi/kabi-hide-new-parameter-of-ip6_dst_lookup_flow.patch + patches.kabi/kabi-mask-changes-to-struct-ipv6_stub.patch ######################################################## # You'd better have a good reason for adding a patch