From 0c761028f562f6b1e01fcf32ae8284f07a4fc44f Mon Sep 17 00:00:00 2001
From: Nicolas Morey
Date: May 26 2023 17:43:05 +0000
Subject: Merge remote-tracking branch 'origin/cve/linux-4.12' into users/nmoreychaisemartin/SLE12-SP5/merge-cve-4.12

* origin/cve/linux-4.12:
  RDMA/core: Refactor rdma_bind_addr (bsc#1210629 CVE-2023-2176)
  RDMA/cma: Ensure rdma_addr_cancel() happens before issuing more requests (bsc#1210629 CVE-2023-2176)
  RDMA/cma: Do not change route.addr.src_addr outside state checks (bsc#1210629 CVE-2023-2176)
  RDMA/cma: Make the locking for automatic state transition more clear (bsc#1210629 CVE-2023-2176)

---

diff --git a/patches.suse/RDMA-cma-Do-not-change-route.addr.src_addr-outside-s.patch b/patches.suse/RDMA-cma-Do-not-change-route.addr.src_addr-outside-s.patch
new file mode 100644
index 0000000..9c3bef2
--- /dev/null
+++ b/patches.suse/RDMA-cma-Do-not-change-route.addr.src_addr-outside-s.patch
@@ -0,0 +1,119 @@
+From 22e9f71072fa605cbf033158db58e0790101928d Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe
+Date: Wed, 23 Feb 2022 11:23:57 -0400
+Subject: [PATCH 1/1] RDMA/cma: Do not change route.addr.src_addr outside state
+ checks
+Git-commit: 22e9f71072fa605cbf033158db58e0790101928d
+Patch-mainline: v5.17
+References: bsc#1210629 CVE-2023-2176
+
+If the state is not idle then resolve_prepare_src() should immediately
+fail and no change to global state should happen. However, it
+unconditionally overwrites the src_addr trying to build a temporary any
+address.
+
+For instance if the state is already RDMA_CM_LISTEN then this will corrupt
+the src_addr and would cause the test in cma_cancel_operation():
+
+           if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
+
+Which would manifest as this trace from syzkaller:
+
+  BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
+  Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
+
+  CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
+  Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+  Call Trace:
+   __dump_stack lib/dump_stack.c:79 [inline]
+   dump_stack+0x141/0x1d7 lib/dump_stack.c:120
+   print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
+   __kasan_report mm/kasan/report.c:399 [inline]
+   kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
+   __list_add_valid+0x93/0xa0 lib/list_debug.c:26
+   __list_add include/linux/list.h:67 [inline]
+   list_add_tail include/linux/list.h:100 [inline]
+   cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
+   rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
+   ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
+   ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
+   vfs_write+0x28e/0xa30 fs/read_write.c:603
+   ksys_write+0x1ee/0x250 fs/read_write.c:658
+   do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
+   entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+This is indicating that an rdma_id_private was destroyed without doing
+cma_cancel_listens().
+
+Instead of trying to re-use the src_addr memory to indirectly create an
+any address derived from the dst build one explicitly on the stack and
+bind to that as any other normal flow would do. rdma_bind_addr() will copy
+it over the src_addr once it knows the state is valid.
+ +This is similar to commit bc0bdc5afaa7 ("RDMA/cma: Do not change +route.addr.src_addr.ss_family") + +Link: https://lore.kernel.org/r/0-v2-e975c8fd9ef2+11e-syz_cma_srcaddr_jgg@nvidia.com +Cc: stable@vger.kernel.org +Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear") +Reported-by: syzbot+c94a3675a626f6333d74@syzkaller.appspotmail.com +Reviewed-by: Leon Romanovsky +Signed-off-by: Jason Gunthorpe +Acked-by: Nicolas Morey +--- + drivers/infiniband/core/cma.c | 40 +++++++++++++++++++++-------------- + 1 file changed, 24 insertions(+), 16 deletions(-) + +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index c447526288f4..50c53409ceb6 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -3370,22 +3370,30 @@ err: + static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, + const struct sockaddr *dst_addr) + { +- if (!src_addr || !src_addr->sa_family) { +- src_addr = (struct sockaddr *) &id->route.addr.src_addr; +- src_addr->sa_family = dst_addr->sa_family; +- if (IS_ENABLED(CONFIG_IPV6) && +- dst_addr->sa_family == AF_INET6) { +- struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; +- struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; +- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; +- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) +- id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; +- } else if (dst_addr->sa_family == AF_IB) { +- ((struct sockaddr_ib *) src_addr)->sib_pkey = +- ((struct sockaddr_ib *) dst_addr)->sib_pkey; +- } ++ struct sockaddr_storage zero_sock = {}; ++ ++ if (src_addr && src_addr->sa_family) ++ return rdma_bind_addr(id, src_addr); ++ ++ /* ++ * When the src_addr is not specified, automatically supply an any addr ++ */ ++ zero_sock.ss_family = dst_addr->sa_family; ++ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { ++ struct sockaddr_in6 *src_addr6 = ++ (struct sockaddr_in6 *)&zero_sock; ++ struct sockaddr_in6 *dst_addr6 = ++ (struct sockaddr_in6 *)dst_addr; ++ ++ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; ++ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) ++ id->route.addr.dev_addr.bound_dev_if = ++ dst_addr6->sin6_scope_id; ++ } else if (dst_addr->sa_family == AF_IB) { ++ ((struct sockaddr_ib *)&zero_sock)->sib_pkey = ++ ((struct sockaddr_ib *)dst_addr)->sib_pkey; + } +- return rdma_bind_addr(id, src_addr); ++ return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); + } + + /* +-- +2.39.1.1.gbe015eda0162 + diff --git a/patches.suse/RDMA-cma-Make-the-locking-for-automatic-state-transi.patch b/patches.suse/RDMA-cma-Make-the-locking-for-automatic-state-transi.patch new file mode 100644 index 0000000..73b5765 --- /dev/null +++ b/patches.suse/RDMA-cma-Make-the-locking-for-automatic-state-transi.patch @@ -0,0 +1,129 @@ +From 732d41c545bb359cbb8c94698bdc1f8bcf82279c Mon Sep 17 00:00:00 2001 +From: Jason Gunthorpe +Date: Wed, 2 Sep 2020 11:11:16 +0300 +Subject: [PATCH 1/1] RDMA/cma: Make the locking for automatic state transition + more clear +Git-commit: 732d41c545bb359cbb8c94698bdc1f8bcf82279c +Patch-mainline: v5.10 +References: bsc#1210629 CVE-2023-2176 + +Re-organize things so the state variable is not read unlocked. The first +attempt to go directly from ADDR_BOUND immediately tells us if the ID is +already bound, if we can't do that then the attempt inside +rdma_bind_addr() to go from IDLE to ADDR_BOUND confirms the ID needs +binding. 
+ +Link: https://lore.kernel.org/r/20200902081122.745412-3-leon@kernel.org +Signed-off-by: Leon Romanovsky +Signed-off-by: Jason Gunthorpe +Acked-by: Nicolas Morey +--- + drivers/infiniband/core/cma.c | 67 +++++++++++++++++++++++------------ + 1 file changed, 45 insertions(+), 22 deletions(-) + +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 6f492906939b..11d369b7faca 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -3098,32 +3098,54 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, + return rdma_bind_addr(id, src_addr); + } + +-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, +- const struct sockaddr *dst_addr, unsigned long timeout_ms) ++/* ++ * If required, resolve the source address for bind and leave the id_priv in ++ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior ++ * calls made by ULP, a previously bound ID will not be re-bound and src_addr is ++ * ignored. ++ */ ++static int resolve_prepare_src(struct rdma_id_private *id_priv, ++ struct sockaddr *src_addr, ++ const struct sockaddr *dst_addr) + { +- struct rdma_id_private *id_priv; + int ret; + +- id_priv = container_of(id, struct rdma_id_private, id); + memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); +- if (id_priv->state == RDMA_CM_IDLE) { +- ret = cma_bind_addr(id, src_addr, dst_addr); +- if (ret) { +- memset(cma_dst_addr(id_priv), 0, +- rdma_addr_size(dst_addr)); +- return ret; ++ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { ++ /* For a well behaved ULP state will be RDMA_CM_IDLE */ ++ ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); ++ if (ret) ++ goto err_dst; ++ if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, ++ RDMA_CM_ADDR_QUERY))) { ++ ret = -EINVAL; ++ goto err_dst; + } + } + + if (cma_family(id_priv) != dst_addr->sa_family) { +- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_state; + } ++ return 0; + +- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { +- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); +- return -EINVAL; +- } ++err_state: ++ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); ++err_dst: ++ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); ++ return ret; ++} ++ ++int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, ++ const struct sockaddr *dst_addr, unsigned long timeout_ms) ++{ ++ struct rdma_id_private *id_priv = ++ container_of(id, struct rdma_id_private, id); ++ int ret; ++ ++ ret = resolve_prepare_src(id_priv, src_addr, dst_addr); ++ if (ret) ++ return ret; + + if (cma_any_addr(dst_addr)) { + ret = cma_resolve_loopback(id_priv); +@@ -3511,20 +3533,21 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, + + int rdma_listen(struct rdma_cm_id *id, int backlog) + { +- struct rdma_id_private *id_priv; ++ struct rdma_id_private *id_priv = ++ container_of(id, struct rdma_id_private, id); + int ret; + +- id_priv = container_of(id, struct rdma_id_private, id); +- if (id_priv->state == RDMA_CM_IDLE) { ++ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { ++ /* For a well behaved ULP state will be RDMA_CM_IDLE */ + id->route.addr.src_addr.ss_family = AF_INET; + ret = rdma_bind_addr(id, cma_src_addr(id_priv)); + if (ret) + return ret; ++ if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, ++ RDMA_CM_LISTEN))) ++ return -EINVAL; + } + +- 
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) +- return -EINVAL; +- + if (id_priv->reuseaddr) { + ret = cma_bind_listen(id_priv); + if (ret) +-- +2.39.1.1.gbe015eda0162 + diff --git a/patches.suse/RDMA-core-Refactor-rdma_bind_addr.patch b/patches.suse/RDMA-core-Refactor-rdma_bind_addr.patch new file mode 100644 index 0000000..fb09654 --- /dev/null +++ b/patches.suse/RDMA-core-Refactor-rdma_bind_addr.patch @@ -0,0 +1,330 @@ +From 8d037973d48c026224ab285e6a06985ccac6f7bf Mon Sep 17 00:00:00 2001 +From: Patrisious Haddad +Date: Wed, 4 Jan 2023 10:01:38 +0200 +Subject: [PATCH 1/1] RDMA/core: Refactor rdma_bind_addr +Git-commit: 8d037973d48c026224ab285e6a06985ccac6f7bf +Patch-mainline: v6.3-rc1 +References: bsc#1210629 CVE-2023-2176 + +Refactor rdma_bind_addr function so that it doesn't require that the +cma destination address be changed before calling it. + +So now it will update the destination address internally only when it is +really needed and after passing all the required checks. + +Which in turn results in a cleaner and more sensible call and error +handling flows for the functions that call it directly or indirectly. + +Signed-off-by: Patrisious Haddad +Reported-by: Wei Chen +Reviewed-by: Mark Zhang +Link: https://lore.kernel.org/r/3d0e9a2fd62bc10ba02fed1c7c48a48638952320.1672819273.git.leonro@nvidia.com +Signed-off-by: Leon Romanovsky +Acked-by: Nicolas Morey +--- + drivers/infiniband/core/cma.c | 253 +++++++++++++++++----------------- + 1 file changed, 130 insertions(+), 123 deletions(-) + +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 68721ff10255..b9da636fe1fb 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -3541,121 +3541,6 @@ err: + return ret; + } + +-static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, +- const struct sockaddr *dst_addr) +-{ +- struct sockaddr_storage zero_sock = {}; +- +- if (src_addr && src_addr->sa_family) +- return rdma_bind_addr(id, src_addr); +- +- /* +- * When the src_addr is not specified, automatically supply an any addr +- */ +- zero_sock.ss_family = dst_addr->sa_family; +- if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { +- struct sockaddr_in6 *src_addr6 = +- (struct sockaddr_in6 *)&zero_sock; +- struct sockaddr_in6 *dst_addr6 = +- (struct sockaddr_in6 *)dst_addr; +- +- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; +- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) +- id->route.addr.dev_addr.bound_dev_if = +- dst_addr6->sin6_scope_id; +- } else if (dst_addr->sa_family == AF_IB) { +- ((struct sockaddr_ib *)&zero_sock)->sib_pkey = +- ((struct sockaddr_ib *)dst_addr)->sib_pkey; +- } +- return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); +-} +- +-/* +- * If required, resolve the source address for bind and leave the id_priv in +- * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior +- * calls made by ULP, a previously bound ID will not be re-bound and src_addr is +- * ignored. 
+- */ +-static int resolve_prepare_src(struct rdma_id_private *id_priv, +- struct sockaddr *src_addr, +- const struct sockaddr *dst_addr) +-{ +- int ret; +- +- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); +- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { +- /* For a well behaved ULP state will be RDMA_CM_IDLE */ +- ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); +- if (ret) +- goto err_dst; +- if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, +- RDMA_CM_ADDR_QUERY))) { +- ret = -EINVAL; +- goto err_dst; +- } +- } +- +- if (cma_family(id_priv) != dst_addr->sa_family) { +- ret = -EINVAL; +- goto err_state; +- } +- return 0; +- +-err_state: +- cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); +-err_dst: +- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr)); +- return ret; +-} +- +-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, +- const struct sockaddr *dst_addr, unsigned long timeout_ms) +-{ +- struct rdma_id_private *id_priv = +- container_of(id, struct rdma_id_private, id); +- int ret; +- +- ret = resolve_prepare_src(id_priv, src_addr, dst_addr); +- if (ret) +- return ret; +- +- if (cma_any_addr(dst_addr)) { +- ret = cma_resolve_loopback(id_priv); +- } else { +- if (dst_addr->sa_family == AF_IB) { +- ret = cma_resolve_ib_addr(id_priv); +- } else { +- /* +- * The FSM can return back to RDMA_CM_ADDR_BOUND after +- * rdma_resolve_ip() is called, eg through the error +- * path in addr_handler(). If this happens the existing +- * request must be canceled before issuing a new one. +- * Since canceling a request is a bit slow and this +- * oddball path is rare, keep track once a request has +- * been issued. The track turns out to be a permanent +- * state since this is the only cancel as it is +- * immediately before rdma_resolve_ip(). 
+- */ +- if (id_priv->used_resolve_ip) +- rdma_addr_cancel(&id->route.addr.dev_addr); +- else +- id_priv->used_resolve_ip = 1; +- ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, +- &id->route.addr.dev_addr, +- timeout_ms, addr_handler, +- false, id_priv); +- } +- } +- if (ret) +- goto err; +- +- return 0; +-err: +- cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); +- return ret; +-} +-EXPORT_SYMBOL(rdma_resolve_addr); +- + int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) + { + struct rdma_id_private *id_priv; +@@ -4058,27 +3943,26 @@ err: + } + EXPORT_SYMBOL(rdma_listen); + +-int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) ++static int rdma_bind_addr_dst(struct rdma_id_private *id_priv, ++ struct sockaddr *addr, const struct sockaddr *daddr) + { +- struct rdma_id_private *id_priv; ++ struct sockaddr *id_daddr; + int ret; +- struct sockaddr *daddr; + + if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && + addr->sa_family != AF_IB) + return -EAFNOSUPPORT; + +- id_priv = container_of(id, struct rdma_id_private, id); + if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) + return -EINVAL; + +- ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); ++ ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr); + if (ret) + goto err1; + + memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); + if (!cma_any_addr(addr)) { +- ret = cma_translate_addr(addr, &id->route.addr.dev_addr); ++ ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr); + if (ret) + goto err1; + +@@ -4098,8 +3982,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) + } + #endif + } +- daddr = cma_dst_addr(id_priv); +- daddr->sa_family = addr->sa_family; ++ id_daddr = cma_dst_addr(id_priv); ++ if (daddr != id_daddr) ++ memcpy(id_daddr, daddr, rdma_addr_size(addr)); ++ id_daddr->sa_family = addr->sa_family; + + ret = cma_get_port(id_priv); + if (ret) +@@ -4115,6 +4001,127 @@ err1: + cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); + return ret; + } ++ ++static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, ++ const struct sockaddr *dst_addr) ++{ ++ struct rdma_id_private *id_priv = ++ container_of(id, struct rdma_id_private, id); ++ struct sockaddr_storage zero_sock = {}; ++ ++ if (src_addr && src_addr->sa_family) ++ return rdma_bind_addr_dst(id_priv, src_addr, dst_addr); ++ ++ /* ++ * When the src_addr is not specified, automatically supply an any addr ++ */ ++ zero_sock.ss_family = dst_addr->sa_family; ++ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { ++ struct sockaddr_in6 *src_addr6 = ++ (struct sockaddr_in6 *)&zero_sock; ++ struct sockaddr_in6 *dst_addr6 = ++ (struct sockaddr_in6 *)dst_addr; ++ ++ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; ++ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) ++ id->route.addr.dev_addr.bound_dev_if = ++ dst_addr6->sin6_scope_id; ++ } else if (dst_addr->sa_family == AF_IB) { ++ ((struct sockaddr_ib *)&zero_sock)->sib_pkey = ++ ((struct sockaddr_ib *)dst_addr)->sib_pkey; ++ } ++ return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr); ++} ++ ++/* ++ * If required, resolve the source address for bind and leave the id_priv in ++ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior ++ * calls made by ULP, a previously bound ID will not be re-bound and src_addr is ++ * ignored. 
++ */ ++static int resolve_prepare_src(struct rdma_id_private *id_priv, ++ struct sockaddr *src_addr, ++ const struct sockaddr *dst_addr) ++{ ++ int ret; ++ ++ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { ++ /* For a well behaved ULP state will be RDMA_CM_IDLE */ ++ ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); ++ if (ret) ++ return ret; ++ if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, ++ RDMA_CM_ADDR_QUERY))) ++ return -EINVAL; ++ ++ } ++ ++ if (cma_family(id_priv) != dst_addr->sa_family) { ++ ret = -EINVAL; ++ goto err_state; ++ } ++ return 0; ++ ++err_state: ++ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); ++ return ret; ++} ++ ++int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, ++ const struct sockaddr *dst_addr, unsigned long timeout_ms) ++{ ++ struct rdma_id_private *id_priv = ++ container_of(id, struct rdma_id_private, id); ++ int ret; ++ ++ ret = resolve_prepare_src(id_priv, src_addr, dst_addr); ++ if (ret) ++ return ret; ++ ++ if (cma_any_addr(dst_addr)) { ++ ret = cma_resolve_loopback(id_priv); ++ } else { ++ if (dst_addr->sa_family == AF_IB) { ++ ret = cma_resolve_ib_addr(id_priv); ++ } else { ++ /* ++ * The FSM can return back to RDMA_CM_ADDR_BOUND after ++ * rdma_resolve_ip() is called, eg through the error ++ * path in addr_handler(). If this happens the existing ++ * request must be canceled before issuing a new one. ++ * Since canceling a request is a bit slow and this ++ * oddball path is rare, keep track once a request has ++ * been issued. The track turns out to be a permanent ++ * state since this is the only cancel as it is ++ * immediately before rdma_resolve_ip(). ++ */ ++ if (id_priv->used_resolve_ip) ++ rdma_addr_cancel(&id->route.addr.dev_addr); ++ else ++ id_priv->used_resolve_ip = 1; ++ ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, ++ &id->route.addr.dev_addr, ++ timeout_ms, addr_handler, ++ false, id_priv); ++ } ++ } ++ if (ret) ++ goto err; ++ ++ return 0; ++err: ++ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); ++ return ret; ++} ++EXPORT_SYMBOL(rdma_resolve_addr); ++ ++int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) ++{ ++ struct rdma_id_private *id_priv = ++ container_of(id, struct rdma_id_private, id); ++ ++ return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv)); ++} + EXPORT_SYMBOL(rdma_bind_addr); + + static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) +-- +2.39.1.1.gbe015eda0162 + diff --git a/series.conf b/series.conf index 06f19bb..1565ba6 100644 --- a/series.conf +++ b/series.conf @@ -58649,6 +58649,7 @@ patches.suse/RDMA-qedr-Fix-doorbell-setting.patch patches.suse/RDMA-qedr-Fix-use-of-uninitialized-field.patch patches.suse/RDMA-qedr-Fix-inline-size-returned-for-iWARP.patch + patches.suse/RDMA-cma-Make-the-locking-for-automatic-state-transi.patch patches.suse/RDMA-hns-Set-the-unsupported-wr-opcode.patch patches.suse/RDMA-ucma-Rework-ucma_migrate_id-to-avoid-races-with.patch patches.suse/RDMA-hns-Correct-typo-of-hns_roce_create_cq.patch @@ -62351,6 +62352,7 @@ patches.suse/USB-serial-option-add-support-for-DW5829e.patch patches.suse/USB-serial-option-add-Telit-LE910R1-compositions.patch patches.suse/RDMA-ib_srp-Fix-a-deadlock.patch + patches.suse/RDMA-cma-Do-not-change-route.addr.src_addr-outside-s.patch patches.suse/tracing-Dump-stacktrace-trigger-to-the-corresponding-instance.patch patches.suse/tracing-Have-traceon-and-traceoff-trigger-honor-the-instance.patch 
patches.suse/tracing-Ensure-trace-buffer-is-at-least-4096-bytes-large.patch @@ -63328,6 +63330,7 @@ patches.suse/fotg210-udc-Add-missing-completion-handler.patch patches.suse/usb-early-xhci-dbc-Fix-a-potential-out-of-bound-memo.patch patches.suse/applicom-Fix-PCI-device-refcount-leak-in-applicom_in.patch + patches.suse/RDMA-core-Refactor-rdma_bind_addr.patch patches.suse/powerpc-powernv-ioda-Skip-unallocated-resources-when.patch patches.suse/powerpc-pseries-lpar-add-missing-RTAS-retry-status-h.patch patches.suse/powerpc-pseries-lparcfg-add-missing-RTAS-retry-statu.patch
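
The cma/core changes above share one ordering idea: the wildcard ("any") source address is built in a stack-local struct sockaddr_storage and only committed to the shared rdma_id_private once the RDMA_CM state transition has been validated, so a rejected rdma_resolve_addr() can no longer corrupt the src_addr of an ID that is already listening. The following is a minimal userspace sketch of that pattern only, not the kernel code; fake_id, bind_checked() and the simplified resolve_prepare_src() here are illustrative stand-ins, not the RDMA CM API.

/* cve_2023_2176_pattern.c - illustrative sketch, not kernel code. */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

enum cm_state { CM_IDLE, CM_LISTEN };

struct fake_id {                        /* stand-in for rdma_id_private */
        enum cm_state state;
        struct sockaddr_storage src_addr;
};

/* Touches id->src_addr only after the state check has passed. */
static int bind_checked(struct fake_id *id, const struct sockaddr *src,
                        socklen_t len)
{
        if (id->state != CM_IDLE)
                return -1;              /* reject with no side effects */
        memcpy(&id->src_addr, src, len);
        return 0;
}

static int resolve_prepare_src(struct fake_id *id, const struct sockaddr *dst)
{
        /* Wildcard address assembled on the stack, never in shared state. */
        struct sockaddr_storage zero_sock = { 0 };

        zero_sock.ss_family = dst->sa_family;
        return bind_checked(id, (struct sockaddr *)&zero_sock,
                            sizeof(zero_sock));
}

int main(void)
{
        struct fake_id id = { .state = CM_LISTEN };     /* already listening */
        struct sockaddr_in dst = { .sin_family = AF_INET };

        /* The buggy flow overwrote id.src_addr before this check ran;
         * here the rejection leaves the bound address untouched. */
        if (resolve_prepare_src(&id, (struct sockaddr *)&dst) != 0)
                printf("rejected in state %d, src_addr left intact\n",
                       id.state);
        return 0;
}

The same discipline appears in the rdma_bind_addr refactor above: per its description, the destination address in the id is only updated after the required checks succeed, rather than being modified by callers beforehand.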