|
|
242d27 |
From d58c23c9254894d438ce5c516745cf694eac86b7 Mon Sep 17 00:00:00 2001
|
|
|
242d27 |
From: =?UTF-8?q?H=C3=A5kon=20Bugge?= <haakon.bugge@oracle.com>
|
|
|
242d27 |
Date: Tue, 25 May 2021 19:49:09 +0200
|
|
|
242d27 |
Subject: [PATCH 1/1] IB/core: Only update PKEY and GID caches on respective
|
|
|
242d27 |
events
|
|
|
242d27 |
MIME-Version: 1.0
|
|
|
242d27 |
Content-Type: text/plain; charset=UTF-8
|
|
|
242d27 |
Content-Transfer-Encoding: 8bit
|
|
|
242d27 |
Git-commit: d58c23c9254894d438ce5c516745cf694eac86b7
|
|
|
242d27 |
Patch-mainline: v5.14
|
|
|
242d27 |
References: git-fixes
|
|
|
242d27 |
|
|
|
242d27 |
Both the PKEY and GID tables in an HCA can hold in the order of hundreds
|
|
|
242d27 |
entries. Reading them is expensive. Partly because the API for retrieving
|
|
|
242d27 |
them only returns a single entry at a time. Further, on certain
|
|
|
242d27 |
implementations, e.g., CX-3, the VFs are paravirtualized in this respect
|
|
|
242d27 |
and have to rely on the PF driver to perform the read. This again demands
|
|
|
242d27 |
VF to PF communication.
|
|
|
242d27 |
|
|
|
242d27 |
IB Core's cache is refreshed on all events. Hence, filter the refresh of
|
|
|
242d27 |
the PKEY and GID caches based on the event received being
|
|
|
242d27 |
IB_EVENT_PKEY_CHANGE and IB_EVENT_GID_CHANGE respectively.
|
|
|
242d27 |
|
|
|
242d27 |
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
|
|
|
242d27 |
Link: https://lore.kernel.org/r/1621964949-28484-1-git-send-email-haakon.bugge@oracle.com
|
|
|
242d27 |
Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com>
|
|
|
242d27 |
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
|
|
|
242d27 |
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
|
|
|
242d27 |
Acked-by: Nicolas Morey-Chaisemartin <nmoreychaisemartin@suse.com>
|
|
|
242d27 |
---
|
|
|
242d27 |
drivers/infiniband/core/cache.c | 23 +++++++++++++++--------
|
|
|
242d27 |
1 file changed, 15 insertions(+), 8 deletions(-)
|
|
|
242d27 |
|
|
|
242d27 |
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
|
|
|
242d27 |
index 3b0991fedd81..d32045986109 100644
|
|
|
242d27 |
--- a/drivers/infiniband/core/cache.c
|
|
|
242d27 |
+++ b/drivers/infiniband/core/cache.c
|
|
|
242d27 |
@@ -1465,10 +1465,12 @@ err:
|
|
|
242d27 |
}
|
|
|
242d27 |
|
|
|
242d27 |
static int
|
|
|
242d27 |
-ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
|
|
|
242d27 |
+ib_cache_update(struct ib_device *device, u8 port, bool update_gids,
|
|
|
242d27 |
+ bool update_pkeys, bool enforce_security)
|
|
|
242d27 |
{
|
|
|
242d27 |
struct ib_port_attr *tprops = NULL;
|
|
|
242d27 |
- struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
|
|
|
242d27 |
+ struct ib_pkey_cache *pkey_cache = NULL;
|
|
|
242d27 |
+ struct ib_pkey_cache *old_pkey_cache = NULL;
|
|
|
242d27 |
int i;
|
|
|
242d27 |
int ret;
|
|
|
242d27 |
|
|
|
242d27 |
@@ -1432,14 +1434,16 @@ ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
|
|
|
242d27 |
goto err;
|
|
|
242d27 |
}
|
|
|
242d27 |
|
|
|
242d27 |
- if (!rdma_protocol_roce(device, port)) {
|
|
|
242d27 |
+ if (!rdma_protocol_roce(device, port) && update_gids) {
|
|
|
242d27 |
ret = config_non_roce_gid_cache(device, port,
|
|
|
242d27 |
tprops->gid_tbl_len);
|
|
|
242d27 |
if (ret)
|
|
|
242d27 |
goto err;
|
|
|
242d27 |
}
|
|
|
242d27 |
|
|
|
242d27 |
- if (tprops->pkey_tbl_len) {
|
|
|
242d27 |
+ update_pkeys &= !!tprops->pkey_tbl_len;
|
|
|
242d27 |
+
|
|
|
242d27 |
+ if (update_pkeys) {
|
|
|
242d27 |
pkey_cache = kmalloc(struct_size(pkey_cache, table,
|
|
|
242d27 |
tprops->pkey_tbl_len),
|
|
|
242d27 |
GFP_KERNEL);
|
|
|
242d27 |
@@ -1517,9 +1521,10 @@ ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
|
|
|
242d27 |
|
|
|
242d27 |
write_lock_irq(&device->cache_lock);
|
|
|
242d27 |
|
|
|
242d27 |
- old_pkey_cache = device->port_data[port].cache.pkey;
|
|
|
242d27 |
-
|
|
|
242d27 |
- device->port_data[port].cache.pkey = pkey_cache;
|
|
|
242d27 |
+ if (update_pkeys) {
|
|
|
242d27 |
+ old_pkey_cache = device->port_data[port].cache.pkey;
|
|
|
242d27 |
+ device->port_data[port].cache.pkey = pkey_cache;
|
|
|
242d27 |
+ }
|
|
|
242d27 |
device->port_data[port].cache.lmc = tprops->lmc;
|
|
|
242d27 |
device->port_data[port].cache.port_state = tprops->state;
|
|
|
242d27 |
|
|
|
242d27 |
@@ -1551,6 +1556,8 @@ static void ib_cache_event_task(struct work_struct *_work)
|
|
|
242d27 |
* the cache.
|
|
|
242d27 |
*/
|
|
|
242d27 |
ret = ib_cache_update(work->event.device, work->event.element.port_num,
|
|
|
242d27 |
+ work->event.event == IB_EVENT_GID_CHANGE,
|
|
|
242d27 |
+ work->event.event == IB_EVENT_PKEY_CHANGE,
|
|
|
242d27 |
work->enforce_security);
|
|
|
242d27 |
|
|
|
242d27 |
/* GID event is notified already for individual GID entries by
|
|
|
242d27 |
@@ -1624,7 +1631,7 @@ int ib_cache_setup_one(struct ib_device *device)
|
|
|
242d27 |
return err;
|
|
|
242d27 |
|
|
|
242d27 |
rdma_for_each_port (device, p) {
|
|
|
242d27 |
- err = ib_cache_update(device, p, true);
|
|
|
242d27 |
+ err = ib_cache_update(device, p, true, true, true);
|
|
|
242d27 |
if (err)
|
|
|
242d27 |
return err;
|
|
|
242d27 |
}
|
|
|
242d27 |
--
|
|
|
242d27 |
2.38.0.1.gee35aeee4b76
|
|
|
242d27 |
|