From: Parav Pandit <parav@mellanox.com>
Date: Tue, 22 May 2018 20:33:46 +0300
Subject: IB/core: Introduce and use rdma_gid_table()
Patch-mainline: v4.18-rc1
Git-commit: 724631a9c6e98e39480c70047cc3fe2d1f25e1fc
References: bsc#1103992 FATE#326009

A gid table is looked up from the per-port cache in several places.
Introduce a tiny helper function, rdma_gid_table(), to avoid code
duplication at those call sites.
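
For illustration only (a summary of the change below, not an extra
hunk): every call site replaces the open-coded port-indexed lookup
with a single call to the helper:

	/* before: open-coded at each call site */
	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	/* after: one shared helper */
	table = rdma_gid_table(ib_dev, port);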

Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/core/cache.c |   25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -159,6 +159,11 @@ int ib_cache_gid_parse_type_str(const ch
 }
 EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
 
+static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
+{
+	return device->cache.ports[port - rdma_start_port(device)].gid;
+}
+
 static void del_roce_gid(struct ib_device *device, u8 port_num,
 			 struct ib_gid_table *table, int ix)
 {
@@ -376,7 +381,7 @@ static int __ib_cache_gid_add(struct ib_
 	if (rdma_is_zero_gid(gid))
 		return -EINVAL;
 
-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);
 
 	mutex_lock(&table->lock);
 
@@ -443,7 +448,7 @@ _ib_cache_gid_del(struct ib_device *ib_d
 	int ret = 0;
 	int ix;
 
-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);
 
 	mutex_lock(&table->lock);
 
@@ -482,7 +487,7 @@ int ib_cache_gid_del_all_netdev_gids(str
 	int ix;
 	bool deleted = false;
 
-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);
 
 	mutex_lock(&table->lock);
 
@@ -506,7 +511,7 @@ static int __ib_cache_gid_get(struct ib_
 {
 	struct ib_gid_table *table;
 
-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);
 
 	if (index < 0 || index >= table->sz)
 		return -EINVAL;
@@ -599,7 +604,7 @@ int ib_find_cached_gid_by_port(struct ib
 	if (!rdma_is_port_valid(ib_dev, port))
 		return -ENOENT;
 
-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);
 
 	if (ndev)
 		mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -657,7 +662,7 @@ static int ib_cache_gid_find_by_filter(s
 	    !rdma_protocol_roce(ib_dev, port))
 		return -EPROTONOSUPPORT;
 
-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);
 
 	read_lock_irqsave(&table->rwlock, flags);
 	for (i = 0; i < table->sz; i++) {
@@ -756,7 +761,7 @@ void ib_cache_gid_set_default_gid(struct
 	unsigned int gid_type;
 	unsigned long mask;
 
-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);
 
 	mask = GID_ATTR_FIND_MASK_GID_TYPE |
 	       GID_ATTR_FIND_MASK_DEFAULT |
@@ -877,7 +882,7 @@ int ib_get_cached_gid(struct ib_device *
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
+	table = rdma_gid_table(device, port_num);
 	read_lock_irqsave(&table->rwlock, flags);
 	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
 	read_unlock_irqrestore(&table->rwlock, flags);
@@ -1095,7 +1100,7 @@ static int config_non_roce_gid_cache(str
 
 	gid_attr.device = device;
 	gid_attr.port_num = port;
-	table = device->cache.ports[port - rdma_start_port(device)].gid;
+	table = rdma_gid_table(device, port);
 
 	mutex_lock(&table->lock);
 	for (i = 0; i < gid_tbl_len; ++i) {
@@ -1128,7 +1133,7 @@ static void ib_cache_update(struct ib_de
 	if (!rdma_is_port_valid(device, port))
 		return;
 
-	table = device->cache.ports[port - rdma_start_port(device)].gid;
+	table = rdma_gid_table(device, port);
 
 	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
 	if (!tprops)