From: Aharon Landau <aharonl@nvidia.com>
Date: Tue, 15 Feb 2022 19:55:29 +0200
Subject: RDMA/mlx5: Remove redundant work in struct mlx5_cache_ent
Patch-mainline: v5.18-rc1
Git-commit: 185b9826782a53529b2b57328a8f49b1d0cf8f8f
References: jsc#PED-1552
delayed_cache_work_func() and cache_work_func() are both wrappers around
__cache_work_func(). Instead of keeping a separate non-delayed work item
for the immediate case, use the delayed work with delay = 0.
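As a minimal sketch of the resulting pattern (standalone, with
hypothetical demo_* names; only the workqueue calls are the real
kernel API):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct demo_ent {
            struct delayed_work dwork;  /* one item covers both cases */
    };

    static void demo_work_func(struct work_struct *work)
    {
            /* delayed_work embeds a work_struct member named "work" */
            struct demo_ent *ent =
                    container_of(work, struct demo_ent, dwork.work);

            /* ... do the actual cache maintenance here ... */
            pr_debug("servicing entry %p\n", ent);
    }

    static void demo_init(struct demo_ent *ent)
    {
            INIT_DELAYED_WORK(&ent->dwork, demo_work_func);
    }

    static void demo_kick(struct workqueue_struct *wq,
                          struct demo_ent *ent, bool backoff)
    {
            if (backoff)
                    /* throttle: run in one second */
                    queue_delayed_work(wq, &ent->dwork,
                                       msecs_to_jiffies(1000));
            else
                    /* run now: delay = 0 queues immediately */
                    mod_delayed_work(wq, &ent->dwork, 0);
    }

mod_delayed_work() rather than queue_delayed_work() is used at the
delay = 0 call sites because it also rearms a pending timer, so an
already queued 1-second back-off is pulled forward to "run now"
instead of being silently left in place.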
Link: https://lore.kernel.org/r/18b6ae205e75f087aa4a2a05c81ea8b66d8d88dc.1644947594.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  1 -
 drivers/infiniband/hw/mlx5/mr.c      | 16 +++-------------
2 files changed, 3 insertions(+), 14 deletions(-)
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -788,7 +788,6 @@ struct mlx5_cache_ent {
u32 miss;
 
struct mlx5_ib_dev *dev;
- struct work_struct work;
struct delayed_work dwork;
};
 
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -478,14 +478,14 @@ static void queue_adjust_cache_locked(st
return;
if (ent->available_mrs < ent->limit) {
ent->fill_to_high_water = true;
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->fill_to_high_water &&
ent->available_mrs + ent->pending < 2 * ent->limit) {
/*
* Once we start populating due to hitting a low water mark
* continue until we pass the high water mark.
*/
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
} else if (ent->available_mrs == 2 * ent->limit) {
ent->fill_to_high_water = false;
} else if (ent->available_mrs > 2 * ent->limit) {
@@ -495,7 +495,7 @@ static void queue_adjust_cache_locked(st
queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
msecs_to_jiffies(1000));
else
- queue_work(ent->dev->cache.wq, &ent->work);
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
}
}
 
@@ -571,14 +571,6 @@ static void delayed_cache_work_func(stru
__cache_work_func(ent);
}
 
-static void cache_work_func(struct work_struct *work)
-{
- struct mlx5_cache_ent *ent;
-
- ent = container_of(work, struct mlx5_cache_ent, work);
- __cache_work_func(ent);
-}
-
/* Allocate a special entry from the cache */
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
unsigned int entry, int access_flags)
@@ -739,7 +731,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_de
ent->dev = dev;
ent->limit = 0;
 
- INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 
if (i > MR_CACHE_LAST_STD_ENTRY) {
@@ -783,7 +774,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib
spin_lock_irq(&ent->lock);
ent->disabled = true;
spin_unlock_irq(&ent->lock);
- cancel_work_sync(&ent->work);
cancel_delayed_work_sync(&ent->dwork);
}