From 4ac8227a3c744372d9f8eaf801c0d8e3be0b70aa Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney" <paulmck@kernel.org>
Date: Tue, 9 Nov 2021 13:37:34 -0800
Subject: [PATCH] rcu-tasks: Abstract checking of callback lists

References: bnc#1189998 (PREEMPT_RT prerequisite backports)
Patch-mainline: v5.17-rc0
Git-commit: 4d1114c05467b5f421d99121bff22a9633390722

This commit adds a rcu_tasks_need_gpcb() function that returns an
indication of whether another grace period is required, and if no grace
period is required, whether there are callbacks that need to be invoked.
The function scans all per-CPU lists currently in use.

Reported-by: Martin Lau <kafai@fb.com>
Cc: Neeraj Upadhyay <neeraj.iitr10@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 kernel/rcu/tasks.h | 61 ++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 38 insertions(+), 23 deletions(-)

diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 03c44c9b3909..47ac74ea8d9c 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -229,11 +229,38 @@ static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
 	wait_rcu_gp(rtp->call_func);
 }
 
+// Advance callbacks and indicate whether either a grace period or
+// callback invocation is needed.
+static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+{
+	int cpu;
+	unsigned long flags;
+	int needgpcb = 0;
+
+	for (cpu = 0; cpu < rtp->percpu_enqueue_lim; cpu++) {
+		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+
+		/* Advance and accelerate any new callbacks. */
+		if (rcu_segcblist_empty(&rtpcp->cblist))
+			continue;
+		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
+		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
+		if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
+			needgpcb |= 0x3;
+		if (!rcu_segcblist_empty(&rtpcp->cblist))
+			needgpcb |= 0x1;
+		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+	}
+	return needgpcb;
+}
+
 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
 static int __noreturn rcu_tasks_kthread(void *arg)
 {
 	unsigned long flags;
 	int len;
+	int needgpcb;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	struct rcu_head *rhp;
 	struct rcu_tasks *rtp = arg;
@@ -249,37 +276,25 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	 * This loop is terminated by the system going down.  ;-)
 	 */
 	for (;;) {
-		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each...
+		struct rcu_tasks_percpu *rtpcp;
 
 		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
 
-		/* Pick up any new callbacks. */
-		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
-		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
-		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
-		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
-
 		/* If there were none, wait a bit and start over. */
-		if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
-			wait_event_interruptible(rtp->cbs_wq,
-						 rcu_segcblist_pend_cbs(&rtpcp->cblist));
-			if (!rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
-				WARN_ON(signal_pending(current));
-				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
-				schedule_timeout_idle(HZ/10);
-			}
-			continue;
+		wait_event_idle(rtp->cbs_wq, (needgpcb = rcu_tasks_need_gpcb(rtp)));
+
+		if (needgpcb & 0x2) {
+			// Wait for one grace period.
+			set_tasks_gp_state(rtp, RTGS_WAIT_GP);
+			rtp->gp_start = jiffies;
+			rcu_seq_start(&rtp->tasks_gp_seq);
+			rtp->gp_func(rtp);
+			rcu_seq_end(&rtp->tasks_gp_seq);
 		}
 
-		// Wait for one grace period.
-		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
-		rtp->gp_start = jiffies;
-		rcu_seq_start(&rtp->tasks_gp_seq);
-		rtp->gp_func(rtp);
-		rcu_seq_end(&rtp->tasks_gp_seq);
-
 		/* Invoke the callbacks. */
 		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
+		rtpcp = per_cpu_ptr(rtp->rtpcpu, 0);
 		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
 		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
 		rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
|