From 9cc25a7b48aabd633080d6242378ba708cf47ee3 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Tue, 23 Jan 2018 20:34:30 +0100
Subject: [PATCH] sched/core: Optimize update_stats_*()

References: bnc#1101669 optimise numa balancing for fast migrate
Patch-mainline: v4.16-rc1
Git-commit: 2ed41a55023dc5be6742ca0eb8df5cb20e8dcaae

These functions are already gated by schedstats_enabled(), there is no
point in then issuing another static_branch for every individual
update in them.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 kernel/sched/fair.c  | 32 ++++++++++++++++----------------
 kernel/sched/stats.h |  4 ++++
 2 files changed, 20 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4b656b1ee149..0ac3f9c7e787 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -882,7 +882,7 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	    likely(wait_start > prev_wait_start))
 		wait_start -= prev_wait_start;
 
-	schedstat_set(se->statistics.wait_start, wait_start);
+	__schedstat_set(se->statistics.wait_start, wait_start);
 }
 
 static inline void
@@ -904,17 +904,17 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			 * time stamp can be adjusted to accumulate wait time
 			 * prior to migration.
 			 */
-			schedstat_set(se->statistics.wait_start, delta);
+			__schedstat_set(se->statistics.wait_start, delta);
 			return;
 		}
 		trace_sched_stat_wait(p, delta);
 	}
 
-	schedstat_set(se->statistics.wait_max,
+	__schedstat_set(se->statistics.wait_max,
 		      max(schedstat_val(se->statistics.wait_max), delta));
-	schedstat_inc(se->statistics.wait_count);
-	schedstat_add(se->statistics.wait_sum, delta);
-	schedstat_set(se->statistics.wait_start, 0);
+	__schedstat_inc(se->statistics.wait_count);
+	__schedstat_add(se->statistics.wait_sum, delta);
+	__schedstat_set(se->statistics.wait_start, 0);
 }
 
 static inline void
@@ -939,10 +939,10 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			delta = 0;
 
 		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
-			schedstat_set(se->statistics.sleep_max, delta);
+			__schedstat_set(se->statistics.sleep_max, delta);
 
-		schedstat_set(se->statistics.sleep_start, 0);
-		schedstat_add(se->statistics.sum_sleep_runtime, delta);
+		__schedstat_set(se->statistics.sleep_start, 0);
+		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
 		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
@@ -956,15 +956,15 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			delta = 0;
 
 		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
-			schedstat_set(se->statistics.block_max, delta);
+			__schedstat_set(se->statistics.block_max, delta);
 
-		schedstat_set(se->statistics.block_start, 0);
-		schedstat_add(se->statistics.sum_sleep_runtime, delta);
+		__schedstat_set(se->statistics.block_start, 0);
+		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
 		if (tsk) {
 			if (tsk->in_iowait) {
-				schedstat_add(se->statistics.iowait_sum, delta);
-				schedstat_inc(se->statistics.iowait_count);
+				__schedstat_add(se->statistics.iowait_sum, delta);
+				__schedstat_inc(se->statistics.iowait_count);
 				trace_sched_stat_iowait(tsk, delta);
 			}
 
@@ -1023,10 +1023,10 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		struct task_struct *tsk = task_of(se);
 
 		if (tsk->state & TASK_INTERRUPTIBLE)
-			schedstat_set(se->statistics.sleep_start,
+			__schedstat_set(se->statistics.sleep_start,
 				      rq_clock(rq_of(cfs_rq)));
 		if (tsk->state & TASK_UNINTERRUPTIBLE)
-			schedstat_set(se->statistics.block_start,
+			__schedstat_set(se->statistics.block_start,
 				      rq_clock(rq_of(cfs_rq)));
 	}
 }
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index ebee041977b3..cf7308d69d9a 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -32,7 +32,9 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 #define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
 #define __schedstat_inc(var)		do { var++; } while (0)
 #define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
+#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
 #define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
+#define __schedstat_set(var, val)	do { var = (val); } while (0)
 #define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
 #define schedstat_val(var)		(var)
 #define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
@@ -50,7 +52,9 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 #define schedstat_enabled()		0
 #define __schedstat_inc(var)		do { } while (0)
 #define schedstat_inc(var)		do { } while (0)
+#define __schedstat_add(var, amt)	do { } while (0)
 #define schedstat_add(var, amt)		do { } while (0)
+#define __schedstat_set(var, val)	do { } while (0)
 #define schedstat_set(var, val)		do { } while (0)
 #define schedstat_val(var)		0
 #define schedstat_val_or_zero(var)	0