Blob Blame History Raw
From: Vlastimil Babka <vbabka@suse.cz>
Subject: pagecache limit: add tracepoints
Patch-mainline: never, SUSE specific
References: bnc#924701

Add tracepoints to the pagecache limit reclaim code so that its activity
can be observed with ftrace:

- mm_shrink_page_cache_start/end wrap a whole shrink_page_cache()
  invocation, recording the gfp mask on entry and the number of pages
  reclaimed on exit.

- mm_pagecache_reclaim_start/end wrap each shrink_all_nodes() round,
  recording the reclaim target (nr_pages), the pass number, priority,
  gfp mask and whether writepage is allowed on entry, and the number of
  pages scanned, pages reclaimed and zones processed on exit.

The new tracepoints live in include/trace/events/pagecache-limit.h,
which is included from include/trace/events/vmscan.h so that they
appear in the vmscan trace event group.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

---
 include/trace/events/pagecache-limit.h |   99 +++++++++++++++++++++++++++++++++
 include/trace/events/vmscan.h          |    2 
 mm/vmscan.c                            |    6 ++
 3 files changed, 107 insertions(+)

--- /dev/null
+++ b/include/trace/events/pagecache-limit.h
@@ -0,0 +1,99 @@
+
+/*
+ * This file defines pagecache limit specific tracepoints and should only be
+ * included through include/trace/events/vmscan.h, never directly.
+ */
+
+TRACE_EVENT(mm_shrink_page_cache_start,
+
+	TP_PROTO(gfp_t mask),
+
+	TP_ARGS(mask),
+
+	TP_STRUCT__entry(
+		__field(gfp_t, mask)
+	),
+
+	TP_fast_assign(
+		__entry->mask = mask;
+	),
+
+	TP_printk("mask=%s",
+		show_gfp_flags(__entry->mask))
+);
+
+TRACE_EVENT(mm_shrink_page_cache_end,
+
+	TP_PROTO(unsigned long nr_reclaimed),
+
+	TP_ARGS(nr_reclaimed),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr_reclaimed)
+	),
+
+	TP_fast_assign(
+		__entry->nr_reclaimed = nr_reclaimed;
+	),
+
+	TP_printk("nr_reclaimed=%lu",
+		__entry->nr_reclaimed)
+);
+
+TRACE_EVENT(mm_pagecache_reclaim_start,
+
+	TP_PROTO(unsigned long nr_pages, int pass, int prio, gfp_t mask,
+							bool may_write),
+
+	TP_ARGS(nr_pages, pass, prio, mask, may_write),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	nr_pages	)
+		__field(int,		pass		)
+		__field(int,		prio		)
+		__field(gfp_t,		mask		)
+		__field(bool,		may_write	)
+	),
+
+	TP_fast_assign(
+		__entry->nr_pages = nr_pages;
+		__entry->pass = pass;
+		__entry->prio = prio;
+		__entry->mask = mask;
+		__entry->may_write = may_write;
+	),
+
+	TP_printk("nr_pages=%lu pass=%d prio=%d mask=%s may_write=%d",
+		__entry->nr_pages,
+		__entry->pass,
+		__entry->prio,
+		show_gfp_flags(__entry->mask),
+		(int) __entry->may_write)
+);
+
+TRACE_EVENT(mm_pagecache_reclaim_end,
+
+	TP_PROTO(unsigned long nr_scanned, unsigned long nr_reclaimed,
+						unsigned int nr_zones),
+
+	TP_ARGS(nr_scanned, nr_reclaimed, nr_zones),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	nr_scanned	)
+		__field(unsigned long,	nr_reclaimed	)
+		__field(unsigned int,	nr_zones	)
+	),
+
+	TP_fast_assign(
+		__entry->nr_scanned = nr_scanned;
+		__entry->nr_reclaimed = nr_reclaimed;
+		__entry->nr_zones = nr_zones;
+	),
+
+	TP_printk("nr_scanned=%lu nr_reclaimed=%lu nr_scanned_zones=%u",
+		__entry->nr_scanned,
+		__entry->nr_reclaimed,
+		__entry->nr_zones)
+);
+
+
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -37,6 +37,8 @@
 		(RECLAIM_WB_ASYNC) \
 	)
 
+#include "pagecache-limit.h"
+
 TRACE_EVENT(mm_vmscan_kswapd_sleep,
 
 	TP_PROTO(int nid),
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3756,6 +3756,8 @@ static int shrink_all_nodes(unsigned lon
 	int nid;
 
 	prepare_to_wait(&pagecache_reclaim_wq, &wait, TASK_INTERRUPTIBLE);
+	trace_mm_pagecache_reclaim_start(nr_pages, pass, sc->priority, sc->gfp_mask,
+							sc->may_writepage);
 
 	for_each_online_node(nid) {
 		struct pglist_data *pgdat = NODE_DATA(nid);
@@ -3853,6 +3855,8 @@ out_wakeup:
 	wake_up_interruptible(&pagecache_reclaim_wq);
 out:
 	sc->nr_reclaimed = nr_reclaimed;
+	trace_mm_pagecache_reclaim_end(sc->nr_scanned, nr_reclaimed,
+							nr_locked_zones);
 	return nr_locked_zones;
 }
 
@@ -3912,6 +3916,7 @@ retry:
 	/* But do a few at least */
 	nr_pages = max_t(unsigned long, nr_pages, 8*SWAP_CLUSTER_MAX);
 	inc_pagecache_limit_stat(NR_PAGECACHE_LIMIT_THROTTLED);
+	trace_mm_shrink_page_cache_start(mask);
 
 	/*
 	 * Shrink the LRU in 2 passes:
@@ -3948,6 +3953,7 @@ retry:
 		}
 	}
 out:
+	trace_mm_shrink_page_cache_end(ret);
 	dec_pagecache_limit_stat(NR_PAGECACHE_LIMIT_THROTTLED);
 }