From: Andrii Nakryiko <andriin@fb.com>
Date: Wed, 9 Oct 2019 13:14:58 -0700
Subject: selftests/bpf: Add read-only map values propagation tests
Patch-mainline: v5.5-rc1
Git-commit: 666b2c10ee9d51f14d04c416a14b1cb6fd0846e4
References: bsc#1155518

Add tests checking that the verifier does proper constant propagation for
read-only maps. If constant propagation didn't work, the skip_loop and
part_loop BPF programs would be rejected, as the BPF verifier would otherwise
be unable to prove that they ever complete. With constant propagation,
though, they are successfully validated as properly terminating loops.
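
To illustrate the effect (a minimal sketch, not code from this patch; the
`bound` global is purely illustrative), consider a loop limited by a
read-only global placed in .rodata:

	static volatile const unsigned bound = 4; /* backed by a read-only map */

	unsigned i, sum = 0;

	for (i = 0; i < bound; i++)
		sum += i;

With read-only map value propagation the verifier sees `bound` as the known
constant 4 and can prove the loop terminates; without it, the comparison is
against an unknown scalar and the program would be rejected as possibly
non-terminating.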

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20191009201458.2679171-3-andriin@fb.com
Acked-by: Gary Lin <glin@suse.com>
---
 tools/testing/selftests/bpf/prog_tests/rdonly_maps.c |   99 +++++++++++++++++++
 tools/testing/selftests/bpf/progs/test_rdonly_maps.c |   83 +++++++++++++++
 2 files changed, 182 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
 create mode 100644 tools/testing/selftests/bpf/progs/test_rdonly_maps.c

--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/rdonly_maps.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+struct bss {
+	unsigned did_run;
+	unsigned iters;
+	unsigned sum;
+};
+
+struct rdonly_map_subtest {
+	const char *subtest_name;
+	const char *prog_name;
+	unsigned exp_iters;
+	unsigned exp_sum;
+};
+
+void test_rdonly_maps(void)
+{
+	const char *prog_name_skip_loop = "raw_tracepoint/sys_enter:skip_loop";
+	const char *prog_name_part_loop = "raw_tracepoint/sys_enter:part_loop";
+	const char *prog_name_full_loop = "raw_tracepoint/sys_enter:full_loop";
+	const char *file = "test_rdonly_maps.o";
+	struct rdonly_map_subtest subtests[] = {
+		{ "skip loop", prog_name_skip_loop, 0, 0 },
+		{ "part loop", prog_name_part_loop, 3, 2 + 3 + 4 },
+		{ "full loop", prog_name_full_loop, 4, 2 + 3 + 4 + 5 },
+	};
+	int i, err, zero = 0, duration = 0;
+	struct bpf_link *link = NULL;
+	struct bpf_program *prog;
+	struct bpf_map *bss_map;
+	struct bpf_object *obj;
+	struct bss bss;
+
+	obj = bpf_object__open_file(file, NULL);
+	if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+		return;
+
+	bpf_object__for_each_program(prog, obj) {
+		bpf_program__set_raw_tracepoint(prog);
+	}
+
+	err = bpf_object__load(obj);
+	if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
+		goto cleanup;
+
+	bss_map = bpf_object__find_map_by_name(obj, "test_rdo.bss");
+	if (CHECK(!bss_map, "find_bss_map", "failed\n"))
+		goto cleanup;
+
+	for (i = 0; i < ARRAY_SIZE(subtests); i++) {
+		const struct rdonly_map_subtest *t = &subtests[i];
+
+		if (!test__start_subtest(t->subtest_name))
+			continue;
+
+		prog = bpf_object__find_program_by_title(obj, t->prog_name);
+		if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
+			  t->prog_name))
+			goto cleanup;
+
+		memset(&bss, 0, sizeof(bss));
+		err = bpf_map_update_elem(bpf_map__fd(bss_map), &zero, &bss, 0);
+		if (CHECK(err, "set_bss", "failed to set bss data: %d\n", err))
+			goto cleanup;
+
+		link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+		if (CHECK(IS_ERR(link), "attach_prog", "prog '%s', err %ld\n",
+			  t->prog_name, PTR_ERR(link))) {
+			link = NULL;
+			goto cleanup;
+		}
+
+		/* trigger probe */
+		usleep(1);
+
+		bpf_link__destroy(link);
+		link = NULL;
+
+		err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, &bss);
+		if (CHECK(err, "get_bss", "failed to get bss data: %d\n", err))
+			goto cleanup;
+		if (CHECK(bss.did_run == 0, "check_run",
+			  "prog '%s' didn't run?\n", t->prog_name))
+			goto cleanup;
+		if (CHECK(bss.iters != t->exp_iters, "check_iters",
+			  "prog '%s' iters: %d, expected: %d\n",
+			  t->prog_name, bss.iters, t->exp_iters))
+			goto cleanup;
+		if (CHECK(bss.sum != t->exp_sum, "check_sum",
+			  "prog '%s' sum: %d, expected: %d\n",
+			  t->prog_name, bss.sum, t->exp_sum))
+			goto cleanup;
+	}
+
+cleanup:
+	bpf_link__destroy(link);
+	bpf_object__close(obj);
+}
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_rdonly_maps.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+static volatile const struct {
+	unsigned a[4];
+	/*
+	 * if the struct's size is a multiple of 16, the compiler will put it
+	 * into the .rodata.cst16 section, which is not recognized by libbpf;
+	 * work around this by ensuring the struct is not 16-byte aligned
+	 */
+	char _y;
+} rdonly_values = { .a = {2, 3, 4, 5} };
+
+static volatile struct {
+	unsigned did_run;
+	unsigned iters;
+	unsigned sum;
+} res;
+
+SEC("raw_tracepoint/sys_enter:skip_loop")
+int skip_loop(struct pt_regs *ctx)
+{
+	/* prevent the compiler from optimizing everything out */
+	unsigned * volatile p = (void *)&rdonly_values.a;
+	unsigned iters = 0, sum = 0;
+
+	/* we should never enter this loop */
+	while (*p & 1) {
+		iters++;
+		sum += *p;
+		p++;
+	}
+	res.did_run = 1;
+	res.iters = iters;
+	res.sum = sum;
+	return 0;
+}
+
+SEC("raw_tracepoint/sys_enter:part_loop")
+int part_loop(struct pt_regs *ctx)
+{
+	/* prevent the compiler from optimizing everything out */
+	unsigned * volatile p = (void *)&rdonly_values.a;
+	unsigned iters = 0, sum = 0;
+
+	/* validate verifier can derive loop termination */
+	while (*p < 5) {
+		iters++;
+		sum += *p;
+		p++;
+	}
+	res.did_run = 1;
+	res.iters = iters;
+	res.sum = sum;
+	return 0;
+}
+
+SEC("raw_tracepoint/sys_enter:full_loop")
+int full_loop(struct pt_regs *ctx)
+{
+	/* prevent the compiler from optimizing everything out */
+	unsigned * volatile p = (void *)&rdonly_values.a;
+	int i = sizeof(rdonly_values.a) / sizeof(rdonly_values.a[0]);
+	unsigned iters = 0, sum = 0;
+
+	/* validate verifier can allow full loop as well */
+	while (i > 0) {
+		iters++;
+		sum += *p;
+		p++;
+		i--;
+	}
+	res.did_run = 1;
+	res.iters = iters;
+	res.sum = sum;
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";