From: Shung-Hsi Yu <shung-hsi.yu@suse.com>
Subject: kABI: bpf: struct bpf_map kABI workaround
Patch-mainline: never, kabi
References: bsc#1220251 CVE-2023-52447

Commit 876673364161 ("bpf: Defer the free of inner map when necessary") adds a
new field to struct bpf_map. Luckily the new field falls within existing
padding, so nothing more is needed than hiding it from __GENKSYMS__.

Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
---
 include/linux/bpf.h  |    9 +++++++++
 kernel/bpf/syscall.c |    1 +
 2 files changed, 10 insertions(+)

--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -230,11 +230,18 @@ struct bpf_map {
 	 */
 	atomic64_t refcnt ____cacheline_aligned;
 	atomic64_t usercnt;
+#ifndef __GENKSYMS__
 	/* rcu is used before freeing and work is only used during freeing */
 	union {
+#endif
 		struct work_struct work;
+#ifndef __GENKSYMS__
 		struct rcu_head rcu;
 	};
+	/* Assert that the union of rcu_head and work_struct is no larger than
+	 * the original work_struct; a larger union would break kABI. */
+	static_assert(sizeof(struct work_struct) >= sizeof(struct rcu_head));
+#endif
 	struct mutex freeze_mutex;
 	atomic64_t writecnt;
 	/* 'Ownership' of program-containing map is claimed by the first program
@@ -250,10 +257,12 @@ struct bpf_map {
 	} owner;
 	bool bypass_spec_v1;
 	bool frozen; /* write-once; write-protected by freeze_mutex */
+#ifndef __GENKSYMS__
 	bool free_after_mult_rcu_gp;
+#endif
 	void *suse_kabi_padding;
 };
 
 static inline bool map_value_has_spin_lock(const struct bpf_map *map)
 {
 	return map->spin_lock_off >= 0;
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -616,6 +616,7 @@ static void bpf_map_free_in_work(struct
 
 static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
 {
+
 	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
 }