From: Jiong Wang <jiong.wang@netronome.com>
Date: Wed, 28 Mar 2018 17:48:27 -0700
Subject: nfp: bpf: detect packet reads could be cached, enable the
 optimisation
Patch-mainline: v4.17-rc1
Git-commit: 87b10ecdced224dc0def123e1f57bc6c5ac4ac5c
References: bsc#1109837

This patch is the front end of this optimisation: it detects and marks
those packet reads that could be cached. The optimisation "backend" is
then activated automatically.

Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/netronome/nfp/bpf/jit.c  |  115 ++++++++++++++++++++++++++
 drivers/net/ethernet/netronome/nfp/bpf/main.h |   30 ++++++
 2 files changed, 145 insertions(+)
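
 [Editor's note: the sketch below is not part of the patch. It is a small
  standalone userspace C illustration, with made-up names such as
  struct cache_range and merge_read(), of the 64-byte windowing heuristic
  that nfp_bpf_opt_pkt_cache() in the jit.c hunk applies: packet loads that
  share the same packet-pointer ID and constant pointer offset are merged
  into one cached range as long as the merged range stays within 64 bytes,
  and a load that would grow the range past 64 bytes starts a new range.]

 /*
  * Standalone sketch (not kernel code) of the range-merging heuristic.
  * All identifiers here are illustrative only.
  */
 #include <stdbool.h>
 #include <stdio.h>

 struct pkt_read {
         short off;      /* constant offset of the load */
         short size;     /* bytes read */
 };

 struct cache_range {
         short start;
         short end;
 };

 /* Try to grow @range so it also covers @read; return false if the merged
  * range would exceed the 64-byte cache window, in which case the caller
  * has to start a new range.
  */
 static bool merge_read(struct cache_range *range, const struct pkt_read *read)
 {
         short start = range->start < read->off ? range->start : read->off;
         short end = read->off + read->size;

         if (end < range->end)
                 end = range->end;
         if (end - start > 64)
                 return false;

         range->start = start;
         range->end = end;
         return true;
 }

 int main(void)
 {
         /* Reads at offsets 0, 8 and 60 fit one 64-byte window; the read at
          * offset 70 does not, so it opens a second cache range.
          */
         const struct pkt_read reads[] = {
                 { 0, 8 }, { 8, 8 }, { 60, 4 }, { 70, 8 },
         };
         struct cache_range range = { reads[0].off,
                                      reads[0].off + reads[0].size };

         for (unsigned int i = 1; i < sizeof(reads) / sizeof(reads[0]); i++) {
                 if (!merge_read(&range, &reads[i])) {
                         printf("cache range [%d, %d), new range starts at %d\n",
                                range.start, range.end, reads[i].off);
                         range.start = reads[i].off;
                         range.end = reads[i].off + reads[i].size;
                 }
         }
         printf("final cache range [%d, %d)\n", range.start, range.end);
         return 0;
 }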

--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -2968,6 +2968,120 @@ static void nfp_bpf_opt_ldst_gather(stru
 	}
 }
 
+static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
+{
+	struct nfp_insn_meta *meta, *range_node = NULL;
+	s16 range_start = 0, range_end = 0;
+	bool cache_avail = false;
+	struct bpf_insn *insn;
+	s32 range_ptr_off = 0;
+	u32 range_ptr_id = 0;
+
+	list_for_each_entry(meta, &nfp_prog->insns, l) {
+		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
+			cache_avail = false;
+
+		if (meta->skip)
+			continue;
+
+		insn = &meta->insn;
+
+		if (is_mbpf_store_pkt(meta) ||
+		    insn->code == (BPF_JMP | BPF_CALL) ||
+		    is_mbpf_classic_store_pkt(meta) ||
+		    is_mbpf_classic_load(meta)) {
+			cache_avail = false;
+			continue;
+		}
+
+		if (!is_mbpf_load(meta))
+			continue;
+
+		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
+			cache_avail = false;
+			continue;
+		}
+
+		if (!cache_avail) {
+			cache_avail = true;
+			if (range_node)
+				goto end_current_then_start_new;
+			goto start_new;
+		}
+
+		/* Check ID to make sure two reads share the same
+		 * variable offset against PTR_TO_PACKET, and check OFF
+		 * to make sure they also share the same constant
+		 * offset.
+		 *
+		 * OFFs don't really need to be the same, because they
+		 * are the constant offsets against PTR_TO_PACKET, so
+		 * for different OFFs, we could canonicalize them to
+		 * offsets against original packet pointer. We don't
+		 * support this.
+		 */
+		if (meta->ptr.id == range_ptr_id &&
+		    meta->ptr.off == range_ptr_off) {
+			s16 new_start = range_start;
+			s16 end, off = insn->off;
+			s16 new_end = range_end;
+			bool changed = false;
+
+			if (off < range_start) {
+				new_start = off;
+				changed = true;
+			}
+
+			end = off + BPF_LDST_BYTES(insn);
+			if (end > range_end) {
+				new_end = end;
+				changed = true;
+			}
+
+			if (!changed)
+				continue;
+
+			if (new_end - new_start <= 64) {
+				/* Install new range. */
+				range_start = new_start;
+				range_end = new_end;
+				continue;
+			}
+		}
+
+end_current_then_start_new:
+		range_node->pkt_cache.range_start = range_start;
+		range_node->pkt_cache.range_end = range_end;
+start_new:
+		range_node = meta;
+		range_node->pkt_cache.do_init = true;
+		range_ptr_id = range_node->ptr.id;
+		range_ptr_off = range_node->ptr.off;
+		range_start = insn->off;
+		range_end = insn->off + BPF_LDST_BYTES(insn);
+	}
+
+	if (range_node) {
+		range_node->pkt_cache.range_start = range_start;
+		range_node->pkt_cache.range_end = range_end;
+	}
+
+	list_for_each_entry(meta, &nfp_prog->insns, l) {
+		if (meta->skip)
+			continue;
+
+		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
+			if (meta->pkt_cache.do_init) {
+				range_start = meta->pkt_cache.range_start;
+				range_end = meta->pkt_cache.range_end;
+			} else {
+				meta->pkt_cache.range_start = range_start;
+				meta->pkt_cache.range_end = range_end;
+			}
+		}
+	}
+}
+
 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
 {
 	nfp_bpf_opt_reg_init(nfp_prog);
@@ -2975,6 +3089,7 @@ static int nfp_bpf_optimize(struct nfp_p
 	nfp_bpf_opt_ld_mask(nfp_prog);
 	nfp_bpf_opt_ld_shift(nfp_prog);
 	nfp_bpf_opt_ldst_gather(nfp_prog);
+	nfp_bpf_opt_pkt_cache(nfp_prog);
 
 	return 0;
 }
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -278,6 +278,36 @@ static inline bool is_mbpf_store(const s
 	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
 }
 
+static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
+{
+	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
+}
+
+static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
+{
+	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
+}
+
+static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
+{
+	u8 code = meta->insn.code;
+
+	return BPF_CLASS(code) == BPF_LD &&
+	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
+}
+
+static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
+{
+	u8 code = meta->insn.code;
+
+	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
+}
+
+static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
+{
+	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
+}
+
 /**
  * struct nfp_prog - nfp BPF program
  * @bpf: backpointer to the bpf app priv structure