From: Jakub Kicinski <jakub.kicinski@netronome.com>
Date: Wed, 25 Jul 2018 19:53:32 -0700
Subject: nfp: bpf: pass raw data buffer to nfp_bpf_event_output()
Patch-mainline: v4.19-rc1
Git-commit: 20c54204219987d620ca9da567dd54e569863dad
References: bsc#1109837

In preparation for SKB-less perf event handling make
nfp_bpf_event_output() take buffer address and length,
not SKB as parameters.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c    |    5 ++++-
 drivers/net/ethernet/netronome/nfp/bpf/main.h    |    3 ++-
 drivers/net/ethernet/netronome/nfp/bpf/offload.c |   21 +++++++++------------
 3 files changed, 15 insertions(+), 14 deletions(-)

--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -441,7 +441,10 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app
 	}
 
 	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
-		nfp_bpf_event_output(bpf, skb);
+		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
+			dev_consume_skb_any(skb);
+		else
+			dev_kfree_skb_any(skb);
 		return;
 	}
 
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -501,7 +501,8 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf
 int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
 			       void *key, void *next_key);
 
-int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+			 unsigned int len);
 
 void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
 #endif
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -453,23 +453,24 @@ nfp_bpf_perf_event_copy(void *dst, const
 	return 0;
 }
 
-int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb)
+int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+			 unsigned int len)
 {
-	struct cmsg_bpf_event *cbe = (void *)skb->data;
+	struct cmsg_bpf_event *cbe = (void *)data;
 	u32 pkt_size, data_size;
 	struct bpf_map *map;
 
-	if (skb->len < sizeof(struct cmsg_bpf_event))
-		goto err_drop;
+	if (len < sizeof(struct cmsg_bpf_event))
+		return -EINVAL;
 
 	pkt_size = be32_to_cpu(cbe->pkt_size);
 	data_size = be32_to_cpu(cbe->data_size);
 	map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);
 
-	if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
-		goto err_drop;
+	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+		return -EINVAL;
 	if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
-		goto err_drop;
+		return -EINVAL;
 
 	rcu_read_lock();
 	if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
@@ -477,7 +478,7 @@ int nfp_bpf_event_output(struct nfp_app_
 		rcu_read_unlock();
 		pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
 			map);
-		goto err_drop;
+		return -EINVAL;
 	}
 
 	bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
@@ -485,11 +486,7 @@ int nfp_bpf_event_output(struct nfp_app_
 			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
 	rcu_read_unlock();
 
-	dev_consume_skb_any(skb);
 	return 0;
-err_drop:
-	dev_kfree_skb_any(skb);
-	return -EINVAL;
 }
 
 static int