From: John Hurley <john.hurley@netronome.com>
Date: Mon, 25 Sep 2017 12:23:39 +0200
Subject: nfp: offload vxlan IPv4 endpoints of flower rules
Patch-mainline: v4.15-rc1
Git-commit: 2d9ad71a8ce67eea9ee38512a215e1893bd5cf87
References: bsc#1109837

Maintain a list of IPv4 addresses used as the tunnel destination IP match
fields in currently active flower rules. Offload the entire list of
NFP_FL_IPV4_ADDRS_MAX (even if some are unused) when new IPs are added or
removed. The NFP should only be aware of tunnel endpoints that are
currently used by rules on the device.

Signed-off-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/netronome/nfp/flower/cmsg.h        |    1 
 drivers/net/ethernet/netronome/nfp/flower/main.h        |    7 
 drivers/net/ethernet/netronome/nfp/flower/match.c       |   14 +
 drivers/net/ethernet/netronome/nfp/flower/offload.c     |    4 
 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c |  120 ++++++++++++++++
 5 files changed, 143 insertions(+), 3 deletions(-)

--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -318,6 +318,7 @@ enum nfp_flower_cmsg_type_port {
 	NFP_FLOWER_CMSG_TYPE_MAC_REPR =		7,
 	NFP_FLOWER_CMSG_TYPE_PORT_MOD =		8,
 	NFP_FLOWER_CMSG_TYPE_TUN_MAC =		11,
+	NFP_FLOWER_CMSG_TYPE_TUN_IPS =		14,
 	NFP_FLOWER_CMSG_TYPE_FLOW_STATS =	15,
 	NFP_FLOWER_CMSG_TYPE_PORT_ECHO =	16,
 	NFP_FLOWER_CMSG_TYPE_MAX =		32,
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -86,8 +86,10 @@ struct nfp_fl_stats_id {
  * @cmsg_skbs:		List of skbs for control message processing
  * @nfp_mac_off_list:	List of MAC addresses to offload
  * @nfp_mac_index_list:	List of unique 8-bit indexes for non NFP netdevs
+ * @nfp_ipv4_off_list:	List of IPv4 addresses to offload
  * @nfp_mac_off_lock:	Lock for the MAC address list
  * @nfp_mac_index_lock:	Lock for the MAC index list
+ * @nfp_ipv4_off_lock:	Lock for the IPv4 address list
  * @nfp_mac_off_ids:	IDA to manage id assignment for offloaded macs
  * @nfp_mac_off_count:	Number of MACs in address list
  * @nfp_tun_mac_nb:	Notifier to monitor link state
@@ -105,8 +107,10 @@ struct nfp_flower_priv {
 	struct sk_buff_head cmsg_skbs;
 	struct list_head nfp_mac_off_list;
 	struct list_head nfp_mac_index_list;
+	struct list_head nfp_ipv4_off_list;
 	struct mutex nfp_mac_off_lock;
 	struct mutex nfp_mac_index_lock;
+	struct mutex nfp_ipv4_off_lock;
 	struct ida nfp_mac_off_ids;
 	int nfp_mac_off_count;
 	struct notifier_block nfp_tun_mac_nb;
@@ -142,6 +146,7 @@ struct nfp_fl_payload {
 	struct rcu_head rcu;
 	spinlock_t lock; /* lock stats */
 	struct nfp_fl_stats stats;
+	__be32 nfp_tun_ipv4_addr;
 	char *unmasked_data;
 	char *mask_data;
 	char *action_data;
@@ -182,5 +187,7 @@ void nfp_flower_rx_flow_stats(struct nfp
 int nfp_tunnel_config_start(struct nfp_app *app);
 void nfp_tunnel_config_stop(struct nfp_app *app);
 void nfp_tunnel_write_macs(struct nfp_app *app);
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
 
 #endif
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -195,7 +195,7 @@ nfp_flower_compile_ipv6(struct nfp_flowe
 static void
 nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
 			 struct tc_cls_flower_offload *flow,
-			 bool mask_version)
+			 bool mask_version, __be32 *tun_dst)
 {
 	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
 	struct flow_dissector_key_ipv4_addrs *vxlan_ips;
@@ -223,6 +223,7 @@ nfp_flower_compile_vxlan(struct nfp_flow
 					     target);
 		frame->ip_src = vxlan_ips->src;
 		frame->ip_dst = vxlan_ips->dst;
+		*tun_dst = vxlan_ips->dst;
 	}
 }
 
@@ -232,6 +233,7 @@ int nfp_flower_compile_flow_match(struct
 				  struct nfp_fl_payload *nfp_flow)
 {
 	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+	__be32 tun_dst, tun_dst_mask = 0;
 	struct nfp_repr *netdev_repr;
 	int err;
 	u8 *ext;
@@ -336,10 +338,10 @@ int nfp_flower_compile_flow_match(struct
 	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
 		/* Populate Exact VXLAN Data. */
 		nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
-					 flow, false);
+					 flow, false, &tun_dst);
 		/* Populate Mask VXLAN Data. */
 		nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
-					 flow, true);
+					 flow, true, &tun_dst_mask);
 		ext += sizeof(struct nfp_flower_vxlan);
 		msk += sizeof(struct nfp_flower_vxlan);
 
@@ -347,6 +349,12 @@ int nfp_flower_compile_flow_match(struct
 		if (nfp_netdev_is_nfp_repr(netdev)) {
 			netdev_repr = netdev_priv(netdev);
 			nfp_tunnel_write_macs(netdev_repr->app);
+
+			/* Store the tunnel destination in the rule data.
+			 * This must be present and be an exact match.
+			 */
+			nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+			nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
 		}
 	}
 
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -306,6 +306,7 @@ nfp_flower_allocate_new(struct nfp_fl_ke
 	if (!flow_pay->action_data)
 		goto err_free_mask;
 
+	flow_pay->nfp_tun_ipv4_addr = 0;
 	flow_pay->meta.flags = 0;
 	spin_lock_init(&flow_pay->lock);
 
@@ -415,6 +416,9 @@ nfp_flower_del_offload(struct nfp_app *a
 	if (err)
 		goto err_free_flow;
 
+	if (nfp_flow->nfp_tun_ipv4_addr)
+		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+
 	err = nfp_flower_xmit_flow(netdev, nfp_flow,
 				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
 	if (err)
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -32,6 +32,7 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
 #include <linux/idr.h>
 #include <net/dst_metadata.h>
 
@@ -40,6 +41,30 @@
 #include "../nfp_net_repr.h"
 #include "../nfp_net.h"
 
+#define NFP_FL_IPV4_ADDRS_MAX        32
+
+/**
+ * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
+ * @count:	number of IPs populated in the array
+ * @ipv4_addr:	array of NFP_FL_IPV4_ADDRS_MAX 32 bit IPv4 addresses
+ */
+struct nfp_tun_ipv4_addr {
+	__be32 count;
+	__be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
+};
+
+/**
+ * struct nfp_ipv4_addr_entry - cached IPv4 addresses
+ * @ipv4_addr:	IP address
+ * @ref_count:	number of rules currently using this IP
+ * @list:	list pointer
+ */
+struct nfp_ipv4_addr_entry {
+	__be32 ipv4_addr;
+	int ref_count;
+	struct list_head list;
+};
+
 /**
  * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
  * @reserved:	reserved for future use
@@ -112,6 +137,87 @@ nfp_flower_xmit_tun_conf(struct nfp_app
 	return 0;
 }
 
+static void nfp_tun_write_ipv4_list(struct nfp_app *app)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_ipv4_addr_entry *entry;
+	struct nfp_tun_ipv4_addr payload;
+	struct list_head *ptr, *storage;
+	int count;
+
+	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
+	mutex_lock(&priv->nfp_ipv4_off_lock);
+	count = 0;
+	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
+			mutex_unlock(&priv->nfp_ipv4_off_lock);
+			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
+			return;
+		}
+		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+		payload.ipv4_addr[count++] = entry->ipv4_addr;
+	}
+	payload.count = cpu_to_be32(count);
+	mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
+				 sizeof(struct nfp_tun_ipv4_addr),
+				 &payload);
+}
+
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_ipv4_addr_entry *entry;
+	struct list_head *ptr, *storage;
+
+	mutex_lock(&priv->nfp_ipv4_off_lock);
+	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+		if (entry->ipv4_addr == ipv4) {
+			entry->ref_count++;
+			mutex_unlock(&priv->nfp_ipv4_off_lock);
+			return;
+		}
+	}
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		mutex_unlock(&priv->nfp_ipv4_off_lock);
+		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+		return;
+	}
+	entry->ipv4_addr = ipv4;
+	entry->ref_count = 1;
+	list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
+	mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+	nfp_tun_write_ipv4_list(app);
+}
+
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_ipv4_addr_entry *entry;
+	struct list_head *ptr, *storage;
+
+	mutex_lock(&priv->nfp_ipv4_off_lock);
+	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+		if (entry->ipv4_addr == ipv4) {
+			entry->ref_count--;
+			if (!entry->ref_count) {
+				list_del(&entry->list);
+				kfree(entry);
+			}
+			break;
+		}
+	}
+	mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+	nfp_tun_write_ipv4_list(app);
+}
+
 void nfp_tunnel_write_macs(struct nfp_app *app)
 {
 	struct nfp_flower_priv *priv = app->priv;
@@ -324,6 +430,10 @@ int nfp_tunnel_config_start(struct nfp_a
 	INIT_LIST_HEAD(&priv->nfp_mac_index_list);
 	ida_init(&priv->nfp_mac_off_ids);
 
+	/* Initialise priv data for IPv4 offloading. */
+	mutex_init(&priv->nfp_ipv4_off_lock);
+	INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+
 	err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
 	if (err)
 		goto err_free_mac_ida;
@@ -346,6 +456,7 @@ void nfp_tunnel_config_stop(struct nfp_a
 	struct nfp_tun_mac_offload_entry *mac_entry;
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_tun_mac_non_nfp_idx *mac_idx;
+	struct nfp_ipv4_addr_entry *ip_entry;
 	struct list_head *ptr, *storage;
 
 	unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
@@ -371,4 +482,13 @@ void nfp_tunnel_config_stop(struct nfp_a
 	mutex_unlock(&priv->nfp_mac_index_lock);
 
 	ida_destroy(&priv->nfp_mac_off_ids);
+
+	/* Free any memory that may be occupied by ipv4 list. */
+	mutex_lock(&priv->nfp_ipv4_off_lock);
+	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+		list_del(&ip_entry->list);
+		kfree(ip_entry);
+	}
+	mutex_unlock(&priv->nfp_ipv4_off_lock);
 }