From: John Hurley <john.hurley@netronome.com>
Date: Thu, 2 Nov 2017 01:31:30 -0700
Subject: nfp: flower: vxlan - ensure no sleep in atomic context
Patch-mainline: v4.15-rc1
Git-commit: dc4646a950153242313ed340f0a404de38b21d5c
References: bsc#1109837

Functions called by the netevent notifier run in atomic context and so
must not sleep. Change the neighbour offload mutex to a spinlock and
ensure memory allocations on these paths are done with the atomic flag.
Also, remove the now unnecessary locking in the cleanup path, which runs
after the notifiers have been unregistered.
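
As a minimal sketch of the pattern on the notifier path, mirroring
nfp_tun_add_route_to_cache() in the diff below: the list lock is taken
with spin_lock_bh() and the entry is allocated with GFP_ATOMIC, since
mutex_lock() and GFP_KERNEL allocations may sleep:

	/* Runs from the netevent notifier, i.e. atomic (softirq) context,
	 * so use a BH-safe spinlock and an atomic allocation.
	 */
	spin_lock_bh(&priv->nfp_neigh_off_lock);
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_bh(&priv->nfp_neigh_off_lock);
		return;
	}
	entry->ipv4_addr = ipv4_addr;
	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
	spin_unlock_bh(&priv->nfp_neigh_off_lock);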

Signed-off-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/netronome/nfp/flower/cmsg.c        |    9 +--
 drivers/net/ethernet/netronome/nfp/flower/cmsg.h        |    2 +-
 drivers/net/ethernet/netronome/nfp/flower/main.h        |    2 +-
 drivers/net/ethernet/netronome/nfp/flower/offload.c     |    2 +-
 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c |   47 ++++++----------
 5 files changed, 28 insertions(+), 34 deletions(-)

--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -50,14 +50,14 @@ nfp_flower_cmsg_get_hdr(struct sk_buff *
 
 struct sk_buff *
 nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
-		      enum nfp_flower_cmsg_type_port type)
+		      enum nfp_flower_cmsg_type_port type, gfp_t flag)
 {
 	struct nfp_flower_cmsg_hdr *ch;
 	struct sk_buff *skb;
 
 	size += NFP_FLOWER_CMSG_HLEN;
 
-	skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
+	skb = nfp_app_ctrl_msg_alloc(app, size, flag);
 	if (!skb)
 		return NULL;
 
@@ -78,7 +78,8 @@ nfp_flower_cmsg_mac_repr_start(struct nf
 	unsigned int size;
 
 	size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
-	skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR);
+	skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
+				    GFP_KERNEL);
 	if (!skb)
 		return NULL;
 
@@ -109,7 +110,7 @@ int nfp_flower_cmsg_portmod(struct nfp_r
 	struct sk_buff *skb;
 
 	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
-				    NFP_FLOWER_CMSG_TYPE_PORT_MOD);
+				    NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -458,6 +458,6 @@ void nfp_flower_cmsg_process_rx(struct w
 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
 struct sk_buff *
 nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
-		      enum nfp_flower_cmsg_type_port type);
+		      enum nfp_flower_cmsg_type_port type, gfp_t flag);
 
 #endif
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -115,7 +115,7 @@ struct nfp_flower_priv {
 	struct mutex nfp_mac_off_lock;
 	struct mutex nfp_mac_index_lock;
 	struct mutex nfp_ipv4_off_lock;
-	struct mutex nfp_neigh_off_lock;
+	spinlock_t nfp_neigh_off_lock;
 	struct ida nfp_mac_off_ids;
 	int nfp_mac_off_count;
 	struct notifier_block nfp_tun_mac_nb;
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -95,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *
 	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
 
-	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
+	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -224,12 +224,13 @@ static bool nfp_tun_is_netdev_to_offload
 }
 
 static int
-nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata)
+nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
+			 gfp_t flag)
 {
 	struct sk_buff *skb;
 	unsigned char *msg;
 
-	skb = nfp_flower_cmsg_alloc(app, plen, mtype);
+	skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
 	if (!skb)
 		return -ENOMEM;
 
@@ -246,15 +247,15 @@ static bool nfp_tun_has_route(struct nfp
 	struct nfp_ipv4_route_entry *entry;
 	struct list_head *ptr, *storage;
 
-	mutex_lock(&priv->nfp_neigh_off_lock);
+	spin_lock_bh(&priv->nfp_neigh_off_lock);
 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
 		if (entry->ipv4_addr == ipv4_addr) {
-			mutex_unlock(&priv->nfp_neigh_off_lock);
+			spin_unlock_bh(&priv->nfp_neigh_off_lock);
 			return true;
 		}
 	}
-	mutex_unlock(&priv->nfp_neigh_off_lock);
+	spin_unlock_bh(&priv->nfp_neigh_off_lock);
 	return false;
 }
 
@@ -264,24 +265,24 @@ static void nfp_tun_add_route_to_cache(s
 	struct nfp_ipv4_route_entry *entry;
 	struct list_head *ptr, *storage;
 
-	mutex_lock(&priv->nfp_neigh_off_lock);
+	spin_lock_bh(&priv->nfp_neigh_off_lock);
 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
 		if (entry->ipv4_addr == ipv4_addr) {
-			mutex_unlock(&priv->nfp_neigh_off_lock);
+			spin_unlock_bh(&priv->nfp_neigh_off_lock);
 			return;
 		}
 	}
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry) {
-		mutex_unlock(&priv->nfp_neigh_off_lock);
+		spin_unlock_bh(&priv->nfp_neigh_off_lock);
 		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
 		return;
 	}
 
 	entry->ipv4_addr = ipv4_addr;
 	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
-	mutex_unlock(&priv->nfp_neigh_off_lock);
+	spin_unlock_bh(&priv->nfp_neigh_off_lock);
 }
 
 static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
@@ -290,7 +291,7 @@ static void nfp_tun_del_route_from_cache
 	struct nfp_ipv4_route_entry *entry;
 	struct list_head *ptr, *storage;
 
-	mutex_lock(&priv->nfp_neigh_off_lock);
+	spin_lock_bh(&priv->nfp_neigh_off_lock);
 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
 		if (entry->ipv4_addr == ipv4_addr) {
@@ -299,12 +300,12 @@ static void nfp_tun_del_route_from_cache
 			break;
 		}
 	}
-	mutex_unlock(&priv->nfp_neigh_off_lock);
+	spin_unlock_bh(&priv->nfp_neigh_off_lock);
 }
 
 static void
 nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
-		    struct flowi4 *flow, struct neighbour *neigh)
+		    struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
 {
 	struct nfp_tun_neigh payload;
 
@@ -334,7 +335,7 @@ nfp_tun_write_neigh(struct net_device *n
 send_msg:
 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
 				 sizeof(struct nfp_tun_neigh),
-				 (unsigned char *)&payload);
+				 (unsigned char *)&payload, flag);
 }
 
 static int
@@ -385,7 +386,7 @@ nfp_tun_neigh_event_handler(struct notif
 #endif
 
 	flow.flowi4_proto = IPPROTO_UDP;
-	nfp_tun_write_neigh(n->dev, app, &flow, n);
+	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
 
 	return NOTIFY_OK;
 }
@@ -423,7 +424,7 @@ void nfp_tunnel_request_route(struct nfp
 	ip_rt_put(rt);
 	if (!n)
 		goto route_fail_warning;
-	nfp_tun_write_neigh(n->dev, app, &flow, n);
+	nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
 	neigh_release(n);
 	return;
 
@@ -456,7 +457,7 @@ static void nfp_tun_write_ipv4_list(stru
 
 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
 				 sizeof(struct nfp_tun_ipv4_addr),
-				 &payload);
+				 &payload, GFP_KERNEL);
 }
 
 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
@@ -548,7 +549,7 @@ void nfp_tunnel_write_macs(struct nfp_ap
 	}
 
 	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
-				       pay_size, payload);
+				       pay_size, payload, GFP_KERNEL);
 
 	kfree(payload);
 
@@ -729,7 +730,7 @@ int nfp_tunnel_config_start(struct nfp_a
 	INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
 
 	/* Initialise priv data for neighbour offloading. */
-	mutex_init(&priv->nfp_neigh_off_lock);
+	spin_lock_init(&priv->nfp_neigh_off_lock);
 	INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
 	priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
 
@@ -769,43 +770,35 @@ void nfp_tunnel_config_stop(struct nfp_a
 	unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
 
 	/* Free any memory that may be occupied by MAC list. */
-	mutex_lock(&priv->nfp_mac_off_lock);
 	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
 		mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
 				       list);
 		list_del(&mac_entry->list);
 		kfree(mac_entry);
 	}
-	mutex_unlock(&priv->nfp_mac_off_lock);
 
 	/* Free any memory that may be occupied by MAC index list. */
-	mutex_lock(&priv->nfp_mac_index_lock);
 	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
 		mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
 				     list);
 		list_del(&mac_idx->list);
 		kfree(mac_idx);
 	}
-	mutex_unlock(&priv->nfp_mac_index_lock);
 
 	ida_destroy(&priv->nfp_mac_off_ids);
 
 	/* Free any memory that may be occupied by ipv4 list. */
-	mutex_lock(&priv->nfp_ipv4_off_lock);
 	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
 		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
 		list_del(&ip_entry->list);
 		kfree(ip_entry);
 	}
-	mutex_unlock(&priv->nfp_ipv4_off_lock);
 
 	/* Free any memory that may be occupied by the route list. */
-	mutex_lock(&priv->nfp_neigh_off_lock);
 	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
 		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
 					 list);
 		list_del(&route_entry->list);
 		kfree(route_entry);
 	}
-	mutex_unlock(&priv->nfp_neigh_off_lock);
 }