From: Davide Caratti <dcaratti@redhat.com>
Date: Mon, 22 Jan 2018 18:14:32 +0100
Subject: net/sched: act_csum: don't use spinlock in the fast path
Patch-mainline: v4.16-rc1
Git-commit: 9c5f69bbd75a7db80578782b037629c5f1e59dce
References: bsc#1109837

Use RCU instead of spin_{,un}lock_bh() to protect concurrent reads and
writes of the act_csum configuration, reducing the effects of lock
contention in the data path when multiple readers are present.
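
The pattern is a plain RCU pointer swap: the fast path dereferences the
parameter block under rcu_read_lock(), while the control path (already
serialized by RTNL) publishes a freshly allocated copy with
rcu_assign_pointer() and defers freeing of the old block with kfree_rcu().
A minimal sketch of that pattern follows; the demo_* names are purely
illustrative and are not part of this patch:

	/* assumes <linux/rcupdate.h>, <linux/rtnetlink.h>, <linux/slab.h> */
	struct demo_params {
		int action;
		u32 update_flags;
		struct rcu_head rcu;	/* allows deferred free via kfree_rcu() */
	};

	struct demo_obj {
		struct demo_params __rcu *params;
	};

	/* fast path: lockless read of the current parameters */
	static u32 demo_read_flags(struct demo_obj *o)
	{
		u32 flags;

		rcu_read_lock();
		flags = rcu_dereference(o->params)->update_flags;
		rcu_read_unlock();

		return flags;
	}

	/* control path: replace the parameters while holding RTNL */
	static int demo_update(struct demo_obj *o, int action, u32 flags)
	{
		struct demo_params *new, *old;

		ASSERT_RTNL();

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;

		new->action = action;
		new->update_flags = flags;

		old = rtnl_dereference(o->params);
		rcu_assign_pointer(o->params, new);
		if (old)
			kfree_rcu(old, rcu);

		return 0;
	}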

Signed-off-by: Davide Caratti <dcaratti@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 include/net/tc_act/tc_csum.h |   16 ++++++++++-
 net/sched/act_csum.c         |   58 +++++++++++++++++++++++++++++++++----------
 2 files changed, 59 insertions(+), 15 deletions(-)

--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -5,10 +5,16 @@
 #include <net/act_api.h>
 #include <linux/tc_act/tc_csum.h>
 
+struct tcf_csum_params {
+	int action;
+	u32 update_flags;
+	struct rcu_head rcu;
+};
+
 struct tcf_csum {
 	struct tc_action common;
 
-	u32 update_flags;
+	struct tcf_csum_params __rcu *params;
 };
 #define to_tcf_csum(a) ((struct tcf_csum *)a)
 
@@ -23,7 +29,13 @@ static inline bool is_tcf_csum(const str
 
 static inline u32 tcf_csum_update_flags(const struct tc_action *a)
 {
-	return to_tcf_csum(a)->update_flags;
+	u32 update_flags;
+
+	rcu_read_lock();
+	update_flags = rcu_dereference(to_tcf_csum(a)->params)->update_flags;
+	rcu_read_unlock();
+
+	return update_flags;
 }
 
 #endif /* __NET_TC_CSUM_H */
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -49,6 +49,7 @@ static int tcf_csum_init(struct net *net
 			 int bind)
 {
 	struct tc_action_net *tn = net_generic(net, csum_net_id);
+	struct tcf_csum_params *params_old, *params_new;
 	struct nlattr *tb[TCA_CSUM_MAX + 1];
 	struct tc_csum *parm;
 	struct tcf_csum *p;
@@ -80,10 +81,21 @@ static int tcf_csum_init(struct net *net
 	}
 
 	p = to_tcf_csum(*a);
-	spin_lock_bh(&p->tcf_lock);
-	p->tcf_action = parm->action;
-	p->update_flags = parm->update_flags;
-	spin_unlock_bh(&p->tcf_lock);
+	ASSERT_RTNL();
+
+	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+	if (unlikely(!params_new)) {
+		if (ret == ACT_P_CREATED)
+			tcf_idr_release(*a, bind);
+		return -ENOMEM;
+	}
+	params_old = rtnl_dereference(p->params);
+
+	params_new->action = parm->action;
+	params_new->update_flags = parm->update_flags;
+	rcu_assign_pointer(p->params, params_new);
+	if (params_old)
+		kfree_rcu(params_old, rcu);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -539,19 +551,21 @@ static int tcf_csum(struct sk_buff *skb,
 		    struct tcf_result *res)
 {
 	struct tcf_csum *p = to_tcf_csum(a);
-	int action;
+	struct tcf_csum_params *params;
 	u32 update_flags;
+	int action;
+
+	rcu_read_lock();
+	params = rcu_dereference(p->params);
 
 	tcf_lastuse_update(&p->tcf_tm);
 	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
-	spin_lock(&p->tcf_lock);
-	action = p->tcf_action;
-	update_flags = p->update_flags;
-	spin_unlock(&p->tcf_lock);
 
+	action = params->action;
 	if (unlikely(action == TC_ACT_SHOT))
-		goto drop;
+		goto drop_stats;
 
+	update_flags = params->update_flags;
 	switch (tc_skb_protocol(skb)) {
 	case cpu_to_be16(ETH_P_IP):
 		if (!tcf_csum_ipv4(skb, update_flags))
@@ -563,11 +577,16 @@ static int tcf_csum(struct sk_buff *skb,
 		break;
 	}
 
+unlock:
+	rcu_read_unlock();
 	return action;
 
 drop:
+	action = TC_ACT_SHOT;
+
+drop_stats:
 	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
-	return TC_ACT_SHOT;
+	goto unlock;
 }
 
 static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
@@ -575,15 +594,18 @@ static int tcf_csum_dump(struct sk_buff
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_csum *p = to_tcf_csum(a);
+	struct tcf_csum_params *params;
 	struct tc_csum opt = {
-		.update_flags = p->update_flags,
 		.index   = p->tcf_index,
-		.action  = p->tcf_action,
 		.refcnt  = p->tcf_refcnt - ref,
 		.bindcnt = p->tcf_bindcnt - bind,
 	};
 	struct tcf_t t;
 
+	params = rtnl_dereference(p->params);
+	opt.action = params->action;
+	opt.update_flags = params->update_flags;
+
 	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
@@ -598,6 +620,15 @@ nla_put_failure:
 	return -1;
 }
 
+static void tcf_csum_cleanup(struct tc_action *a)
+{
+	struct tcf_csum *p = to_tcf_csum(a);
+	struct tcf_csum_params *params;
+
+	params = rcu_dereference_protected(p->params, 1);
+	kfree_rcu(params, rcu);
+}
+
 static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
 			   struct netlink_callback *cb, int type,
 			   const struct tc_action_ops *ops)
@@ -621,6 +652,7 @@ static struct tc_action_ops act_csum_ops
 	.act		= tcf_csum,
 	.dump		= tcf_csum_dump,
 	.init		= tcf_csum_init,
+	.cleanup	= tcf_csum_cleanup,
 	.walk		= tcf_csum_walker,
 	.lookup		= tcf_csum_search,
 	.size		= sizeof(struct tcf_csum),