From: Matthew Wilcox <mawilcox@microsoft.com>
Date: Tue, 28 Nov 2017 12:05:54 -0500
Subject: cls_u32: Reinstate cyclic allocation
Patch-mainline: v4.16-rc1
Git-commit: ffdc2d9e1afd20e9f9d205115661481e984542d6
References: bsc#1109837

Commit e7614370d6f0 ("net_sched: use idr to allocate u32 filter handles")
converted htid allocation to use the IDR.  That change altered the IDs
being handed out: allocation used to be cyclic, but idr_alloc() always
returns the lowest available ID.  The IDR supports cyclic allocation as
well, so just use the right function.
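
For illustration only, a minimal sketch of the behavioural difference
between the two allocators, using the stock <linux/idr.h> API.  The IDR
instance, function name and pointers below are made up for the example:

    #include <linux/idr.h>

    static DEFINE_IDR(example_idr);

    static void idr_cyclic_demo(void *p1, void *p2)
    {
            int a, b;

            /* idr_alloc() always hands out the lowest free ID, so a
             * freed ID is reused by the very next allocation.
             */
            a = idr_alloc(&example_idr, p1, 1, 0x7FF, GFP_KERNEL);
            idr_remove(&example_idr, a);
            b = idr_alloc(&example_idr, p2, 1, 0x7FF, GFP_KERNEL);
            /* b == a */

            /* idr_alloc_cyclic() keeps a cursor and continues after
             * the most recent allocation, wrapping back to the start
             * of the range once it is exhausted.
             */
            a = idr_alloc_cyclic(&example_idr, p1, 1, 0x7FF, GFP_KERNEL);
            idr_remove(&example_idr, a);
            b = idr_alloc_cyclic(&example_idr, p2, 1, 0x7FF, GFP_KERNEL);
            /* b == a + 1 (until the range wraps) */
    }

With the cyclic allocator, the htid that gen_new_htid() places in the
upper bits of the handle (the (id | 0x800U) << 20 below) advances on
each allocation again instead of immediately reusing a just-freed value.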

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 net/sched/cls_u32.c |   14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -317,19 +317,13 @@ static void *u32_get(struct tcf_proto *t
 	return u32_lookup_key(ht, handle);
 }
 
+/* Protected by rtnl lock */
 static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
 {
-	unsigned long idr_index;
-	int err;
-
-	/* This is only used inside rtnl lock it is safe to increment
-	 * without read _copy_ update semantics
-	 */
-	err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index,
-			    1, 0x7FF, GFP_KERNEL);
-	if (err)
+	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
+	if (id < 0)
 		return 0;
-	return (u32)(idr_index | 0x800) << 20;
+	return (id | 0x800U) << 20;
 }
 
 static struct hlist_head *tc_u_common_hash;