From: Matthew Wilcox <willy@infradead.org>
Date: Mon, 18 Jun 2018 16:00:05 -0400
Subject: drm/vmwgfx: Convert to new IDA API
Git-commit: 4eb085e42fdee0314a89cd12bbd7ad67313d50dd
Patch-mainline: v4.19-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Reorder the allocation to avoid an awkward lock/unlock/lock sequence.
The code is simpler thanks to ida_alloc_max(), even if we can't
eliminate the driver's spinlock.
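
For reference, a minimal sketch of the new allocate/free pattern that
replaces the old ida_pre_get()/ida_get_new()/ida_remove() retry loop.
The ida, helper name and max_ids parameter below are illustrative, not
taken from this driver:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDA(example_ida);

	static int example_get_id(unsigned int max_ids)
	{
		/*
		 * ida_alloc_max() takes an inclusive upper bound and may
		 * sleep with GFP_KERNEL; it returns -ENOMEM or -ENOSPC on
		 * failure, so no pre-get/retry loop is needed.
		 */
		int id = ida_alloc_max(&example_ida, max_ids - 1, GFP_KERNEL);

		if (id < 0)
			return id;

		/* ... use the ID ... */

		ida_free(&example_ida, id);
		return 0;
	}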

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |   41 +++++++-------------------
 1 file changed, 12 insertions(+), 29 deletions(-)

--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct
 {
 	struct vmwgfx_gmrid_man *gman =
 		(struct vmwgfx_gmrid_man *)man->priv;
-	int ret = 0;
 	int id;
 
 	mem->mm_node = NULL;
 
+	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
 		gman->used_gmr_pages += bo->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
-			goto out_err_locked;
+			goto nospace;
 	}
 
-	do {
-		spin_unlock(&gman->lock);
-		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
-			ret = -ENOMEM;
-			goto out_err;
-		}
-		spin_lock(&gman->lock);
-
-		ret = ida_get_new(&gman->gmr_ida, &id);
-		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
-			ida_remove(&gman->gmr_ida, id);
-			ret = 0;
-			goto out_err_locked;
-		}
-	} while (ret == -EAGAIN);
-
-	if (likely(ret == 0)) {
-		mem->mm_node = gman;
-		mem->start = id;
-		mem->num_pages = bo->num_pages;
-	} else
-		goto out_err_locked;
+	mem->mm_node = gman;
+	mem->start = id;
+	mem->num_pages = bo->num_pages;
 
 	spin_unlock(&gman->lock);
 	return 0;
 
-out_err:
-	spin_lock(&gman->lock);
-out_err_locked:
+nospace:
 	gman->used_gmr_pages -= bo->num_pages;
 	spin_unlock(&gman->lock);
-	return ret;
+	ida_free(&gman->gmr_ida, id);
+	return 0;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struc
 		(struct vmwgfx_gmrid_man *)man->priv;
 
 	if (mem->mm_node) {
+		ida_free(&gman->gmr_ida, mem->start);
 		spin_lock(&gman->lock);
-		ida_remove(&gman->gmr_ida, mem->start);
 		gman->used_gmr_pages -= mem->num_pages;
 		spin_unlock(&gman->lock);
 		mem->mm_node = NULL;