From: Thomas Hellstrom <thellstrom@vmware.com>
Date: Tue, 19 Jun 2018 19:22:16 +0200
Subject: drm/vmwgfx: Use blocking buffer object reserves when evicting
 resources
Git-commit: 19f976ab01a6bb1f36384dd8cc743b88a6b7ebd6
Patch-mainline: v4.19-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Previously when evicting resources, we unconditionally called
ttm_eu_reserve_buffers() with a NULL ww acquire context, which meant
all buffer object reserves were done with trylock semantics.
That makes sense when evicting during resource validation, because a
number of buffers are already reserved at that point and taking
waiting locks would cause lockdep errors.

That's not the case when unconditionally evicting all resources as part
of driver takedown or hibernation, so in that code path, supply a ww
acquire context to get waiting-lock buffer object reserve semantics.
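
As a minimal sketch of the two reserve modes this change distinguishes
(assuming the ttm_execbuf_util interface used by this patch: ticket,
list, interruptible, dups):

  struct ww_acquire_ctx ticket;
  LIST_HEAD(val_list);	/* list of struct ttm_validate_buffer */
  int ret;

  /* Validation path: other buffers are already reserved, so a NULL
   * ticket makes ttm_eu_reserve_buffers() trylock each buffer
   * instead of waiting on it. */
  ret = ttm_eu_reserve_buffers(NULL, &val_list, true, NULL);

  /* Takedown/hibernation path: nothing else is held, so pass a
   * ticket to reserve with waiting-lock semantics. */
  ret = ttm_eu_reserve_buffers(&ticket, &val_list, false, NULL);
  if (ret == 0)
          ttm_eu_backoff_reservation(&ticket, &val_list);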

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |   29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)
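
Note that vmw_resource_evict_type() below only declares the ticket on
the stack and never initializes it explicitly. That is sufficient
because, in this kernel's ttm_execbuf_util.c, TTM manages the acquire
context for a non-NULL ticket itself; roughly:

  struct ww_acquire_ctx ticket;	/* uninitialized stack storage */
  LIST_HEAD(val_list);
  int ret;

  /* ttm_eu_reserve_buffers() calls ww_acquire_init() on a non-NULL
   * ticket before locking the buffers... */
  ret = ttm_eu_reserve_buffers(&ticket, &val_list, false, NULL);

  /* ...and ttm_eu_backoff_reservation() calls ww_acquire_fini(). */
  ttm_eu_backoff_reservation(&ticket, &val_list);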

--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -433,6 +433,7 @@ void vmw_resource_unreserve(struct vmw_r
  *                             for a resource and in that case, allocate
  *                             one, reserve and validate it.
  *
+ * @ticket:         The ww acquire context to use, or NULL if trylocking.
  * @res:            The resource for which to allocate a backup buffer.
  * @interruptible:  Whether any sleeps during allocation should be
  *                  performed while interruptible.
@@ -440,7 +441,8 @@ void vmw_resource_unreserve(struct vmw_r
  *                  reserved and validated backup buffer.
  */
 static int
-vmw_resource_check_buffer(struct vmw_resource *res,
+vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
+			  struct vmw_resource *res,
 			  bool interruptible,
 			  struct ttm_validate_buffer *val_buf)
 {
@@ -459,7 +461,7 @@ vmw_resource_check_buffer(struct vmw_res
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
@@ -477,7 +479,7 @@ vmw_resource_check_buffer(struct vmw_res
 	return 0;
 
 out_no_validate:
-	ttm_eu_backoff_reservation(NULL, &val_list);
+	ttm_eu_backoff_reservation(ticket, &val_list);
 out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
 	if (backup_dirty)
@@ -524,10 +526,12 @@ int vmw_resource_reserve(struct vmw_reso
  * vmw_resource_backoff_reservation - Unreserve and unreference a
  *                                    backup buffer
  *.
+ * @ticket:         The ww acquire ctx used for reservation.
  * @val_buf:        Backup buffer information.
  */
 static void
-vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+				 struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
 
@@ -536,7 +540,7 @@ vmw_resource_backoff_reservation(struct
 
 	INIT_LIST_HEAD(&val_list);
 	list_add_tail(&val_buf->head, &val_list);
-	ttm_eu_backoff_reservation(NULL, &val_list);
+	ttm_eu_backoff_reservation(ticket, &val_list);
 	ttm_bo_unref(&val_buf->bo);
 }
 
@@ -544,10 +548,12 @@ vmw_resource_backoff_reservation(struct
  * vmw_resource_do_evict - Evict a resource, and transfer its data
  *                         to a backup buffer.
  *
+ * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
  * @res:            The resource to evict.
  * @interruptible:  Whether to wait interruptible.
  */
-static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
+				 struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
@@ -557,7 +563,7 @@ static int vmw_resource_do_evict(struct
 
 	val_buf.bo = NULL;
 	val_buf.shared = false;
-	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
+	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -572,7 +578,7 @@ static int vmw_resource_do_evict(struct
 	res->backup_dirty = true;
 	res->res_dirty = false;
 out_no_unbind:
-	vmw_resource_backoff_reservation(&val_buf);
+	vmw_resource_backoff_reservation(ticket, &val_buf);
 
 	return ret;
 }
@@ -626,7 +632,8 @@ int vmw_resource_validate(struct vmw_res
 
 		write_unlock(&dev_priv->resource_lock);
 
-		ret = vmw_resource_do_evict(evict_res, true);
+		/* Trylock backup buffers with a NULL ticket. */
+		ret = vmw_resource_do_evict(NULL, evict_res, true);
 		if (unlikely(ret != 0)) {
 			write_lock(&dev_priv->resource_lock);
 			list_add_tail(&evict_res->lru_head, lru_list);
@@ -809,6 +816,7 @@ static void vmw_resource_evict_type(stru
 	struct vmw_resource *evict_res;
 	unsigned err_count = 0;
 	int ret;
+	struct ww_acquire_ctx ticket;
 
 	do {
 		write_lock(&dev_priv->resource_lock);
@@ -822,7 +830,8 @@ static void vmw_resource_evict_type(stru
 		list_del_init(&evict_res->lru_head);
 		write_unlock(&dev_priv->resource_lock);
 
-		ret = vmw_resource_do_evict(evict_res, false);
+		/* Waiting-lock backup buffers with a ticket. */
+		ret = vmw_resource_do_evict(&ticket, evict_res, false);
 		if (unlikely(ret != 0)) {
 			write_lock(&dev_priv->resource_lock);
 			list_add_tail(&evict_res->lru_head, lru_list);