From a6ae74c956e72c9b57f29576bd6cb7eb1ad4006c Mon Sep 17 00:00:00 2001
From: Rob Clark <robdclark@chromium.org>
Date: Fri, 23 Oct 2020 09:51:03 -0700
Subject: drm/msm/gem: Add obj->lock wrappers
Git-commit: a6ae74c956e72c9b57f29576bd6cb7eb1ad4006c
Patch-mainline: v5.11-rc1
References: bsc#1152489

This will make it easier to transition everything that currently uses
per-bo locking over to obj->resv locking.
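
As a hypothetical sketch of where this is headed (not part of this
patch): once the conversion is done, the same wrappers could be
reimplemented on top of the dma_resv lock already embedded in the GEM
object, without touching any call site, e.g.:

    /* sketch only: possible shape of the wrappers after the
     * obj->resv conversion
     */
    static inline void
    msm_gem_lock(struct drm_gem_object *obj)
    {
            dma_resv_lock(obj->resv, NULL);
    }

    static inline void
    msm_gem_unlock(struct drm_gem_object *obj)
    {
            dma_resv_unlock(obj->resv);
    }

Callers keep using msm_gem_lock()/msm_gem_unlock() either way, which
is the point of introducing the wrappers now.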

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Kristian H. Kristensen <hoegsberg@google.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
---
 drivers/gpu/drm/msm/msm_gem.c |   99 +++++++++++++++++++-----------------------
 drivers/gpu/drm/msm/msm_gem.h |   28 +++++++++++
 2 files changed, 74 insertions(+), 53 deletions(-)

--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -188,15 +188,15 @@ struct page **msm_gem_get_pages(struct d
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **p;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 
 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		return ERR_PTR(-EBUSY);
 	}
 
 	p = get_pages(obj);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return p;
 }
 
@@ -262,14 +262,14 @@ vm_fault_t msm_gem_fault(struct vm_fault
 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
 	 * a reference on obj. So, we dont need to hold one here.
 	 */
-	err = mutex_lock_interruptible(&msm_obj->lock);
+	err = msm_gem_lock_interruptible(obj);
 	if (err) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
 
 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -290,7 +290,7 @@ vm_fault_t msm_gem_fault(struct vm_fault
 
 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 out_unlock:
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 out:
 	return ret;
 }
@@ -299,10 +299,9 @@ out:
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	/* Make it mmapable */
 	ret = drm_gem_create_mmap_offset(obj);
@@ -318,11 +317,10 @@ static uint64_t mmap_offset(struct drm_g
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	offset = mmap_offset(obj);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return offset;
 }
 
@@ -332,7 +330,7 @@ static struct msm_gem_vma *add_vma(struc
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (!vma)
@@ -351,7 +349,7 @@ static struct msm_gem_vma *lookup_vma(st
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	list_for_each_entry(vma, &msm_obj->vmas, list) {
 		if (vma->aspace == aspace)
@@ -370,14 +368,14 @@ static void del_vma(struct msm_gem_vma *
 	kfree(vma);
 }
 
-/* Called with msm_obj->lock locked */
+/* Called with msm_obj locked */
 static void
 put_iova(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma, *tmp;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 		if (vma->aspace) {
@@ -392,11 +390,10 @@ static int msm_gem_get_iova_locked(struc
 		struct msm_gem_address_space *aspace, uint64_t *iova,
 		u64 range_start, u64 range_end)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	vma = lookup_vma(obj, aspace);
 
@@ -431,7 +428,7 @@ static int msm_gem_pin_iova(struct drm_g
 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
 		prot |= IOMMU_PRIV;
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
 		return -EBUSY;
@@ -456,11 +453,10 @@ int msm_gem_get_and_pin_iova_range(struc
 		struct msm_gem_address_space *aspace, uint64_t *iova,
 		u64 range_start, u64 range_end)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	u64 local;
 	int ret;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 
 	ret = msm_gem_get_iova_locked(obj, aspace, &local,
 		range_start, range_end);
@@ -471,7 +467,7 @@ int msm_gem_get_and_pin_iova_range(struc
 	if (!ret)
 		*iova = local;
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return ret;
 }
 
@@ -489,12 +485,11 @@ int msm_gem_get_and_pin_iova(struct drm_
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 
 	return ret;
 }
@@ -505,12 +500,11 @@ int msm_gem_get_iova(struct drm_gem_obje
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	vma = lookup_vma(obj, aspace);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	WARN_ON(!vma);
 
 	return vma ? vma->iova : 0;
@@ -524,16 +518,15 @@ uint64_t msm_gem_iova(struct drm_gem_obj
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	vma = lookup_vma(obj, aspace);
 
 	if (!WARN_ON(!vma))
 		msm_gem_unmap_vma(aspace, vma);
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }
 
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -574,20 +567,20 @@ static void *get_vaddr(struct drm_gem_ob
 	if (obj->import_attach)
 		return ERR_PTR(-ENODEV);
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 
 	if (WARN_ON(msm_obj->madv > madv)) {
 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 			msm_obj->madv, madv);
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		return ERR_PTR(-EBUSY);
 	}
 
 	/* increment vmap_count *before* vmap() call, so shrinker can
-	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
 	 * This guarantees that we won't try to msm_gem_vunmap() this
 	 * same object from within the vmap() call (while we already
-	 * hold msm_obj->lock)
+	 * hold msm_obj lock)
 	 */
 	msm_obj->vmap_count++;
 
@@ -605,12 +598,12 @@ static void *get_vaddr(struct drm_gem_ob
 		}
 	}
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return msm_obj->vaddr;
 
 fail:
 	msm_obj->vmap_count--;
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return ERR_PTR(ret);
 }
 
@@ -634,10 +627,10 @@ void msm_gem_put_vaddr(struct drm_gem_ob
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	WARN_ON(msm_obj->vmap_count < 1);
 	msm_obj->vmap_count--;
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }
 
 /* Update madvise status, returns true if not purged, else
@@ -647,7 +640,7 @@ int msm_gem_madvise(struct drm_gem_objec
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
@@ -656,7 +649,7 @@ int msm_gem_madvise(struct drm_gem_objec
 
 	madv = msm_obj->madv;
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 
 	return (madv != __MSM_MADV_PURGED);
 }
@@ -693,14 +686,14 @@ void msm_gem_purge(struct drm_gem_object
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 			0, (loff_t)-1);
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }
 
 static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));
 
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
@@ -715,7 +708,7 @@ void msm_gem_vunmap(struct drm_gem_objec
 
 	mutex_lock_nested(&msm_obj->lock, subclass);
 	msm_gem_vunmap_locked(obj);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }
 
 /* must be called before _move_to_active().. */
@@ -826,7 +819,7 @@ void msm_gem_describe(struct drm_gem_obj
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 
 	switch (msm_obj->madv) {
 	case __MSM_MADV_PURGED:
@@ -877,7 +870,7 @@ void msm_gem_describe(struct drm_gem_obj
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -922,7 +915,7 @@ static void free_object(struct msm_gem_o
 
 	list_del(&msm_obj->mm_list);
 
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 
 	put_iova(obj);
 
@@ -943,7 +936,7 @@ static void free_object(struct msm_gem_o
 
 	drm_gem_object_release(obj);
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	kfree(msm_obj);
 }
 
@@ -1063,10 +1056,10 @@ static struct drm_gem_object *_msm_gem_n
 		struct msm_gem_vma *vma;
 		struct page **pages;
 
-		mutex_lock(&msm_obj->lock);
+		msm_gem_lock(obj);
 
 		vma = add_vma(obj, NULL);
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
 			goto fail;
@@ -1150,22 +1143,22 @@ struct drm_gem_object *msm_gem_import(st
 	npages = size / PAGE_SIZE;
 
 	msm_obj = to_msm_bo(obj);
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	msm_obj->sgt = sgt;
 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!msm_obj->pages) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		ret = -ENOMEM;
 		goto fail;
 	}
 
 	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
 	if (ret) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		goto fail;
 	}
 
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 
 	mutex_lock(&dev->struct_mutex);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -86,6 +86,34 @@ struct msm_gem_object {
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
+static inline void
+msm_gem_lock(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	mutex_lock(&msm_obj->lock);
+}
+
+static inline int
+msm_gem_lock_interruptible(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	return mutex_lock_interruptible(&msm_obj->lock);
+}
+
+static inline void
+msm_gem_unlock(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	mutex_unlock(&msm_obj->lock);
+}
+
+static inline bool
+msm_gem_is_locked(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	return mutex_is_locked(&msm_obj->lock);
+}
+
 static inline bool is_active(struct msm_gem_object *msm_obj)
 {
 	return msm_obj->gpu != NULL;