From: Joerg Roedel <jroedel@suse.de>
Date: Fri, 2 Jun 2017 15:44:57 +0200
Subject: iommu/amd: Add locking to per-domain flush-queue
Git-commit: e241f8e76c152e000d481fc8334d41d22c013fe8
Patch-mainline: v4.13-rc1
References: bsc#1045709

With locking we can safely access the flush-queues of other
CPUs.
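
For context, the structure being protected is a per-CPU ring buffer
whose head and tail may only move while queue->lock is held; the
assert_spin_locked() calls added below document that invariant at
each helper. The following is a minimal userspace sketch of the same
locking discipline, not the kernel code: the pthread spinlock and
the simplified handling of a full ring are illustrative stand-ins
(the real queue_add() flushes the domain TLB and releases the queued
IOVAs instead, and takes the lock with spin_lock_irqsave()).

	#include <pthread.h>
	#include <stdio.h>

	#define FLUSH_QUEUE_SIZE 8

	struct flush_queue_entry {
		unsigned long iova_pfn;
		unsigned long pages;
	};

	/* Userspace stand-in for the kernel's struct flush_queue. */
	struct flush_queue {
		struct flush_queue_entry entries[FLUSH_QUEUE_SIZE];
		unsigned head, tail;
		pthread_spinlock_t lock;	/* spinlock_t in the kernel */
	};

	/* Ring is full when advancing tail would collide with head. */
	static int queue_ring_full(struct flush_queue *queue)
	{
		return ((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head;
	}

	/* Reserve the next slot; caller must hold queue->lock. */
	static unsigned queue_ring_add(struct flush_queue *queue)
	{
		unsigned idx = queue->tail;

		queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
		return idx;
	}

	static void queue_add(struct flush_queue *queue,
			      unsigned long iova_pfn, unsigned long pages)
	{
		unsigned idx;

		/* spin_lock_irqsave() in the kernel patch */
		pthread_spin_lock(&queue->lock);

		/* Stand-in for domain_flush_tlb() + queue_release(). */
		if (queue_ring_full(queue))
			queue->head = queue->tail;

		idx = queue_ring_add(queue);
		queue->entries[idx].iova_pfn = iova_pfn;
		queue->entries[idx].pages    = pages;

		pthread_spin_unlock(&queue->lock);
	}

	int main(void)
	{
		struct flush_queue queue = { .head = 0, .tail = 0 };

		pthread_spin_init(&queue.lock, PTHREAD_PROCESS_PRIVATE);
		queue_add(&queue, 0x1000, 1);
		printf("tail=%u\n", queue.tail);
		pthread_spin_destroy(&queue.lock);
		return 0;
	}

The patch itself uses spin_lock_irqsave()/spin_unlock_irqrestore()
rather than a plain spin lock, so the queue can be manipulated safely
regardless of the interrupt state of the caller.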

Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 drivers/iommu/amd_iommu.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -146,6 +146,7 @@ struct flush_queue_entry {
 struct flush_queue {
 	struct flush_queue_entry *entries;
 	unsigned head, tail;
+	spinlock_t lock;
 };
 
 /*
@@ -1801,6 +1802,8 @@ static int dma_ops_domain_alloc_flush_qu
 			dma_ops_domain_free_flush_queue(dom);
 			return -ENOMEM;
 		}
+
+		spin_lock_init(&queue->lock);
 	}
 
 	return 0;
@@ -1808,6 +1811,8 @@ static int dma_ops_domain_alloc_flush_qu
 
 static inline bool queue_ring_full(struct flush_queue *queue)
 {
+	assert_spin_locked(&queue->lock);
+
 	return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
 }
 
@@ -1819,6 +1824,8 @@ static void queue_release(struct dma_ops
 {
 	unsigned i;
 
+	assert_spin_locked(&queue->lock);
+
 	queue_ring_for_each(i, queue)
 		free_iova_fast(&dom->iovad,
 			       queue->entries[i].iova_pfn,
@@ -1831,6 +1838,7 @@ static inline unsigned queue_ring_add(st
 {
 	unsigned idx = queue->tail;
 
+	assert_spin_locked(&queue->lock);
 	queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
 
 	return idx;
@@ -1840,12 +1848,14 @@ static void queue_add(struct dma_ops_dom
 		      unsigned long address, unsigned long pages)
 {
 	struct flush_queue *queue;
+	unsigned long flags;
 	int idx;
 
 	pages     = __roundup_pow_of_two(pages);
 	address >>= PAGE_SHIFT;
 
 	queue = get_cpu_ptr(dom->flush_queue);
+	spin_lock_irqsave(&queue->lock, flags);
 
 	if (queue_ring_full(queue)) {
 		domain_flush_tlb(&dom->domain);
@@ -1858,6 +1868,7 @@ static void queue_add(struct dma_ops_dom
 	queue->entries[idx].iova_pfn = address;
 	queue->entries[idx].pages    = pages;
 
+	spin_unlock_irqrestore(&queue->lock, flags);
 	put_cpu_ptr(dom->flush_queue);
 }