[PATCH 4/5] iommu/amd: Hold the domain lock when calling iommu_map_page

From: Filippo Sironi
Date: Tue Sep 10 2019 - 13:49:52 EST

iommu_map_page calls into __domain_flush_pages, which traverses the
domain's device list. That list is protected by the domain lock, so take
the lock around calls to iommu_map_page.
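For context, the device-list walk that needs the lock looks roughly like
this (a simplified sketch, not verbatim kernel code; only domain->lock,
domain->dev_list, and __domain_flush_pages() are taken from the driver,
the body is illustrative):

	/* Sketch: the walk over domain->dev_list that domain->lock protects */
	static void __domain_flush_pages(struct protection_domain *domain,
					 u64 address, size_t size, int pde)
	{
		struct iommu_dev_data *dev_data;

		/*
		 * Walking domain->dev_list is only safe while domain->lock
		 * is held; the attach/detach paths modify the list under
		 * that same lock.
		 */
		list_for_each_entry(dev_data, &domain->dev_list, list) {
			/* ... flush the IOTLB of each attached device ... */
		}
	}

Callers of iommu_map_page() therefore need to take domain->lock around
the call, which is what this patch does for the map_sg() and
amd_iommu_map() paths.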

Signed-off-by: Filippo Sironi <sironi@xxxxxxxxx>
---
drivers/iommu/amd_iommu.c | 5 +++++
1 file changed, 5 insertions(+)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d4f25767622e..3714ae5ded31 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2562,6 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long address;
u64 dma_mask;
int ret;
+ unsigned long flags;

domain = get_domain(dev);
if (IS_ERR(domain))
@@ -2587,7 +2588,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,

bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
+ spin_lock_irqsave(&domain->lock, flags);
ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+ spin_unlock_irqrestore(&domain->lock, flags);
if (ret)
goto out_unmap;

@@ -3095,7 +3098,9 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
prot |= IOMMU_PROT_IW;

mutex_lock(&domain->api_lock);
+ spin_lock(&domain->lock);
ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
+ spin_unlock(&domain->lock);
mutex_unlock(&domain->api_lock);

domain_flush_np_cache(domain, iova, page_size);
--
2.7.4