[PATCH 5.15 905/913] iommu/dma: Skip extra sync during unmap w/swiotlb

From: Greg Kroah-Hartman
Date: Tue Apr 05 2022 - 17:38:12 EST


From: David Stevens <stevensd@xxxxxxxxxxxx>

commit ee9d4097cc145dcaebedf6b113d17c91c21333a0 upstream.

Calling the iommu_dma_sync_*_for_cpu functions during unmap can cause
two copies out of the swiotlb buffer. Do the arch sync directly in
__iommu_dma_unmap_swiotlb instead to avoid this. The
iommu_dma_sync_sg_for_cpu call in iommu_dma_unmap_sg is then no longer
needed for untrusted devices, so move that invocation below the
untrusted-device early return in the function.
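
The extra copy happens because, on a DMA_FROM_DEVICE unmap of a bounced
buffer, both the explicit iommu_dma_sync_*_for_cpu call and
swiotlb_tbl_unmap_single copy the bounce buffer back to the original
buffer. A minimal userspace sketch of the old versus new ordering,
where every name (bounce_buf, orig_buf, sync_for_cpu, unmap_bounce,
copy_count) is a made-up stand-in for the swiotlb helpers rather than
kernel API:

#include <stdio.h>
#include <string.h>

static char bounce_buf[64] = "data written by the device";
static char orig_buf[64];
static int copy_count;

/* models the swiotlb part of iommu_dma_sync_*_for_cpu() on a bounced,
 * non-coherent mapping: copy the bounce buffer back to the caller's
 * buffer */
static void sync_for_cpu(void)
{
	memcpy(orig_buf, bounce_buf, sizeof(orig_buf));
	copy_count++;
}

/* models swiotlb_tbl_unmap_single(), which performs the same copy back
 * unless DMA_ATTR_SKIP_CPU_SYNC was passed */
static void unmap_bounce(int skip_cpu_sync)
{
	if (!skip_cpu_sync) {
		memcpy(orig_buf, bounce_buf, sizeof(orig_buf));
		copy_count++;
	}
}

int main(void)
{
	/* old unmap path: explicit sync, then unmap -> two copies */
	copy_count = 0;
	sync_for_cpu();
	unmap_bounce(0);
	printf("old path: %d copies\n", copy_count);

	/* new unmap path: skip the explicit sync and let the unmap do
	 * the single copy out of the bounce buffer */
	copy_count = 0;
	unmap_bounce(0);
	printf("new path: %d copies\n", copy_count);
	return 0;
}

With the patch applied, __iommu_dma_unmap_swiotlb only performs the
arch cache sync itself and leaves the single copy out of the bounce
buffer to swiotlb_tbl_unmap_single.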

Signed-off-by: David Stevens <stevensd@xxxxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Robin Murphy <robin.murphy@xxxxxxx>
Link: https://lore.kernel.org/r/20210929023300.335969-4-stevensd@xxxxxxxxxx
Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
Cc: Mario Limonciello <Mario.Limonciello@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
drivers/iommu/dma-iommu.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)

--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -521,6 +521,9 @@ static void __iommu_dma_unmap_swiotlb(st
 	if (WARN_ON(!phys))
 		return;
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu(phys, size, dir);
+
 	__iommu_dma_unmap(dev, dma_addr, size);
 
 	if (unlikely(is_swiotlb_buffer(dev, phys)))
@@ -871,8 +874,6 @@ static dma_addr_t iommu_dma_map_page(str
 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
 	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
 }

@@ -1089,14 +1090,14 @@ static void iommu_dma_unmap_sg(struct de
 	struct scatterlist *tmp;
 	int i;
 
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
-
 	if (dev_is_untrusted(dev)) {
 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
 		return;
 	}
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
 	/*
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.