[PATCH v1 10/14] vfio/type1: Pass iommu and dma objects through to vaddr_get_pfns

From: Alex Williamson
Date: Mon Mar 08 2021 - 16:49:53 EST


We'll need the iommu and dma objects in vaddr_get_pfns() to track vfio
device mappings, so pass them through from all callers.

Signed-off-by: Alex Williamson <alex.williamson@xxxxxxxxxx>
---
drivers/vfio/vfio_iommu_type1.c | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
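
For readers skimming the diff, here is a condensed sketch of the new
calling convention. It is illustrative only and mirrors the hunks below
(struct definitions and error handling elided), not additional code:

    static int vaddr_get_pfns(struct vfio_iommu *iommu, struct vfio_dma *dma,
                              struct mm_struct *mm, unsigned long vaddr,
                              long npages, unsigned long *pfn,
                              struct page **pages);

    /* Callers now pass the owning iommu and dma objects, e.g. from
     * vfio_pin_pages_remote():
     */
    ret = vaddr_get_pfns(iommu, dma, mm, vaddr, req_pages,
                         &pfn, batch->pages);

The prot flags previously passed by value are now read from dma->prot
inside vaddr_get_pfns(), so this is plumbing only; the iommu and dma
pointers are not otherwise used until later patches in this series need
them to track device mappings.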

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index f7d35a114354..f22c07a40521 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -579,15 +579,16 @@ static int unmap_dma_pfn_list(struct vfio_iommu *iommu, struct vfio_dma *dma,
* Returns the positive number of pfns successfully obtained or a negative
* error code.
*/
-static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
- long npages, int prot, unsigned long *pfn,
+static int vaddr_get_pfns(struct vfio_iommu *iommu, struct vfio_dma *dma,
+ struct mm_struct *mm, unsigned long vaddr,
+ long npages, unsigned long *pfn,
struct page **pages)
{
struct vm_area_struct *vma;
unsigned int flags = 0;
int ret;

- if (prot & IOMMU_WRITE)
+ if (dma->prot & IOMMU_WRITE)
flags |= FOLL_WRITE;

mmap_read_lock(mm);
@@ -604,7 +605,8 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
vma = find_vma_intersection(mm, vaddr, vaddr + 1);

if (vma && vma->vm_flags & VM_PFNMAP) {
- ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+ ret = follow_fault_pfn(vma, mm, vaddr, pfn,
+ dma->prot & IOMMU_WRITE);
if (ret == -EAGAIN)
goto retry;

@@ -680,7 +682,8 @@ static int vfio_wait_all_valid(struct vfio_iommu *iommu)
* the iommu can only map chunks of consecutive pfns anyway, so get the
* first page and all consecutive pages with the same locking.
*/
-static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
+static long vfio_pin_pages_remote(struct vfio_iommu *iommu,
+ struct vfio_dma *dma, unsigned long vaddr,
long npage, unsigned long *pfn_base,
unsigned long limit, struct vfio_batch *batch)
{
@@ -708,7 +711,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
/* Empty batch, so refill it. */
long req_pages = min_t(long, npage, batch->capacity);

- ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot,
+ ret = vaddr_get_pfns(iommu, dma, mm, vaddr, req_pages,
&pfn, batch->pages);
if (ret < 0)
goto unpin_out;
@@ -806,7 +809,8 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
return unlocked;
}

-static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
+static int vfio_pin_page_external(struct vfio_iommu *iommu,
+ struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
struct page *pages[1];
@@ -817,7 +821,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
if (!mm)
return -ENODEV;

- ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
+ ret = vaddr_get_pfns(iommu, dma, mm, vaddr, 1, pfn_base, pages);
if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
ret = vfio_lock_acct(dma, 1, true);
if (ret) {
@@ -925,8 +929,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
}

remote_vaddr = dma->vaddr + (iova - dma->iova);
- ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
- do_accounting);
+ ret = vfio_pin_page_external(iommu, dma, remote_vaddr,
+ &phys_pfn[i], do_accounting);
if (ret)
goto pin_unwind;

@@ -1497,7 +1501,7 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,

while (size) {
/* Pin a contiguous chunk of memory */
- npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
+ npage = vfio_pin_pages_remote(iommu, dma, vaddr + dma->size,
size >> PAGE_SHIFT, &pfn, limit,
&batch);
if (npage <= 0) {
@@ -1759,7 +1763,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
size_t n = dma->iova + dma->size - iova;
long npage;

- npage = vfio_pin_pages_remote(dma, vaddr,
+ npage = vfio_pin_pages_remote(iommu, dma, vaddr,
n >> PAGE_SHIFT,
&pfn, limit,
&batch);