Re: [PATCH RFC v1 12/15] iommu/virtio: Add support for INVALIDATE request

From: Vivek Kumar Gautam
Date: Thu Mar 04 2021 - 01:18:57 EST


Hi Jacob, Kevin,


On 3/4/21 11:28 AM, Tian, Kevin wrote:
From: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
Sent: Thursday, March 4, 2021 2:29 AM

Hi Vivek,

On Fri, 15 Jan 2021 17:43:39 +0530, Vivek Gautam <vivek.gautam@xxxxxxx>
wrote:

From: Jean-Philippe Brucker <jean-philippe.brucker@xxxxxxx>

Add support for tlb invalidation ops that can send invalidation
requests to back-end virtio-iommu when stage-1 page tables are
supported.

Just curious whether it is possible to reuse the iommu uapi for invalidation and other operations.
When we started out designing the iommu uapi, the intention was to support
both emulated and virtio iommu.

IIUC this patch is about the protocol between virtio-iommu frontend and backend.
After the virtio-iommu backend receives invalidation ops, it then needs to
forward the request to the host IOMMU driver through the existing iommu
uapi that you referred to, as an emulated VT-d or SMMU would do.

Thanks a lot for looking at the patch.

Yes, this patch provides the front-end virtio interface for the invalidation requests issued during map/unmap, and for flushing the pasid tables (in other words, the CD tables for arm-smmu-v3) that virtio-iommu requested from the iommu-pasid-table library.
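To give a feel for how the flush ops added in this patch get used on the guest side: they are meant to be handed to the io-pgtable library as part of the stage-1 setup, roughly as sketched below. The real setup lives elsewhere in this series, so the config values here are placeholders, not the actual ones:

	/* Illustrative only: the values below are placeholders. */
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= true,
		.tlb		= &viommu_flush_ops,	/* ops added in this patch */
	};

	/* viommu_mm comes back as the cookie in the flush ops */
	viommu_mm->ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, viommu_mm);
	if (!viommu_mm->ops)
		return -ENOMEM;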
The kvmtool back-end virtio driver forwards these requests to the vfio driver, which then uses the iommu uapi to ask the host iommu driver to handle the invalidations.
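To sketch that forwarding step (purely illustrative: vfio_cache_invalidate() and container_fd below are stand-ins for the actual vfio plumbing, and kernel-style endian helpers are used for familiarity even though this runs in the userspace back end):

	/* Illustrative only: translate a guest INVALIDATE request into the
	 * iommu uapi info that vfio forwards to the host IOMMU driver.
	 */
	static int backend_forward_invalidate(int container_fd,
					      struct virtio_iommu_req_invalidate *req)
	{
		struct iommu_cache_invalidate_info info = {
			.argsz		= sizeof(info),
			.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
			.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
		};

		switch (le32_to_cpu(req->inv_gran)) {
		case VIRTIO_IOMMU_INVAL_G_PASID:
			info.granularity = IOMMU_INV_GRANU_PASID;
			info.granu.pasid_info.flags = IOMMU_INV_PASID_FLAGS_PASID;
			info.granu.pasid_info.pasid = le32_to_cpu(req->pasid);
			break;
		case VIRTIO_IOMMU_INVAL_G_VA:
			info.granularity = IOMMU_INV_GRANU_ADDR;
			info.granu.addr_info.flags = IOMMU_INV_ADDR_FLAGS_ARCHID;
			info.granu.addr_info.archid = le64_to_cpu(req->archid);
			info.granu.addr_info.addr = le64_to_cpu(req->virt_start);
			info.granu.addr_info.granule_size = 1ULL << req->granule;
			info.granu.addr_info.nb_granules = le64_to_cpu(req->nr_pages);
			break;
		default:
			return -EINVAL;
		}

		/* stand-in for the vfio call that reaches the host driver */
		return vfio_cache_invalidate(container_fd, &info);
	}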

Regards
Vivek


Thanks
Kevin


Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@xxxxxxx>
[Vivek: Refactoring the iommu_flush_ops, and adding only one pasid sync
op that's needed with current iommu-pasid-table infrastructure.
Also updating uapi defines as required by latest changes]
Signed-off-by: Vivek Gautam <vivek.gautam@xxxxxxx>
Cc: Joerg Roedel <joro@xxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: Michael S. Tsirkin <mst@xxxxxxxxxx>
Cc: Robin Murphy <robin.murphy@xxxxxxx>
Cc: Jean-Philippe Brucker <jean-philippe@xxxxxxxxxx>
Cc: Eric Auger <eric.auger@xxxxxxxxxx>
Cc: Alex Williamson <alex.williamson@xxxxxxxxxx>
Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
Cc: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
Cc: Liu Yi L <yi.l.liu@xxxxxxxxx>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@xxxxxxx>
Cc: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@xxxxxxxxxx>
---
 drivers/iommu/virtio-iommu.c | 95 ++++++++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index ae5dfd3f8269..004ea94e3731 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -13,6 +13,7 @@
 #include <linux/freezer.h>
 #include <linux/interval_tree.h>
 #include <linux/iommu.h>
+#include <linux/io-pgtable.h>
 #include <linux/module.h>
 #include <linux/of_iommu.h>
 #include <linux/of_platform.h>
@@ -63,6 +64,8 @@ struct viommu_mapping {
 };
 
 struct viommu_mm {
+	int			pasid;
+	u64			archid;
 	struct io_pgtable_ops	*ops;
 	struct viommu_domain	*domain;
 };
@@ -692,6 +695,98 @@ static void viommu_event_handler(struct virtqueue *vq)
 	virtqueue_kick(vq);
 }
 
+/* PASID and pgtable APIs */
+
+static void __viommu_flush_pasid_tlb_all(struct viommu_domain *vdomain,
+					 int pasid, u64 arch_id, int type)
+{
+	struct virtio_iommu_req_invalidate req = {
+		.head.type	= VIRTIO_IOMMU_T_INVALIDATE,
+		.inv_gran	= cpu_to_le32(VIRTIO_IOMMU_INVAL_G_PASID),
+		.flags		= cpu_to_le32(VIRTIO_IOMMU_INVAL_F_PASID),
+		.inv_type	= cpu_to_le32(type),
+
+		.domain		= cpu_to_le32(vdomain->id),
+		.pasid		= cpu_to_le32(pasid),
+		.archid		= cpu_to_le64(arch_id),
+	};
+
+	if (viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)))
+		pr_debug("could not send invalidate request\n");
+}
+
+static void viommu_flush_tlb_add(struct iommu_iotlb_gather *gather,
+				 unsigned long iova, size_t granule,
+				 void *cookie)
+{
+	struct viommu_mm *viommu_mm = cookie;
+	struct viommu_domain *vdomain = viommu_mm->domain;
+	struct iommu_domain *domain = &vdomain->domain;
+
+	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
+}
+
+static void viommu_flush_tlb_walk(unsigned long iova, size_t size,
+				  size_t granule, void *cookie)
+{
+	struct viommu_mm *viommu_mm = cookie;
+	struct viommu_domain *vdomain = viommu_mm->domain;
+	struct virtio_iommu_req_invalidate req = {
+		.head.type	= VIRTIO_IOMMU_T_INVALIDATE,
+		.inv_gran	= cpu_to_le32(VIRTIO_IOMMU_INVAL_G_VA),
+		.inv_type	= cpu_to_le32(VIRTIO_IOMMU_INV_T_IOTLB),
+		.flags		= cpu_to_le32(VIRTIO_IOMMU_INVAL_F_ARCHID),
+
+		.domain		= cpu_to_le32(vdomain->id),
+		.pasid		= cpu_to_le32(viommu_mm->pasid),
+		.archid		= cpu_to_le64(viommu_mm->archid),
+		.virt_start	= cpu_to_le64(iova),
+		.nr_pages	= cpu_to_le64(size / granule),
+		.granule	= ilog2(granule),
+	};
+
+	if (viommu_add_req(vdomain->viommu, &req, sizeof(req)))
+		pr_debug("could not add invalidate request\n");
+}
+
+static void viommu_flush_tlb_all(void *cookie)
+{
+	struct viommu_mm *viommu_mm = cookie;
+
+	if (!viommu_mm->archid)
+		return;
+
+	__viommu_flush_pasid_tlb_all(viommu_mm->domain, viommu_mm->pasid,
+				     viommu_mm->archid,
+				     VIRTIO_IOMMU_INV_T_IOTLB);
+}
+
+static struct iommu_flush_ops viommu_flush_ops = {
+	.tlb_flush_all	= viommu_flush_tlb_all,
+	.tlb_flush_walk	= viommu_flush_tlb_walk,
+	.tlb_add_page	= viommu_flush_tlb_add,
+};
+
+static void viommu_flush_pasid(void *cookie, int pasid, bool leaf)
+{
+	struct viommu_domain *vdomain = cookie;
+	struct virtio_iommu_req_invalidate req = {
+		.head.type	= VIRTIO_IOMMU_T_INVALIDATE,
+		.inv_gran	= cpu_to_le32(VIRTIO_IOMMU_INVAL_G_PASID),
+		.inv_type	= cpu_to_le32(VIRTIO_IOMMU_INV_T_PASID),
+		.flags		= cpu_to_le32(VIRTIO_IOMMU_INVAL_F_PASID),
+
+		.domain		= cpu_to_le32(vdomain->id),
+		.pasid		= cpu_to_le32(pasid),
+	};
+
+	if (leaf)
+		req.flags |= cpu_to_le32(VIRTIO_IOMMU_INVAL_F_LEAF);
+
+	if (viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)))
+		pr_debug("could not send invalidate request\n");
+}
+
 /* IOMMU API */
 
 static struct iommu_domain *viommu_domain_alloc(unsigned type)

Thanks,

Jacob