[RFC PATCH 20/42] iommu/vt-d: Support attaching devices to IOMMU_DOMAIN_KVM domain

From: Yan Zhao
Date: Sat Dec 02 2023 - 04:54:28 EST


IOMMU_DOMAIN_KVM domains reuse intel_iommu_attach_device() for device
attachment. But unlike attachment to other dmar_domains, domain caps (e.g.
iommu_superpage) are not updated after a device attach. Instead, IOMMU caps
are checked for compatibility before domain attachment.

Signed-off-by: Yan Zhao <yan.y.zhao@xxxxxxxxx>
---
drivers/iommu/intel/iommu.c | 11 +++++++++++
drivers/iommu/intel/iommu.h | 7 +++++++
drivers/iommu/intel/kvm.c | 9 +++++++++
3 files changed, 27 insertions(+)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index fcdee40f30ed1..9cc42b3d24f65 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -552,6 +552,13 @@ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
/* Some capabilities may be different across iommus */
void domain_update_iommu_cap(struct dmar_domain *domain)
{
+ /*
+ * No need to adjust iommu cap of kvm domain.
+ * Instead, iommu will be checked in pre-attach phase.
+ */
+ if (domain_type_is_kvm(domain))
+ return;
+
domain_update_iommu_coherency(domain);
domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);

@@ -4104,6 +4111,9 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
if (!iommu)
return -ENODEV;

+ if (domain_type_is_kvm(dmar_domain))
+ return prepare_kvm_domain_attach(dmar_domain, iommu);
+
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
return -EINVAL;

@@ -4117,6 +4127,7 @@ int prepare_domain_attach_device(struct iommu_domain *domain,

if (dmar_domain->max_addr > (1LL << addr_width))
return -EINVAL;
+
dmar_domain->gaw = addr_width;

/*
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 8826e9248f6ed..801700bc7d820 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1059,6 +1059,13 @@ static inline int width_to_agaw(int width)
#ifdef CONFIG_INTEL_IOMMU_KVM
struct iommu_domain *
intel_iommu_domain_alloc_kvm(struct device *dev, u32 flags, const void *data);
+int prepare_kvm_domain_attach(struct dmar_domain *domain, struct intel_iommu *iommu);
+#else
/* Stub when CONFIG_INTEL_IOMMU_KVM is disabled: no KVM domains exist, so
 * there is nothing to check — report success unconditionally. */
static inline int prepare_kvm_domain_attach(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
return 0;
}
#endif

#endif
diff --git a/drivers/iommu/intel/kvm.c b/drivers/iommu/intel/kvm.c
index 188ec90083051..1ce334785430b 100644
--- a/drivers/iommu/intel/kvm.c
+++ b/drivers/iommu/intel/kvm.c
@@ -32,6 +32,14 @@ static bool is_iommu_cap_compatible_to_kvm_domain(struct dmar_domain *domain,
return true;
}

/*
 * Pre-attach check for a KVM domain: verify that the candidate IOMMU's
 * capabilities are compatible with the domain before the attach proceeds.
 * Domain caps are NOT adjusted here (unlike ordinary dmar_domains).
 *
 * Return: 0 if the IOMMU is compatible, -EINVAL otherwise.
 */
int prepare_kvm_domain_attach(struct dmar_domain *domain, struct intel_iommu *iommu)
{
	return is_iommu_cap_compatible_to_kvm_domain(domain, iommu) ? 0 : -EINVAL;
}
+
/*
* Cache coherency is always enforced in KVM domain.
* IOMMU hardware caps will be checked to allow the cache coherency before
@@ -43,6 +51,7 @@ static bool kvm_domain_enforce_cache_coherency(struct iommu_domain *domain)
}

/*
 * Domain ops for IOMMU_DOMAIN_KVM: device attachment reuses the common
 * intel_iommu_attach_device() path; compatibility is checked in the
 * pre-attach phase rather than by updating domain caps after attach.
 */
static const struct iommu_domain_ops intel_kvm_domain_ops = {
.attach_dev = intel_iommu_attach_device,
.free = intel_iommu_domain_free,
.enforce_cache_coherency = kvm_domain_enforce_cache_coherency,
};
--
2.17.1