[PATCH v4 4/5] iommu/uapi: Handle data and argsz filled by users

From: Jacob Pan
Date: Tue Jul 07 2020 - 19:37:26 EST


IOMMU UAPI data has a user-filled argsz field which indicates the length
of the data passed with the API call. User data is not trusted; argsz
must be validated against the current kernel data size, the mandatory
data size, and feature flags.

User data may also be extended, resulting in a possible argsz increase.
Backward compatibility is ensured by checking size and flags.

This patch adds sanity checks in the IOMMU layer. In addition to argsz,
reserved/unused fields in padding, flags, and version are also checked.
Details are documented in Documentation/userspace-api/iommu.rst.
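
For illustration, a minimal sketch of the expected userspace usage
(assuming the UAPI definitions from include/uapi/linux/iommu.h; how the
data reaches the kernel, e.g. through a VFIO ioctl, is outside this
patch):

    struct iommu_cache_invalidate_info info = { 0 };

    info.argsz = sizeof(info);      /* user's view of the struct size */
    info.version = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
    info.cache = IOMMU_CACHE_INV_TYPE_IOTLB;
    info.granularity = IOMMU_INV_GRANU_DOMAIN;
    /* Padding and unused union members must remain zero, otherwise
     * the checks added below reject the call with -EINVAL. */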

Signed-off-by: Liu Yi L <yi.l.liu@xxxxxxxxx>
Signed-off-by: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
---
drivers/iommu/iommu.c | 208 ++++++++++++++++++++++++++++++++++++++++++++++++--
include/linux/iommu.h | 9 ++-
2 files changed, 206 insertions(+), 11 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d43120eb1dc5..7910249f5dd7 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1950,33 +1950,225 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

+/*
+ * Check flags and other user provided data for valid combinations. We also
+ * make sure no reserved fields or unused flags are set. This is to avoid
+ * breaking userspace in the future when these fields or flags are used.
+ */
+static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
+{
+ int ret = 0;
+ u32 mask;
+
+ if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+ return -EINVAL;
+
+ mask = IOMMU_CACHE_INV_TYPE_IOTLB |
+ IOMMU_CACHE_INV_TYPE_DEV_IOTLB |
+ IOMMU_CACHE_INV_TYPE_PASID;
+ if (info->cache & ~mask) {
+ pr_warn_ratelimited("Invalid cache types %x\n", info->cache);
+ return -EINVAL;
+ }
+
+ if (info->granularity >= IOMMU_INV_GRANU_NR) {
+ pr_warn_ratelimited("Invalid cache invalidation granu %x\n",
+ info->granularity);
+ return -EINVAL;
+ }
+
+ switch (info->granularity) {
+ case IOMMU_INV_GRANU_ADDR:
+ mask = IOMMU_INV_ADDR_FLAGS_PASID |
+ IOMMU_INV_ADDR_FLAGS_ARCHID |
+ IOMMU_INV_ADDR_FLAGS_LEAF;
+
+ if (info->granu.addr_info.flags & ~mask) {
+ pr_warn_ratelimited("Unsupported invalidation addr flags %x\n",
+ info->granu.addr_info.flags);
+ ret = -EINVAL;
+ }
+ break;
+ case IOMMU_INV_GRANU_PASID:
+ mask = IOMMU_INV_PASID_FLAGS_PASID |
+ IOMMU_INV_PASID_FLAGS_ARCHID;
+ if (info->granu.pasid_info.flags & ~mask) {
+ pr_warn_ratelimited("Unsupported invalidation PASID flags%x\n",
+ info->granu.pasid_info.flags);
+ ret = -EINVAL;
+ }
+ break;
+ }
+
+ if (info->padding[0] || info->padding[1]) {
+ pr_warn_ratelimited("Non-zero reserved fields\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
- struct iommu_cache_invalidate_info *inv_info)
+ void __user *uinfo)
{
+ struct iommu_cache_invalidate_info inv_info;
+ unsigned long minsz, maxsz;
+ int ret = 0;
+
if (unlikely(!domain->ops->cache_invalidate))
return -ENODEV;

- return domain->ops->cache_invalidate(domain, dev, inv_info);
+ /* Current kernel data size is the max to be copied from user */
+ maxsz = sizeof(struct iommu_cache_invalidate_info);
+ memset((void *)&inv_info, 0, maxsz);
+
+ /*
+ * No new fields can be added before the variable-sized union; the
+ * minimum size is the offset to the union.
+ */
+ minsz = offsetof(struct iommu_cache_invalidate_info, granu);
+
+ /* Copy minsz from user to get flags and argsz */
+ if (copy_from_user(&inv_info, uinfo, minsz))
+ return -EFAULT;
+
+ /* Fields before the variable-sized union are mandatory */
+ if (inv_info.argsz < minsz)
+ return -EINVAL;
+
+ /*
+ * User might be using a newer UAPI header which has a larger data
+ * size; we only support the existing flags, within the current
+ * kernel data size.
+ */
+ if (inv_info.argsz > maxsz)
+ inv_info.argsz = maxsz;
+
+ /* Copy the remaining user data _after_ minsz */
+ if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
+ inv_info.argsz - minsz))
+ return -EFAULT;
+
+ /* Now that argsz is validated, check the content */
+ ret = iommu_check_cache_invl_data(&inv_info);
+ if (ret)
+ return ret;
+
+ return domain->ops->cache_invalidate(domain, dev, &inv_info);
}
EXPORT_SYMBOL_GPL(iommu_cache_invalidate);

-int iommu_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data)
+
+static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
+ u32 mask;
+ int i;
+
+ if (data->version != IOMMU_GPASID_BIND_VERSION_1)
+ return -EINVAL;
+
+ /* Check all supported formats, for now just VT-d */
+ mask = IOMMU_PASID_FORMAT_INTEL_VTD;
+ if (data->format & ~mask)
+ return -EINVAL;
+
+ /* Check all flags */
+ mask = IOMMU_SVA_GPASID_VAL;
+ if (data->flags & ~mask)
+ return -EINVAL;
+
+ /* Check reserved padding fields */
+ for (i = 0; i < 12; i++) {
+ if (data->padding[i]) {
+ pr_warn_ratelimited("Non-zero reserved field\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int iommu_sva_prepare_bind_data(void __user *udata,
+ struct iommu_gpasid_bind_data *data)
+{
+ unsigned long minsz, maxsz;
+
+ /* Current kernel data size is the max to be copied from user */
+ maxsz = sizeof(struct iommu_gpasid_bind_data);
+ memset((void *)data, 0, maxsz);
+
+ /*
+ * No new fields can be added before the variable-sized union; the
+ * minimum size is the offset to the union.
+ */
+ minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
+
+ /* Copy minsz from user to get flags and argsz */
+ if (copy_from_user(data, udata, minsz))
+ return -EFAULT;
+
+ /* Fields before the variable-sized union are mandatory */
+ if (data->argsz < minsz)
+ return -EINVAL;
+ /*
+ * User might be using a newer UAPI header; we let the IOMMU vendor
+ * driver decide on the size it needs. Since the guest PASID bind data
+ * can be vendor specific, a larger argsz could be the result of an
+ * extension for one vendor but should not affect another vendor.
+ */
+ if (data->argsz > maxsz)
+ data->argsz = maxsz;
+
+ /* Copy the remaining user data _after_ minsz */
+ if (copy_from_user((void *)data + minsz, udata + minsz,
+ data->argsz - minsz))
+ return -EFAULT;
+
+ return iommu_check_bind_data(data);
+}
+
+int iommu_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ void __user *udata)
+{
+
+ struct iommu_gpasid_bind_data data;
+ int ret;
+
if (unlikely(!domain->ops->sva_bind_gpasid))
return -ENODEV;

- return domain->ops->sva_bind_gpasid(domain, dev, data);
+ ret = iommu_sva_prepare_bind_data(udata, &data);
+ if (ret)
+ return ret;
+
+ return domain->ops->sva_bind_gpasid(domain, dev, &data);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);

-int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
- ioasid_t pasid)
+int __iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
+ struct iommu_gpasid_bind_data *data)
{
if (unlikely(!domain->ops->sva_unbind_gpasid))
return -ENODEV;

- return domain->ops->sva_unbind_gpasid(dev, pasid);
+ return domain->ops->sva_unbind_gpasid(dev, data->hpasid);
+}
+EXPORT_SYMBOL_GPL(__iommu_sva_unbind_gpasid);
+
+int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
+ void __user *udata)
+{
+ struct iommu_gpasid_bind_data data;
+ int ret;
+
+ if (unlikely(!domain->ops->sva_unbind_gpasid))
+ return -ENODEV;
+
+ ret = iommu_sva_prepare_bind_data(udata, &data);
+ if (ret)
+ return ret;
+
+ return __iommu_sva_unbind_gpasid(domain, dev, &data);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 5f0b7859d2eb..7ca9d48c276c 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -432,11 +432,14 @@ extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
struct device *dev,
- struct iommu_cache_invalidate_info *inv_info);
+ void __user *uinfo);
+
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data);
+ struct device *dev, void __user *udata);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid);
+ struct device *dev, void __user *udata);
+extern int __iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
--
2.7.4