On 4/26/25 13:58, Nicolin Chen wrote:
For vIOMMU passing through HW resources to user space (VMs), add an mmap infrastructure to map a region of hardware MMIO pages.
Maintain an mt_mmap per ictx for validations. To allow IOMMU drivers to add and delete mmappable regions to/from the mt_mmap, add a pair of new helpers: iommufd_ctx_alloc_mmap() and iommufd_ctx_free_mmap().
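For reference, a minimal sketch of how an IOMMU driver might consume this pair; apart from the two helpers themselves, every name here (the driver structure, its fields, and the path that reports the offset to user space) is hypothetical:

/* Hypothetical driver-side usage; only the two helpers come from this patch */
static int my_viommu_expose_mmio(struct my_viommu *my_viommu)
{
	unsigned long immap_id;
	int rc;

	/* Register one page-aligned MMIO window with the iommufd core */
	rc = iommufd_ctx_alloc_mmap(my_viommu->core.ictx, my_viommu->mmio_base,
				    my_viommu->mmio_size, &immap_id);
	if (rc)
		return rc;

	/*
	 * Report immap_id to user space (e.g. in the vIOMMU allocation ioctl's
	 * response) so it can be passed as the offset argument of mmap(2).
	 */
	my_viommu->mmap_offset = immap_id;
	return 0;
}

static void my_viommu_destroy(struct my_viommu *my_viommu)
{
	iommufd_ctx_free_mmap(my_viommu->core.ictx, my_viommu->mmap_offset);
}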
I am wondering why the dma_buf mechanism isn't used here, considering that this also involves an export and import pattern.
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/iommufd/iommufd_private.h |  8 +++++
 include/linux/iommufd.h                 | 15 ++++++++++
 drivers/iommu/iommufd/driver.c          | 39 +++++++++++++++++++++++++
 drivers/iommu/iommufd/main.c            | 39 +++++++++++++++++++++++++
 4 files changed, 101 insertions(+)

diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index b974c207ae8a..db5b62ec4abb 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -7,6 +7,7 @@
 #include <linux/iommu.h>
 #include <linux/iommufd.h>
 #include <linux/iova_bitmap.h>
+#include <linux/maple_tree.h>
 #include <linux/rwsem.h>
 #include <linux/uaccess.h>
 #include <linux/xarray.h>
@@ -44,6 +45,7 @@ struct iommufd_ctx {
 	struct xarray groups;
 	wait_queue_head_t destroy_wait;
 	struct rw_semaphore ioas_creation_lock;
+	struct maple_tree mt_mmap;
 
 	struct mutex sw_msi_lock;
 	struct list_head sw_msi_list;
@@ -55,6 +57,12 @@ struct iommufd_ctx {
 	struct iommufd_ioas *vfio_ioas;
 };
 
+/* Entry for iommufd_ctx::mt_mmap */
+struct iommufd_mmap {
+	unsigned long pfn_start;
+	unsigned long pfn_end;
+};
This structure is introduced to represent a mappable/mapped region, right? It would be better to add comments specifying whether the start and end are inclusive or exclusive.
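For example, judging from iommufd_ctx_alloc_mmap() below (pfn_end = pfn_start + npages - 1), both bounds appear to be inclusive, so something along these lines:

/* A page-aligned MMIO range that a driver has registered for mmap() */
struct iommufd_mmap {
	/* First PFN of the region */
	unsigned long pfn_start;
	/* Last PFN of the region, inclusive: pfn_start + npages - 1 */
	unsigned long pfn_end;
};

With 4K pages, base = 0x10000000 and size = 0x2000 would then give pfn_start = 0x10000 and pfn_end = 0x10001.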
+
 /*
  * The IOVA to PFN map. The map automatically copies the PFNs into multiple
  * domains and permits sharing of PFNs between io_pagetable instances. This
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 5dff154e8ce1..d63e2d91be0d 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -236,6 +236,9 @@ int iommufd_object_depend(struct iommufd_object *obj_dependent,
 			  struct iommufd_object *obj_depended);
 void iommufd_object_undepend(struct iommufd_object *obj_dependent,
 			     struct iommufd_object *obj_depended);
+int iommufd_ctx_alloc_mmap(struct iommufd_ctx *ictx, phys_addr_t base,
+			   size_t size, unsigned long *immap_id);
+void iommufd_ctx_free_mmap(struct iommufd_ctx *ictx, unsigned long immap_id);
 struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
 				       unsigned long vdev_id);
 int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
@@ -262,11 +265,23 @@ static inline int iommufd_object_depend(struct iommufd_object *obj_dependent,
 	return -EOPNOTSUPP;
 }
 
+static inline int iommufd_ctx_alloc_mmap(struct iommufd_ctx *ictx,
+					 phys_addr_t base, size_t size,
+					 unsigned long *immap_id)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline void iommufd_object_undepend(struct iommufd_object *obj_dependent,
 					   struct iommufd_object *obj_depended)
 {
 }
 
+static inline void iommufd_ctx_free_mmap(struct iommufd_ctx *ictx,
+					 unsigned long immap_id)
+{
+}
+
 static inline struct device *
 iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
 {
diff --git a/drivers/iommu/iommufd/driver.c b/drivers/iommu/iommufd/driver.c
index fb7f8fe40f95..c55336c580dc 100644
--- a/drivers/iommu/iommufd/driver.c
+++ b/drivers/iommu/iommufd/driver.c
@@ -78,6 +78,45 @@ void iommufd_object_undepend(struct iommufd_object *obj_dependent,
 }
 EXPORT_SYMBOL_NS_GPL(iommufd_object_undepend, "IOMMUFD");
 
+/* Driver should report the output @immap_id to user space for mmap() syscall */
+int iommufd_ctx_alloc_mmap(struct iommufd_ctx *ictx, phys_addr_t base,
+			   size_t size, unsigned long *immap_id)
+{
+	struct iommufd_mmap *immap;
+	int rc;
+
+	if (WARN_ON_ONCE(!immap_id))
+		return -EINVAL;
+	if (base & ~PAGE_MASK)
+		return -EINVAL;
Is this check equivalent to !PAGE_ALIGNED(base)?
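If so, spelling it with the helper (and likewise !PAGE_ALIGNED(size) for the size check below) might read more clearly:

	/* Equivalent check, using PAGE_ALIGNED() from <linux/mm.h> */
	if (!PAGE_ALIGNED(base))
		return -EINVAL;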
+	if (!size || size & ~PAGE_MASK)
+		return -EINVAL;
+
+	immap = kzalloc(sizeof(*immap), GFP_KERNEL);
+	if (!immap)
+		return -ENOMEM;
+	immap->pfn_start = base >> PAGE_SHIFT;
+	immap->pfn_end = immap->pfn_start + (size >> PAGE_SHIFT) - 1;
+
+	rc = mtree_alloc_range(&ictx->mt_mmap, immap_id, immap, sizeof(immap),
+			       0, LONG_MAX >> PAGE_SHIFT, GFP_KERNEL);
+	if (rc < 0) {
+		kfree(immap);
+		return rc;
+	}
+
+	/* mmap() syscall will right-shift the immap_id to vma->vm_pgoff */
+	*immap_id <<= PAGE_SHIFT;
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_alloc_mmap, "IOMMUFD");
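On the vm_pgoff comment above: the main.c hunk is trimmed from this quote, but the lookup that the maple tree enables on the mmap() path presumably looks roughly like the sketch below. All names other than mt_mmap and struct iommufd_mmap are assumptions, not the patch's actual code:

static int iommufd_mmap_sketch(struct file *filp, struct vm_area_struct *vma)
{
	struct iommufd_ctx *ictx = filp->private_data;
	size_t length = vma->vm_end - vma->vm_start;
	struct iommufd_mmap *immap;

	/* vm_pgoff is the immap_id reported by the driver, already >> PAGE_SHIFT */
	immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff);
	if (!immap)
		return -ENXIO;
	if ((length >> PAGE_SHIFT) > immap->pfn_end - immap->pfn_start + 1)
		return -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, immap->pfn_start,
				  length, vma->vm_page_prot);
}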
+
+void iommufd_ctx_free_mmap(struct iommufd_ctx *ictx, unsigned long immap_id)
+{
+	kfree(mtree_erase(&ictx->mt_mmap, immap_id >> PAGE_SHIFT));
MMIO lifecycle question: what happens if a region is removed from the maple tree (and is therefore no longer mappable), but is still mapped and in use by userspace?
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_free_mmap, "IOMMUFD");
+
 /* Caller should xa_lock(&viommu->vdevs) to protect the return value */
 struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
 				       unsigned long vdev_id)
Thanks,
baolu