The IOPF handler is responsible for delivering I/O page faults to user space. When an I/O page fault occurs, it is added to the pending-fault list of the hardware page table (HWPT) and a fault event is generated to notify user space. User space can then fetch the fault information from the HWPT and handle the fault accordingly.
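For illustration, the intended user-space consumption of this interface looks roughly like the sketch below. Only the eventfd notification path is added by this patch; the fault-fetch interface arrives later in the series, so the fetch step is described as a hypothetical placeholder here rather than a defined uAPI.

  /*
   * Hypothetical user-space sketch: wait on the eventfd that was
   * registered at HWPT allocation time, then fetch and service the
   * pending faults. The fetch ioctl named in the comment below is a
   * placeholder, not a uAPI defined by this patch.
   */
  #include <poll.h>
  #include <stdint.h>
  #include <unistd.h>

  static void iopf_event_loop(int iommufd, int fault_eventfd)
  {
          struct pollfd pfd = { .fd = fault_eventfd, .events = POLLIN };
          uint64_t count;

          (void)iommufd; /* would be used by the fetch ioctl, elided here */

          while (poll(&pfd, 1, -1) > 0) {
                  /* Consume the counter so the eventfd re-arms. */
                  if (read(fault_eventfd, &count, sizeof(count)) != sizeof(count))
                          break;
                  /*
                   * Fetch struct iommu_hwpt_pgfault records from the
                   * HWPT (e.g. via a hypothetical IOMMU_HWPT_GET_FAULT
                   * ioctl on iommufd), handle each fault, then post a
                   * page response.
                   */
          }
  }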
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/iommufd/iommufd_private.h |  8 ++++
 drivers/iommu/iommufd/hw_pagetable.c    | 52 ++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 5ff139acc5c0..8ff7721ea922 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -243,6 +243,14 @@ struct hw_pgtable_fault {
 	struct eventfd_ctx *trigger;
 };
 
+struct iommufd_fault {
+	struct device *dev;
+	ioasid_t pasid;
+	struct iommu_hwpt_pgfault fault;
+	/* List head at hw_pgtable_fault::deliver or response */
+	struct list_head item;
+};
+
 /*
  * A HW pagetable is called an iommu_domain inside the kernel. This user object
  * allows directly creating and inspecting the domains. Domains that have kernel
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index d6d550c3d0cc..4d07c7c0073e 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -12,6 +12,9 @@
 
 static struct hw_pgtable_fault *hw_pagetable_fault_alloc(int eventfd);
 static void hw_pagetable_fault_free(struct hw_pgtable_fault *fault);
+static enum iommu_page_response_code
+iommufd_hw_pagetable_iopf_handler(struct iommu_fault *fault,
+				  struct device *dev, void *data);
 
 void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
 {
@@ -300,6 +303,10 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
 			rc = PTR_ERR(hwpt->fault);
 			goto out_hwpt;
 		}
+
+		iommu_domain_set_iopf_handler(hwpt->domain,
+					      iommufd_hw_pagetable_iopf_handler,
+					      hwpt);
 	}
 
 	cmd->out_hwpt_id = hwpt->obj.id;
@@ -367,6 +374,51 @@ int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
 	return rc;
 }
 
+static void iommufd_compose_fault_message(struct iommu_fault *fault,
+					  struct iommu_hwpt_pgfault *hwpt_fault,
+					  unsigned int dev_id)
+{
+	hwpt_fault->size = sizeof(*hwpt_fault);
+	hwpt_fault->flags = fault->prm.flags;
+	hwpt_fault->dev_id = dev_id;
+	hwpt_fault->pasid = fault->prm.pasid;
+	hwpt_fault->grpid = fault->prm.grpid;
+	hwpt_fault->perm = fault->prm.perm;
+	hwpt_fault->addr = fault->prm.addr;
+	hwpt_fault->private_data[0] = fault->prm.private_data[0];
+	hwpt_fault->private_data[1] = fault->prm.private_data[1];
+}
+
+static enum iommu_page_response_code
+iommufd_hw_pagetable_iopf_handler(struct iommu_fault *fault,
+				  struct device *dev, void *data)
+{
+	struct iommufd_hw_pagetable *hwpt = data;
+	struct iommufd_fault_cookie *cookie;
+	struct iommufd_fault *ifault;
+
+	ifault = kzalloc(sizeof(*ifault), GFP_KERNEL);
+	if (!ifault)
+		return IOMMU_PAGE_RESP_FAILURE;
+
+	cookie = iommu_get_device_fault_cookie(dev, fault->prm.pasid);
+	if (!cookie) {
+		kfree(ifault);
+		return IOMMU_PAGE_RESP_FAILURE;
+	}
+
+	iommufd_compose_fault_message(fault, &ifault->fault, cookie->idev->obj.id);
+	ifault->dev = dev;
+	ifault->pasid = fault->prm.pasid;
+
+	mutex_lock(&hwpt->fault->mutex);
+	list_add_tail(&ifault->item, &hwpt->fault->deliver);
+	eventfd_signal(hwpt->fault->trigger, 1);
+	mutex_unlock(&hwpt->fault->mutex);
+
+	return IOMMU_PAGE_RESP_ASYNC;
+}
+
 static struct hw_pgtable_fault *hw_pagetable_fault_alloc(int eventfd)
 {
 	struct hw_pgtable_fault *fault;