On Sun, Nov 27, 2022 at 06:49:29PM +0100, Eric Auger wrote:
+static int iommufd_ioas_load_iovas(struct rb_root_cached *itree,
+				   struct iommu_iova_range __user *ranges,
+				   u32 num)
+{
+	u32 i;
+
+	for (i = 0; i != num; i++) {
 
shouldn't it be < ?
It is logically equivalent
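For what it's worth, a tiny sketch of why the two spellings are
interchangeable here: i starts at 0 and only ever increments by one, so
both loops terminate exactly when i reaches num.

	u32 i, num = 4;

	for (i = 0; i != num; i++)
		; /* visits i = 0, 1, 2, 3 */
	for (i = 0; i < num; i++)
		; /* exactly the same iterations */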
+int iommufd_ioas_allow_iovas(struct iommufd_ucmd *ucmd)
+{
+	struct iommu_ioas_allow_iovas *cmd = ucmd->cmd;
+	struct rb_root_cached allowed_iova = RB_ROOT_CACHED;
+	struct interval_tree_node *node;
+	struct iommufd_ioas *ioas;
+	struct io_pagetable *iopt;
+	int rc = 0;
+
+	if (cmd->__reserved)
+		return -EOPNOTSUPP;
+
+	ioas = iommufd_get_ioas(ucmd, cmd->ioas_id);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+	iopt = &ioas->iopt;
+
+	rc = iommufd_ioas_load_iovas(&allowed_iova,
+				     u64_to_user_ptr(cmd->allowed_iovas),
+				     cmd->num_iovas);
+	if (rc)
+		goto out_free;
+
+	rc = iopt_set_allow_iova(iopt, &allowed_iova);
Please can you add a comment about why you need to proceed in 2 steps, i.e. add the ranges in a first tree and then 'swap' to the iopt->allowed_tree (and eventually delete the first tree)?
Sure
/*
 * We want the allowed tree update to be atomic, so we have to keep the
 * original nodes around, and keep track of the new nodes as we allocate
 * memory for them. The simplest solution is to have a new/old tree and
 * then swap new for old. On success we free the old tree, on failure we
 * free the new tree.
 */
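To make the two-step dance concrete, here is a rough sketch of the shape
of that swap. The field and helper names (allowed_itree, free_itree(),
check_new_tree_is_usable()) are illustrative only, not the actual
iommufd code:

#include <linux/interval_tree.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

/* free every node in an interval tree (illustrative helper) */
static void free_itree(struct rb_root_cached *itree)
{
	struct interval_tree_node *node;

	while ((node = interval_tree_iter_first(itree, 0, ULONG_MAX))) {
		interval_tree_remove(node, itree);
		kfree(node);
	}
}

static int set_allow_iova_sketch(struct io_pagetable *iopt,
				 struct rb_root_cached *new_tree)
{
	struct rb_root_cached old_tree;
	int rc;

	down_write(&iopt->iova_rwsem);
	/* hypothetical validation of the fully built replacement tree */
	rc = check_new_tree_is_usable(iopt, new_tree);
	if (rc) {
		up_write(&iopt->iova_rwsem);
		free_itree(new_tree);		/* failure: free the new tree */
		return rc;
	}
	old_tree = iopt->allowed_itree;		/* swap new for old */
	iopt->allowed_itree = *new_tree;
	up_write(&iopt->iova_rwsem);
	free_itree(&old_tree);			/* success: free the old tree */
	return 0;
}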
+static int conv_iommu_prot(u32 map_flags)
+{
+	int iommu_prot;
+
+	/*
+	 * We provide no manual cache coherency ioctls to userspace and most
+	 * architectures make the CPU ops for cache flushing privileged.
+	 * Therefore we require the underlying IOMMU to support CPU coherent
+	 * operation. Support for IOMMU_CACHE is enforced by the
+	 * IOMMU_CAP_CACHE_COHERENCY test during bind.
+	 */
+	iommu_prot = IOMMU_CACHE;
 at init?
Done
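For illustration, a sketch of the function with the initialization
folded into the declaration; the flag handling below is an assumed
continuation of the quoted excerpt, not part of it:

static int conv_iommu_prot(u32 map_flags)
{
	/* require CPU coherent operation; IOMMU_CACHE is enforced by the
	 * IOMMU_CAP_CACHE_COHERENCY test during bind */
	int iommu_prot = IOMMU_CACHE;

	if (map_flags & IOMMU_IOAS_MAP_WRITEABLE)
		iommu_prot |= IOMMU_WRITE;
	if (map_flags & IOMMU_IOAS_MAP_READABLE)
		iommu_prot |= IOMMU_READ;
	return iommu_prot;
}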
+int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
+{
+	struct iommu_ioas_map *cmd = ucmd->cmd;
+	struct iommufd_ioas *ioas;
+	unsigned int flags = 0;
+	unsigned long iova;
+	int rc;
+
+	if ((cmd->flags &
+	     ~(IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE |
+	       IOMMU_IOAS_MAP_READABLE)) ||
+	    cmd->__reserved)
+		return -EOPNOTSUPP;
+	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
+		return -EOVERFLOW;
+
+	ioas = iommufd_get_ioas(ucmd, cmd->ioas_id);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+
+	if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
+		flags = IOPT_ALLOC_IOVA;
+	iova = cmd->iova;
This can be done either at initialization or only if MAP_FIXED_IOVA is set.
Done
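For reference, sketches of the two shapes being suggested (the quoted
patch does the unconditional assignment):

	/* option 1: initialize at declaration */
	unsigned long iova = cmd->iova;

	/* option 2: assign only in the fixed-IOVA case */
	if (cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA)
		iova = cmd->iova;
	else
		flags = IOPT_ALLOC_IOVA;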
+int iommufd_option_rlimit_mode(struct iommu_option *cmd,
+			       struct iommufd_ctx *ictx)
+{
object_id and __reserved should be checked as per the uapi doc
Ohh, yes, thanks:
@@ -317,6 +322,9 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
 int iommufd_option_rlimit_mode(struct iommu_option *cmd,
 			       struct iommufd_ctx *ictx)
 {
+	if (cmd->object_id)
+		return -EOPNOTSUPP;
+
 	if (cmd->op == IOMMU_OPTION_OP_GET) {
 		cmd->val64 = ictx->account_mode == IOPT_PAGES_ACCOUNT_MM;
 		return 0;
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index de5cc01023c0c5..bcb463e581009c 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -215,6 +215,9 @@ static int iommufd_option(struct iommufd_ucmd *ucmd)
 	struct iommu_option *cmd = ucmd->cmd;
 	int rc;

+	if (cmd->__reserved)
+		return -EOPNOTSUPP;
+
 	switch (cmd->option_id) {
 	case IOMMU_OPTION_RLIMIT_MODE:
 		rc = iommufd_option_rlimit_mode(cmd, ucmd->ictx);
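For reference, a userspace sketch of what these checks mean for a
caller; set_rlimit_mode() is a made-up helper, the struct fields follow
the uapi doc below:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

/*
 * With the added checks, object_id and __reserved must be zero for
 * IOMMU_OPTION_RLIMIT_MODE or the ioctl fails with EOPNOTSUPP.
 */
static int set_rlimit_mode(int iommufd, __u64 val)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_RLIMIT_MODE,
		.op = IOMMU_OPTION_OP_SET,
		.object_id = 0,		/* must be 0 for this option */
		.val64 = val,
	};

	return ioctl(iommufd, IOMMU_OPTION, &cmd);
}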
+/**
+ * struct iommu_ioas_iova_ranges - ioctl(IOMMU_IOAS_IOVA_RANGES)
+ * @size: sizeof(struct iommu_ioas_iova_ranges)
+ * @ioas_id: IOAS ID to read ranges from
+ * @num_iovas: Input/Output total number of ranges in the IOAS
+ * @__reserved: Must be 0
+ * @allowed_iovas: Pointer to the output array of struct iommu_iova_range
+ * @out_iova_alignment: Minimum alignment required for mapping IOVA
+ *
+ * Query an IOAS for ranges of allowed IOVAs. Mapping IOVA outside these ranges
+ * is not allowed. num_iovas will be set to the total number of iovas and
+ * the allowed_iovas[] will be filled in as space permits.
+ *
+ * The allowed ranges are dependent on the HW path the DMA operation takes, and
+ * can change during the lifetime of the IOAS. A fresh empty IOAS will have a
+ * full range, and each attached device will narrow the ranges based on that
+ * device's HW restrictions. Detatching a device can widen the ranges. Userspace
 detaching
+ * should query ranges after every attach/detatch to know what IOVAs are valid
 detach
Done
+ * for mapping.
+ *
+ * On input num_iovas is the length of the allowed_iovas array. On output it is
+ * the total number of iovas filled in. The ioctl will return -EMSGSIZE and set
+ * num_iovas to the required value if num_iovas is too small. In this case the
+ * caller should allocate a larger output array and re-issue the ioctl.
+ */
+struct iommu_ioas_iova_ranges {
+	__u32 size;
+	__u32 ioas_id;
+	__u32 num_iovas;
+	__u32 __reserved;
+	__aligned_u64 allowed_iovas;
+	__aligned_u64 out_iova_alignment;
 document @out_iova_alignment?
 * out_iova_alignment returns the minimum IOVA alignment that can be given
 * to IOMMU_IOAS_MAP/COPY. IOVA's must satisfy:
 *
 *   starting_iova % out_iova_alignment == 0
 *   (starting_iova + length) % out_iova_alignment == 0
 *
 * out_iova_alignment can be 1 indicating any IOVA is allowed. It cannot
 * be higher than the system PAGE_SIZE.
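For completeness, a userspace sketch of the -EMSGSIZE retry loop the doc
describes (query_allowed_iovas() is a made-up helper):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static struct iommu_iova_range *query_allowed_iovas(int iommufd, __u32 ioas_id,
						    __u32 *num_iovas)
{
	struct iommu_ioas_iova_ranges cmd = {
		.size = sizeof(cmd),
		.ioas_id = ioas_id,
		/* num_iovas starts at 0: first call reports the needed size */
	};
	struct iommu_iova_range *ranges = NULL;

	for (;;) {
		struct iommu_iova_range *bigger;

		cmd.allowed_iovas = (uintptr_t)ranges;
		if (!ioctl(iommufd, IOMMU_IOAS_IOVA_RANGES, &cmd))
			break;
		if (errno != EMSGSIZE) {
			free(ranges);
			return NULL;
		}
		/* the kernel wrote the required length into num_iovas */
		bigger = realloc(ranges, cmd.num_iovas * sizeof(*ranges));
		if (!bigger) {
			free(ranges);
			return NULL;
		}
		ranges = bigger;
	}
	*num_iovas = cmd.num_iovas;
	return ranges;
}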
+/**
+ * struct iommu_ioas_map - ioctl(IOMMU_IOAS_MAP)
+ * @size: sizeof(struct iommu_ioas_map)
+ * @flags: Combination of enum iommufd_ioas_map_flags
+ * @ioas_id: IOAS ID to change the mapping of
+ * @__reserved: Must be 0
+ * @user_va: Userspace pointer to start mapping from
+ * @length: Number of bytes to map
+ * @iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is set
+ *        then this must be provided as input.
+ *
+ * Set an IOVA mapping from a user pointer. If FIXED_IOVA is specified then the
+ * mapping will be established at iova, otherwise a suitable location based on
+ * the reserved and allowed lists will be automatically selected and returned in
+ * iova.
You do not mention anything about the fact that the IOCTL cannot be called twice for a given @user_va with FIXED_IOVA (referring to VFIO_DMA_MAP_FLAG_VADDR).
 * If IOMMU_IOAS_MAP_FIXED_IOVA is specified then the iova range must
 * currently be unused, existing IOVA cannot be replaced.
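To make that concrete, a hedged userspace sketch (map_fixed() is a
made-up helper); calling it twice for the same iova range now fails,
since existing IOVA cannot be replaced:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int map_fixed(int iommufd, __u32 ioas_id, void *buf, __u64 len,
		     __u64 iova)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_READABLE |
			 IOMMU_IOAS_MAP_WRITEABLE,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buf,
		.length = len,
		.iova = iova,	/* input because FIXED_IOVA is set */
	};

	/* a second call with the same iova range fails: range in use */
	return ioctl(iommufd, IOMMU_IOAS_MAP, &cmd);
}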
+/**
+ * struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY)
+ * @size: sizeof(struct iommu_ioas_copy)
+ * @flags: Combination of enum iommufd_ioas_map_flags
+ * @dst_ioas_id: IOAS ID to change the mapping of
+ * @src_ioas_id: IOAS ID to copy from
+ * @length: Number of bytes to copy and map
+ * @dst_iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is
+ *            set then this must be provided as input.
+ * @src_iova: IOVA to start the copy
+ *
+ * Copy an already existing mapping from src_ioas_id and establish it in
+ * dst_ioas_id. The src iova/length must exactly match a range used with
+ * IOMMU_IOAS_MAP.
+ *
+ * This may be used to efficiently clone a subset of an IOAS to another, or as a
+ * kind of 'cache' to speed up mapping. Copy has an effciency advantage over
 efficiency
+ * establishing equivalent new mappings, as internal resources are shared, and
+ * the kernel will pin the user memory only once.
+ */
+struct iommu_ioas_copy {
+	__u32 size;
+	__u32 flags;
+	__u32 dst_ioas_id;
+	__u32 src_ioas_id;
 is src_ioas_id == dst_ioas_id allowed?
Yes
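For illustration, a sketch of the self-copy case; alias_within_ioas() is
a made-up helper that aliases an existing mapping at a second IOVA in
the same IOAS:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alias_within_ioas(int iommufd, __u32 ioas_id, __u64 src_iova,
			     __u64 dst_iova, __u64 length)
{
	struct iommu_ioas_copy cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_READABLE |
			 IOMMU_IOAS_MAP_WRITEABLE,
		.dst_ioas_id = ioas_id,	/* same IOAS on both sides */
		.src_ioas_id = ioas_id,
		.length = length,
		.dst_iova = dst_iova,
		.src_iova = src_iova,	/* must exactly match a prior MAP */
	};

	return ioctl(iommufd, IOMMU_IOAS_COPY, &cmd);
}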
+/**
+ * struct iommu_option - iommu option multiplexer
+ * @size: sizeof(struct iommu_option)
+ * @option_id: One of enum iommufd_option
+ * @op: One of enum iommufd_option_ops
+ * @__reserved: Must be 0
+ * @object_id: ID of the object if required
+ * @val64: Option value to set or value returned on get
+ *
+ * Change a simple option value. This multiplexor allows controlling a options
 s/a options/options
Done
Thanks,
Jason