Add support in the OP-TEE backend driver for protected memory allocation. The support is limited to the SMC ABI and to secure video buffers.
OP-TEE is probed for the range of protected physical memory, and a memory pool allocator is initialized if OP-TEE has support for such memory.
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
---
 drivers/tee/optee/Kconfig         |  5 +++
 drivers/tee/optee/core.c          | 10 +++++
 drivers/tee/optee/optee_private.h |  2 +
 drivers/tee/optee/smc_abi.c       | 70 ++++++++++++++++++++++++++++++-
 4 files changed, 85 insertions(+), 2 deletions(-)
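Not part of the patch, just for illustration while reviewing: once the pool is registered, userspace allocates protected buffers through the generic DMA-buf heap ioctl interface; nothing OP-TEE specific is involved on the allocation path. The sketch below is rough and untested, and the heap device name is an assumption standing in for whatever TEE_DMA_HEAP_SECURE_VIDEO_PLAY ends up being registered as.

/*
 * Illustrative only -- not part of this patch. Allocates one buffer from
 * the DMA-buf heap registered by optee_protmem_pool_init(). The heap
 * device name below is an assumption.
 */
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

static int alloc_protected_buf(size_t len)
{
        struct dma_heap_allocation_data data = {
                .len = len,
                .fd_flags = O_RDWR | O_CLOEXEC,
        };
        int heap_fd;

        /* Heap name is a placeholder for the secure video play heap */
        heap_fd = open("/dev/dma_heap/secure-video", O_RDWR | O_CLOEXEC);
        if (heap_fd < 0)
                return -1;

        if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
                close(heap_fd);
                return -1;
        }
        close(heap_fd);

        /* DMA-buf fd backed by the OP-TEE protected memory pool */
        return data.fd;
}

The returned DMA-buf fd can then be attached and mapped by a device; as noted in the comment in static_protmem_pool_init(), the CPU is not expected to touch the buffer contents.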
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 7bb7990d0b07..50d2051f7f20 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -25,3 +25,8 @@ config OPTEE_INSECURE_LOAD_IMAGE
          Additional documentation on kernel security risks are at
          Documentation/tee/op-tee.rst.
+
+config OPTEE_STATIC_PROTMEM_POOL
+        bool
+        depends on HAS_IOMEM && TEE_DMABUF_HEAPS
+        default y
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index c75fddc83576..4b14a7ac56f9 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -56,6 +56,15 @@ int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action,
         return 0;
 }
+int optee_set_dma_mask(struct optee *optee, u_int pa_width)
+{
+        u64 mask = DMA_BIT_MASK(min(64, pa_width));
+
+        optee->teedev->dev.dma_mask = &optee->teedev->dev.coherent_dma_mask;
+
+        return dma_set_mask_and_coherent(&optee->teedev->dev, mask);
+}
+
 static void optee_bus_scan(struct work_struct *work)
 {
         WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
@@ -181,6 +190,7 @@ void optee_remove_common(struct optee *optee)
         tee_device_unregister(optee->supp_teedev);
         tee_device_unregister(optee->teedev);
 
+        tee_device_unregister_all_dma_heaps(optee->teedev);
         tee_shm_pool_free(optee->pool);
         optee_supp_uninit(&optee->supp);
         mutex_destroy(&optee->call_queue.mutex);
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index dc0f355ef72a..5e3c34802121 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -272,6 +272,8 @@ struct optee_call_ctx {
 extern struct blocking_notifier_head optee_rpmb_intf_added;
 
+int optee_set_dma_mask(struct optee *optee, u_int pa_width);
+
 int optee_notif_init(struct optee *optee, u_int max_key);
 void optee_notif_uninit(struct optee *optee);
 int optee_notif_wait(struct optee *optee, u_int key, u32 timeout);
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index f0c3ac1103bb..cf106d15e64e 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1584,6 +1584,68 @@ static inline int optee_load_fw(struct platform_device *pdev,
 }
 #endif
 
+static struct tee_protmem_pool *static_protmem_pool_init(struct optee *optee)
+{
+#if IS_ENABLED(CONFIG_OPTEE_STATIC_PROTMEM_POOL)
+        union {
+                struct arm_smccc_res smccc;
+                struct optee_smc_get_protmem_config_result result;
+        } res;
+        struct tee_protmem_pool *pool;
+        void *p;
+        int rc;
+
+        optee->smc.invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0,
+                             0, 0, 0, &res.smccc);
+        if (res.result.status != OPTEE_SMC_RETURN_OK)
+                return ERR_PTR(-EINVAL);
+
+        rc = optee_set_dma_mask(optee, res.result.pa_width);
+        if (rc)
+                return ERR_PTR(rc);
+
+        /*
+         * Map the memory as uncached to make sure the kernel can work with
+         * __pfn_to_page() and friends since that's needed when passing the
+         * protected DMA-buf to a device. The memory should otherwise not
+         * be touched by the kernel since it's likely to cause an external
+         * abort due to the protection status.
+         */
+        p = devm_memremap(&optee->teedev->dev, res.result.start,
+                          res.result.size, MEMREMAP_WC);
+        if (IS_ERR(p))
+                return p;
+
+        pool = tee_protmem_static_pool_alloc(res.result.start, res.result.size);
+        if (IS_ERR(pool))
+                devm_memunmap(&optee->teedev->dev, p);
+
+        return pool;
+#else
+        return ERR_PTR(-EINVAL);
+#endif
+}
+
+static int optee_protmem_pool_init(struct optee *optee)
+{
+        enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY;
+        struct tee_protmem_pool *pool = ERR_PTR(-EINVAL);
+        int rc;
+
+        if (!(optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_PROTMEM))
+                return 0;
+
+        pool = static_protmem_pool_init(optee);
+        if (IS_ERR(pool))
+                return PTR_ERR(pool);
+
+        rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool);
+        if (rc)
+                pool->ops->destroy_pool(pool);
+
+        return rc;
+}
+
 static int optee_probe(struct platform_device *pdev)
 {
         optee_invoke_fn *invoke_fn;
@@ -1679,7 +1741,7 @@ static int optee_probe(struct platform_device *pdev)
         optee = kzalloc(sizeof(*optee), GFP_KERNEL);
         if (!optee) {
                 rc = -ENOMEM;
-                goto err_free_pool;
+                goto err_free_shm_pool;
         }
         optee->ops = &optee_ops;
@@ -1752,6 +1814,9 @@ static int optee_probe(struct platform_device *pdev)
                 pr_info("Asynchronous notifications enabled\n");
         }
 
+        if (optee_protmem_pool_init(optee))
+                pr_info("Protected memory service not available\n");
+
         /*
          * Ensure that there are no pre-existing shm objects before enabling
          * the shm cache so that there's no chance of receiving an invalid
@@ -1787,6 +1852,7 @@ static int optee_probe(struct platform_device *pdev)
         optee_disable_shm_cache(optee);
         optee_smc_notif_uninit_irq(optee);
         optee_unregister_devices();
+        tee_device_unregister_all_dma_heaps(optee->teedev);
 err_notif_uninit:
         optee_notif_uninit(optee);
 err_close_ctx:
@@ -1803,7 +1869,7 @@ static int optee_probe(struct platform_device *pdev)
         tee_device_unregister(optee->teedev);
 err_free_optee:
         kfree(optee);
-err_free_pool:
+err_free_shm_pool:
         tee_shm_pool_free(pool);
         if (memremaped_shm)
                 memunmap(memremaped_shm);