From: Hridya Valsaraju <hridya@google.com>
The dma_buf_transfer_charge() function provides a way for processes to transfer the charge for a buffer to a different process. This is essential for cases where a central allocator process performs allocations on behalf of various subsystems, hands over the fd to the client that requested the memory, and drops all references to the allocated memory.
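The allocator side of such a handover might look roughly like the sketch below (illustrative only, not code added by this patch; how the allocator obtains a reference to the client task is elided):

	/*
	 * Hypothetical allocator-side helper: after handing the dma-buf fd
	 * to the client, move the buffer's charge to the client's GPU
	 * cgroup so the allocator can drop its references. Only
	 * dma_buf_transfer_charge(), gpucg_get() and gpucg_put() come from
	 * this series; the helper itself is illustrative.
	 */
	static int example_hand_over_charge(struct dma_buf *dmabuf,
					    struct task_struct *client)
	{
		struct gpucg *dest = gpucg_get(client);
		int ret = dma_buf_transfer_charge(dmabuf, dest);

		gpucg_put(dest);
		return ret;
	}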
Signed-off-by: Hridya Valsaraju <hridya@google.com>
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
v4 changes
Adjust ordering of charge/uncharge during transfer to avoid potentially
hitting the cgroup limit per Michal Koutný.
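For example, when source and destination sit under a common ancestor whose
limit is 100 MiB and a 64 MiB buffer is transferred, charging the destination
before uncharging the source would momentarily account the buffer twice
(128 MiB) against the ancestor and could spuriously exceed its limit, even
though the post-transfer state fits.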
v3 changes
Use the more common dual-author commit message format per John Stultz.
v2 changes
Move dma-buf cgroup charge transfer from a dma_buf_op defined by every
heap to a single dma-buf function for all heaps per Daniel Vetter and
Christian König.
---
 drivers/dma-buf/dma-buf.c  | 48 ++++++++++++++++++++++++++++++++
 include/linux/cgroup_gpu.h | 12 ++++++++
 include/linux/dma-buf.h    |  2 ++
 kernel/cgroup/gpu.c        | 60 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 122 insertions(+)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1ee5c60d3d6d..7748c3453b91 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1380,6 +1380,54 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
 
+/**
+ * dma_buf_transfer_charge - Change the GPU cgroup to which the provided dma_buf is charged.
+ * @dmabuf:	[in]	buffer whose charge will be migrated to a different GPU cgroup
+ * @gpucg:	[in]	the destination GPU cgroup for dmabuf's charge
+ *
+ * Only tasks that belong to the same cgroup the buffer is currently charged to
+ * may call this function, otherwise it will return -EPERM.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_transfer_charge(struct dma_buf *dmabuf, struct gpucg *gpucg)
+{
+#ifdef CONFIG_CGROUP_GPU
+	struct gpucg *current_gpucg;
+	int ret;
+
+	/* If the source and destination cgroups are the same, don't do anything. */
+	current_gpucg = gpucg_get(current);
+	if (current_gpucg == gpucg) {
+		ret = 0;
+		goto skip_transfer;
+	}
+
+	/*
+	 * Verify that the cgroup of the process requesting the transfer is the
+	 * same as the one the buffer is currently charged to.
+	 */
+	mutex_lock(&dmabuf->lock);
+	if (current_gpucg != dmabuf->gpucg) {
+		ret = -EPERM;
+		goto err;
+	}
+
+	ret = gpucg_transfer_charge(current_gpucg, gpucg, dmabuf->gpucg_dev, dmabuf->size);
+	if (ret)
+		goto err;
+	dmabuf->gpucg = gpucg;
+err:
+	mutex_unlock(&dmabuf->lock);
+skip_transfer:
+	gpucg_put(current_gpucg);
+	return ret;
+#else
+	return 0;
+#endif /* CONFIG_CGROUP_GPU */
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_transfer_charge, DMA_BUF);
+
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
diff --git a/include/linux/cgroup_gpu.h b/include/linux/cgroup_gpu.h
index c90069719022..e30f15d5e9be 100644
--- a/include/linux/cgroup_gpu.h
+++ b/include/linux/cgroup_gpu.h
@@ -87,6 +87,10 @@ static inline struct gpucg *gpucg_parent(struct gpucg *cg)
 
 int gpucg_try_charge(struct gpucg *gpucg, struct gpucg_device *device, u64 usage);
 void gpucg_uncharge(struct gpucg *gpucg, struct gpucg_device *device, u64 usage);
+int gpucg_transfer_charge(struct gpucg *source,
+			  struct gpucg *dest,
+			  struct gpucg_device *device,
+			  u64 usage);
 void gpucg_register_device(struct gpucg_device *gpucg_dev, const char *name);
 
 #else /* CONFIG_CGROUP_GPU */
@@ -121,6 +125,14 @@ static inline void gpucg_uncharge(struct gpucg *gpucg,
 					 struct gpucg_device *device,
 					 u64 usage) {}
 
+static inline int gpucg_transfer_charge(struct gpucg *source,
+					struct gpucg *dest,
+					struct gpucg_device *device,
+					u64 usage)
+{
+	return 0;
+}
+
 static inline void gpucg_register_device(struct gpucg_device *gpucg_dev,
 					 const char *name) {}
 #endif /* CONFIG_CGROUP_GPU */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 742f29c3daaf..646827156213 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -646,4 +646,6 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
 		 unsigned long);
 int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
 void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+
+int dma_buf_transfer_charge(struct dma_buf *dmabuf, struct gpucg *gpucg);
 #endif /* __DMA_BUF_H__ */
diff --git a/kernel/cgroup/gpu.c b/kernel/cgroup/gpu.c
index ac4c470914b5..40531323d6da 100644
--- a/kernel/cgroup/gpu.c
+++ b/kernel/cgroup/gpu.c
@@ -247,6 +247,66 @@ void gpucg_uncharge(struct gpucg *gpucg, struct gpucg_device *device, u64 usage)
 	css_put_many(&gpucg->css, nr_pages);
 }
 
+/**
+ * gpucg_transfer_charge - Transfer a GPU charge from one cgroup to another.
+ * @source:	[in]	The GPU cgroup the charge will be transferred from.
+ * @dest:	[in]	The GPU cgroup the charge will be transferred to.
+ * @device:	[in]	The GPU cgroup device corresponding to the charge.
+ * @usage:	[in]	The size of the memory in bytes.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int gpucg_transfer_charge(struct gpucg *source,
+			  struct gpucg *dest,
+			  struct gpucg_device *device,
+			  u64 usage)
+{
+	struct page_counter *counter;
+	u64 nr_pages;
+	struct gpucg_resource_pool *rp_source, *rp_dest;
+	int ret = 0;
+
+	nr_pages = PAGE_ALIGN(usage) >> PAGE_SHIFT;
+
+	mutex_lock(&gpucg_mutex);
+	rp_source = find_cg_rpool_locked(source, device);
+	if (unlikely(!rp_source)) {
+		ret = -ENOENT;
+		goto exit_early;
+	}
+
+	rp_dest = get_cg_rpool_locked(dest, device);
+	if (IS_ERR(rp_dest)) {
+		ret = PTR_ERR(rp_dest);
+		goto exit_early;
+	}
+
+	/*
+	 * First uncharge from the pool it's currently charged to. This ordering avoids double
+	 * charging while the transfer is in progress, which could cause us to hit a limit.
+	 * If the try_charge fails for this transfer, we need to be able to reverse this uncharge,
+	 * so we continue to hold the gpucg_mutex here.
+	 */
+	page_counter_uncharge(&rp_source->total, nr_pages);
+	css_put_many(&source->css, nr_pages);
+
+	/* Now attempt the new charge */
+	if (page_counter_try_charge(&rp_dest->total, nr_pages, &counter)) {
+		css_get_many(&dest->css, nr_pages);
+	} else {
+		/*
+		 * The new charge failed, so reverse the uncharge from above. This should always
+		 * succeed since charges on source are blocked by gpucg_mutex.
+		 */
+		WARN_ON(!page_counter_try_charge(&rp_source->total, nr_pages, &counter));
+		css_get_many(&source->css, nr_pages);
+		ret = -ENOMEM;
+	}
+exit_early:
+	mutex_unlock(&gpucg_mutex);
+	return ret;
+}
+
 /**
  * gpucg_register_device - Registers a device for memory accounting using the
  * GPU cgroup controller.