This debugging hack is important to enforce the rule that importers should *never* touch the underlying struct page of the exporter.
Instead of just mangling the page link, create a copy of the sg_table, but only copy over the DMA addresses and not the pages.
This will cause a NULL pointer dereference if the importer tries to touch the struct page. It is still quite a hack, but it at least allows the exporter to keep its sg_table intact while allowing the DMA-buf maintainer to find and fix misbehaving importers and finally switch over to a different data structure in the future.
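To illustrate what this catches, a rough importer-side sketch (not part of the patch; importer_use_buffer() is a made-up name, and attachment setup, reservation locking and most error handling are omitted): only the DMA addresses and lengths of the returned table may be consumed, any struct page access now hits a NULL page and crashes in the misbehaving importer instead of silently abusing the exporter's pages.

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

/* Sketch only: a well behaved importer consuming a mapped DMA-buf. */
static int importer_use_buffer(struct dma_buf_attachment *attach)
{
        struct scatterlist *sg;
        struct sg_table *sgt;
        int i;

        /* reservation locking elided for brevity */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        /* Correct: only the DMA addresses and lengths are valid here. */
        for_each_sgtable_dma_sg(sgt, sg, i) {
                dma_addr_t addr = sg_dma_address(sg);

                pr_debug("dma %pad len %u\n", &addr, sg_dma_len(sg));
        }

        /*
         * Broken: with CONFIG_DMABUF_DEBUG sg_page() now returns NULL,
         * so something like page_address(sg_page(sgt->sgl)) oopses
         * right in the importer.
         */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        return 0;
}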
v2: improve the hack further by using a wrapper structure and explaining
    the background a bit more in the commit message.
v3: fix some whitespace issues, use sg_assign_page().
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com> (v1)
---
 drivers/dma-buf/dma-buf.c | 74 +++++++++++++++++++++++++++++++--------
 1 file changed, 60 insertions(+), 14 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 2305bb2cc1f1..944f4103b5cc 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -35,6 +35,12 @@
#include "dma-buf-sysfs-stats.h"
+/* Wrapper to hide the sg_table page link from the importer */
+struct dma_buf_sg_table_wrapper {
+        struct sg_table *original;
+        struct sg_table wrapper;
+};
+
 static inline int is_dma_buf_file(struct file *);
 static DEFINE_MUTEX(dmabuf_list_mutex);
@@ -828,21 +834,59 @@ void dma_buf_put(struct dma_buf *dmabuf)
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
-static void mangle_sg_table(struct sg_table *sg_table)
+static int dma_buf_mangle_sg_table(struct sg_table **sg_table)
 {
-#ifdef CONFIG_DMABUF_DEBUG
-        int i;
-        struct scatterlist *sg;
-
-        /* To catch abuse of the underlying struct page by importers mix
-         * up the bits, but take care to preserve the low SG_ bits to
-         * not corrupt the sgt. The mixing is undone on unmap
-         * before passing the sgt back to the exporter.
+        struct scatterlist *to_sg, *from_sg;
+        struct sg_table *from = *sg_table;
+        struct dma_buf_sg_table_wrapper *to;
+        int i, ret;
+
+        if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
+                return 0;
+
+        /*
+         * To catch abuse of the underlying struct page by importers copy the
+         * sg_table without copying the page_link and give only the copy back to
+         * the importer.
          */
-        for_each_sgtable_sg(sg_table, sg, i)
-                sg->page_link ^= ~0xffUL;
-#endif
+        to = kzalloc(sizeof(*to), GFP_KERNEL);
+        if (!to)
+                return -ENOMEM;
+
+        ret = sg_alloc_table(&to->wrapper, from->nents, GFP_KERNEL);
+        if (ret)
+                goto free_to;
+
+        to_sg = to->wrapper.sgl;
+        for_each_sgtable_dma_sg(from, from_sg, i) {
+                to_sg->offset = 0;
+                to_sg->length = 0;
+                sg_assign_page(to_sg, NULL);
+                sg_dma_address(to_sg) = sg_dma_address(from_sg);
+                sg_dma_len(to_sg) = sg_dma_len(from_sg);
+                to_sg = sg_next(to_sg);
+        }
+
+        to->original = from;
+        *sg_table = &to->wrapper;
+        return 0;
+
+free_to:
+        kfree(to);
+        return ret;
+}
+
+static void dma_buf_demangle_sg_table(struct sg_table **sg_table)
+{
+        struct dma_buf_sg_table_wrapper *copy;
+
+        if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
+                return;
+
+        copy = container_of(*sg_table, typeof(*copy), wrapper);
+        *sg_table = copy->original;
+        sg_free_table(&copy->wrapper);
+        kfree(copy);
 }
 static inline bool
@@ -1139,7 +1183,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                 if (ret < 0)
                         goto error_unmap;
         }
-        mangle_sg_table(sg_table);
+        ret = dma_buf_mangle_sg_table(&sg_table);
+        if (ret)
+                goto error_unmap;
         if (IS_ENABLED(CONFIG_DMA_API_DEBUG)) {
                 struct scatterlist *sg;
@@ -1220,7 +1266,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
-        mangle_sg_table(sg_table);
+        dma_buf_demangle_sg_table(&sg_table);
         attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
if (dma_buf_pin_on_map(attach))
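A side note on the unmap path: because the importer only ever sees the embedded wrapper table, the pointer it hands back is enough to recover the exporter's original sg_table again. A minimal sketch of that container_of() step, mirroring dma_buf_demangle_sg_table() above (the helper name here is made up):

/* Sketch only: recover the exporter's table from the wrapper copy. */
static struct sg_table *dma_buf_unwrap_sg_table(struct sg_table *sgt)
{
        struct dma_buf_sg_table_wrapper *copy;

        /* sgt is &copy->wrapper, so container_of() gives us the wrapper */
        copy = container_of(sgt, struct dma_buf_sg_table_wrapper, wrapper);
        return copy->original;
}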
-----Original Message-----
From: dri-devel <dri-devel-bounces@lists.freedesktop.org> On Behalf Of Christian König
Sent: Friday, December 5, 2025 8:06 AM
To: Auld, Matthew <matthew.auld@intel.com>; linux-media@vger.kernel.org; dri-devel@lists.freedesktop.org; linaro-mm-sig@lists.linaro.org; simona.vetter@ffwll.ch; Ruhl, Michael J <michael.j.ruhl@intel.com>
Subject: [PATCH 1/2] dma-buf: improve sg_table debugging hack v3
> This debugging hack is important to enforce the rule that importers
> should *never* touch the underlying struct page of the exporter.
>
> Instead of just mangling the page link, create a copy of the sg_table,
> but only copy over the DMA addresses and not the pages.
>
> This will cause a NULL pointer dereference if the importer tries to
> touch the struct page. It is still quite a hack, but it at least allows
> the exporter to keep its sg_table intact while allowing the DMA-buf
> maintainer to find and fix misbehaving importers and finally switch
> over to a different data structure in the future.
>
> v2: improve the hack further by using a wrapper structure and explaining
>     the background a bit more in the commit message.
> v3: fix some whitespace issues, use sg_assign_page().
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com> (v1)
> ---
>  drivers/dma-buf/dma-buf.c | 74 +++++++++++++++++++++++++++++++-----
>  1 file changed, 60 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 2305bb2cc1f1..944f4103b5cc 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -35,6 +35,12 @@
>  #include "dma-buf-sysfs-stats.h"
>
> +/* Wrapper to hide the sg_table page link from the importer */
> +struct dma_buf_sg_table_wrapper {
> +        struct sg_table *original;
> +        struct sg_table wrapper;
> +};
> +
>  static inline int is_dma_buf_file(struct file *);
>
>  static DEFINE_MUTEX(dmabuf_list_mutex);
> @@ -828,21 +834,59 @@ void dma_buf_put(struct dma_buf *dmabuf)
>  }
>  EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
>
> -static void mangle_sg_table(struct sg_table *sg_table)
> +static int dma_buf_mangle_sg_table(struct sg_table **sg_table)
you are not really mangling this anymore...
dma_buf_clone_sg_table()
dma_buf_unclone_sg_table()
maybe?
>  {
> -#ifdef CONFIG_DMABUF_DEBUG
> -        int i;
> -        struct scatterlist *sg;
> -
> -        /* To catch abuse of the underlying struct page by importers mix
> -         * up the bits, but take care to preserve the low SG_ bits to
> -         * not corrupt the sgt. The mixing is undone on unmap
> -         * before passing the sgt back to the exporter.
> +        struct scatterlist *to_sg, *from_sg;
> +        struct sg_table *from = *sg_table;
> +        struct dma_buf_sg_table_wrapper *to;
> +        int i, ret;
> +
> +        if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
> +                return 0;
> +
> +        /*
> +         * To catch abuse of the underlying struct page by importers copy the
> +         * sg_table without copying the page_link and give only the copy back to
> +         * the importer.
>           */
> -        for_each_sgtable_sg(sg_table, sg, i)
> -                sg->page_link ^= ~0xffUL;
> -#endif
> +        to = kzalloc(sizeof(*to), GFP_KERNEL);
> +        if (!to)
> +                return -ENOMEM;
> +
> +        ret = sg_alloc_table(&to->wrapper, from->nents, GFP_KERNEL);
> +        if (ret)
> +                goto free_to;
> +
> +        to_sg = to->wrapper.sgl;
> +        for_each_sgtable_dma_sg(from, from_sg, i) {
> +                to_sg->offset = 0;
> +                to_sg->length = 0;
> +                sg_assign_page(to_sg, NULL);
sg_set_page(to_sg, NULL, 0, 0); ?
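i.e. the copy loop could then shrink to something like this (untested, just to illustrate the suggestion; same variables as in the patch above):

        to_sg = to->wrapper.sgl;
        for_each_sgtable_dma_sg(from, from_sg, i) {
                /* clears page_link, length and offset in one go */
                sg_set_page(to_sg, NULL, 0, 0);
                sg_dma_address(to_sg) = sg_dma_address(from_sg);
                sg_dma_len(to_sg) = sg_dma_len(from_sg);
                to_sg = sg_next(to_sg);
        }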
Just thoughts... This looks reasonable to me.
With or without these changes:
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
M
> +                sg_dma_address(to_sg) = sg_dma_address(from_sg);
> +                sg_dma_len(to_sg) = sg_dma_len(from_sg);
> +                to_sg = sg_next(to_sg);
> +        }
> +
> +        to->original = from;
> +        *sg_table = &to->wrapper;
> +        return 0;
> +
> +free_to:
> +        kfree(to);
> +        return ret;
> +}
> +
> +static void dma_buf_demangle_sg_table(struct sg_table **sg_table)
> +{
> +        struct dma_buf_sg_table_wrapper *copy;
> +
> +        if (!IS_ENABLED(CONFIG_DMABUF_DEBUG))
> +                return;
> +
> +        copy = container_of(*sg_table, typeof(*copy), wrapper);
> +        *sg_table = copy->original;
> +        sg_free_table(&copy->wrapper);
> +        kfree(copy);
>  }
>
>  static inline bool
> @@ -1139,7 +1183,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>                  if (ret < 0)
>                          goto error_unmap;
>          }
> -        mangle_sg_table(sg_table);
> +        ret = dma_buf_mangle_sg_table(&sg_table);
> +        if (ret)
> +                goto error_unmap;
>
>          if (IS_ENABLED(CONFIG_DMA_API_DEBUG)) {
>                  struct scatterlist *sg;
> @@ -1220,7 +1266,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
>
>          dma_resv_assert_held(attach->dmabuf->resv);
>
> -        mangle_sg_table(sg_table);
> +        dma_buf_demangle_sg_table(&sg_table);
>          attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
>
>          if (dma_buf_pin_on_map(attach))
> --
> 2.43.0