Hi
On 28.04.2020 16:27, Tvrtko Ursulin wrote:
On 28/04/2020 14:19, Marek Szyprowski wrote:
The Documentation/DMA-API-HOWTO.txt states that dma_map_sg returns the number of entries created in the DMA address space. However, the subsequent calls to dma_sync_sg_for_{device,cpu} and dma_unmap_sg must be made with the original number of entries passed to dma_map_sg. The sg_table->nents field in turn holds the result of the dma_map_sg call, as stated in include/linux/scatterlist.h. Adapt the code to obey those rules.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c       | 13 +++++++------
 drivers/gpu/drm/i915/gem/i915_gem_internal.c     |  4 ++--
 drivers/gpu/drm/i915/gem/i915_gem_region.c       |  4 ++--
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c        |  5 +++--
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c  | 10 +++++-----
 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c |  5 +++--
 drivers/gpu/drm/i915/gt/intel_ggtt.c             | 12 ++++++------
 drivers/gpu/drm/i915/i915_gem_gtt.c              | 12 +++++++-----
 drivers/gpu/drm/i915/i915_scatterlist.c          |  4 ++--
 drivers/gpu/drm/i915/selftests/scatterlist.c     |  8 ++++----
 10 files changed, 41 insertions(+), 36 deletions(-)
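[Editor's note: for reference, the rule the commit message cites boils down to the following pattern. This is a minimal sketch, not taken from the patch; dev, sgt and dir stand in for the caller's context.]

static int map_and_use(struct device *dev, struct sg_table *sgt,
		       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/* dma_map_sg() may coalesce entries, so the returned count can
	 * be smaller than sgt->orig_nents; store it in sgt->nents. */
	sgt->nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (!sgt->nents)
		return -ENOMEM;

	/* The DMA side is walked with the mapped count... */
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		; /* each mapped segment: sg_dma_address(sg), sg_dma_len(sg) */

	/* ...but sync and unmap must use the original CPU-side count. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	return 0;
}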
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 7db5a79..d829852 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -36,21 +36,22 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		goto err_unpin_pages;
 	}
 
-	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+	ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
 	if (ret)
 		goto err_free;
 
 	src = obj->mm.pages->sgl;
 	dst = st->sgl;
-	for (i = 0; i < obj->mm.pages->nents; i++) {
+	for (i = 0; i < obj->mm.pages->orig_nents; i++) {
 		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
 	}
 
-	if (!dma_map_sg_attrs(attachment->dev,
-			      st->sgl, st->nents, dir,
-			      DMA_ATTR_SKIP_CPU_SYNC)) {
+	st->nents = dma_map_sg_attrs(attachment->dev,
+				     st->sgl, st->orig_nents, dir,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+	if (!st->nents) {
 		ret = -ENOMEM;
 		goto err_free_sg;
 	}
@@ -74,7 +75,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
 	dma_unmap_sg_attrs(attachment->dev,
-			   sg->sgl, sg->nents, dir,
+			   sg->sgl, sg->orig_nents, dir,
 			   DMA_ATTR_SKIP_CPU_SYNC);
 	sg_free_table(sg);
 	kfree(sg);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index cbbff81..a8ebfdd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -73,7 +73,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	}
 
 	sg = st->sgl;
-	st->nents = 0;
+	st->nents = st->orig_nents = 0;
 	sg_page_sizes = 0;
 
 	do {
@@ -94,7 +94,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
 		sg_set_page(sg, page, PAGE_SIZE << order, 0);
 		sg_page_sizes |= PAGE_SIZE << order;
-		st->nents++;
+		st->nents = st->orig_nents = st->nents + 1;
 
 		npages -= 1 << order;
 		if (!npages) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1515384..58ca560 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -53,7 +53,7 @@
 	GEM_BUG_ON(list_empty(blocks));
 
 	sg = st->sgl;
-	st->nents = 0;
+	st->nents = st->orig_nents = 0;
 	sg_page_sizes = 0;
 	prev_end = (resource_size_t)-1;
 
@@ -78,7 +78,7 @@
 
 			sg->length = block_size;
 
-			st->nents++;
+			st->nents = st->orig_nents = st->nents + 1;
 		} else {
 			sg->length += block_size;
 			sg_dma_len(sg) += block_size;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 5d5d7ee..851a732 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -80,7 +80,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
 
 	sg = st->sgl;
-	st->nents = 0;
+	st->nents = st->orig_nents = 0;
 	sg_page_sizes = 0;
 	for (i = 0; i < page_count; i++) {
 		const unsigned int shrink[] = {
@@ -140,7 +140,8 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 				sg_page_sizes |= sg->length;
 				sg = sg_next(sg);
 			}
-			st->nents++;
+			st->nents = st->orig_nents = st->nents + 1;
A bit higher up, not shown in the patch, we have allocated a table via sg_alloc_table, giving it a pessimistic max nents, sometimes much larger than the st->nents this loop will create. But orig_nents has now been overwritten. Will that leak memory come sg_free_table?
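[Editor's note: to spell the concern out, a simplified sketch; page_count and used are illustrative names, not the driver's.]

	sg_alloc_table(st, page_count, GFP_KERNEL);
	/* st->orig_nents == page_count, possibly spanning several
	 * chained scatterlist chunks */

	/* ... the fill loop coalesces pages, ending with fewer entries,
	 * and with the patch applied it also does ... */
	st->nents = st->orig_nents = used;	/* used < page_count */

	/* sg_free_table() walks orig_nents entries to find and free the
	 * chained chunks; with orig_nents shrunk to 'used', any chunks
	 * past that point are never reached and leak. */
	sg_free_table(st);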
Indeed, this will leak memory. I missed that i915_sg_trim() adjusts nents and orig_nents.
I will drop those changes, as they are only a creative (or hacky) way of using sg_table and scatterlists.
At minimum it will nerf our i915_sg_trim optimization a bit lower down, also not shown in the diff.
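[Editor's note: the trim in question short-circuits when the two counts already match, so pre-assigning orig_nents = nents at build time turns it into a no-op. Roughly, simplified from drivers/gpu/drm/i915/i915_scatterlist.c:]

bool i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	/* With orig_nents kept equal to nents during construction,
	 * this early-out always fires and nothing is ever trimmed. */
	if (orig_st->nents == orig_st->orig_nents)
		return false;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
		return false;

	/* Copy the used entries into an exact-size table and swap. */
	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		new_sg = sg_next(new_sg);
	}

	sg_free_table(orig_st);
	*orig_st = new_st;
	return true;
}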
[...]
Best regards