To prepare for reuse by the restricted heap, move dup_sg_table() out of system_heap.c into lib/scatterlist.c. Rename it to sg_dup_table() to keep the name consistent with the other sg_* helpers.
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
---
 drivers/dma-buf/heaps/system_heap.c | 27 +--------------------------
 include/linux/scatterlist.h         |  2 ++
 lib/scatterlist.c                   | 26 ++++++++++++++++++++++++++
 3 files changed, 29 insertions(+), 26 deletions(-)
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 9076d47ed2ef..204e55f92330 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -54,31 +54,6 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
 static const unsigned int orders[] = {8, 4, 0};
 #define NUM_ORDERS ARRAY_SIZE(orders)
 
-static struct sg_table *dup_sg_table(struct sg_table *table)
-{
-	struct sg_table *new_table;
-	int ret, i;
-	struct scatterlist *sg, *new_sg;
-
-	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
-
-	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
-	if (ret) {
-		kfree(new_table);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	new_sg = new_table->sgl;
-	for_each_sgtable_sg(table, sg, i) {
-		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
-		new_sg = sg_next(new_sg);
-	}
-
-	return new_table;
-}
-
 static int system_heap_attach(struct dma_buf *dmabuf,
 			      struct dma_buf_attachment *attachment)
 {
@@ -90,7 +65,7 @@ static int system_heap_attach(struct dma_buf *dmabuf,
 	if (!a)
 		return -ENOMEM;
 
-	table = dup_sg_table(&buffer->sg_table);
+	table = sg_dup_table(&buffer->sg_table);
 	if (IS_ERR(table)) {
 		kfree(a);
 		return -ENOMEM;
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index a6ad9018eca0..53a4cdc11f4f 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -538,6 +538,8 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
 		      size_t buflen, off_t skip);
 
+struct sg_table *sg_dup_table(struct sg_table *table);
+
 /*
  * Maximum number of entries that will be allocated in one piece, if
  * a list larger than this is required then chaining will be utilized.
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7bc2220fea80..3efcf728c13b 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1100,6 +1100,32 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
 }
 EXPORT_SYMBOL(sg_zero_buffer);
 
+struct sg_table *sg_dup_table(struct sg_table *table)
+{
+	struct sg_table *new_table;
+	int ret, i;
+	struct scatterlist *sg, *new_sg;
+
+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+	if (!new_table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(new_table);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	new_sg = new_table->sgl;
+	for_each_sgtable_sg(table, sg, i) {
+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+		new_sg = sg_next(new_sg);
+	}
+
+	return new_table;
+}
+EXPORT_SYMBOL(sg_dup_table);
+
 /*
  * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
  * iterators, and add them to the scatterlist.
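Reviewer's note, not part of the patch: below is a minimal sketch of how a future caller such as the planned restricted heap might use the exported helper, mirroring the system_heap_attach() pattern above. The restricted_heap_*_sketch names are hypothetical placeholders, not code from this series.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Hypothetical attach-path sketch: give each attachment its own copy of
 * the buffer's sg_table via sg_dup_table(), and release it on detach with
 * sg_free_table() plus kfree(), since sg_dup_table() kzalloc()s the table.
 */
static int restricted_heap_attach_sketch(struct sg_table *buf_table,
					 struct sg_table **out)
{
	struct sg_table *table;

	table = sg_dup_table(buf_table);
	if (IS_ERR(table))
		return PTR_ERR(table);

	*out = table;
	return 0;
}

static void restricted_heap_detach_sketch(struct sg_table *table)
{
	sg_free_table(table);	/* drop the duplicated scatterlist entries */
	kfree(table);		/* free the table allocated by sg_dup_table() */
}

The per-attachment copy lets each attaching device map the same pages independently, so DMA mapping does not scribble over nents/dma_address fields of a table shared with other attachments.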