dmabuf DMA addresses should not be dma_sync'd for CPU/device. It is typically the driver's responsibility to dma_sync for CPU, but the driver must not dma_sync for CPU when the netmem actually comes from a dmabuf memory provider.
The page_pool already exposes a helper for this, page_pool_dma_sync_for_cpu(). Upgrade it to handle netmem: add a netmem-aware variant, page_pool_dma_sync_netmem_for_cpu(), which skips the dma_sync entirely when the memory comes from a dmabuf memory provider, and keep page_pool_dma_sync_for_cpu() as a page-based wrapper around it. Drivers should migrate to the netmem helper when adding netmem support.
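For example, a netmem-aware driver's Rx path would call the new helper roughly like this (a minimal sketch; the rx_ring and rx_desc names are hypothetical, not taken from any real driver):

	/* Sync the Rx buffer for CPU access before reading it. The helper
	 * is a no-op when the pool is backed by a dmabuf memory provider,
	 * so the driver needs no special-casing of its own.
	 */
	page_pool_dma_sync_netmem_for_cpu(rx_ring->page_pool,
					  rx_desc->netmem, 0,
					  rx_desc->pkt_len);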
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Mina Almasry <almasrymina@google.com>
---
 include/net/page_pool/helpers.h | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 8e548ff3044c..ad4fed4a791c 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -429,9 +429,10 @@ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
 }
 
 /**
- * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
+ * page_pool_dma_sync_netmem_for_cpu - sync Rx page for CPU after it's written
+ * by HW
  * @pool: &page_pool the @page belongs to
- * @page: page to sync
+ * @netmem: netmem to sync
  * @offset: offset from page start to "hard" start if using PP frags
  * @dma_sync_size: size of the data written to the page
  *
@@ -440,16 +441,28 @@ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
  * Note that this version performs DMA sync unconditionally, even if the
  * associated PP doesn't perform sync-for-device.
  */
-static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
-					      const struct page *page,
-					      u32 offset, u32 dma_sync_size)
+static inline void
+page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
+				  const netmem_ref netmem, u32 offset,
+				  u32 dma_sync_size)
 {
+	if (pool->mp_priv)
+		return;
+
 	dma_sync_single_range_for_cpu(pool->p.dev,
-				      page_pool_get_dma_addr(page),
+				      page_pool_get_dma_addr_netmem(netmem),
 				      offset + pool->p.offset, dma_sync_size,
 				      page_pool_get_dma_dir(pool));
 }
 
+static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
+					      struct page *page, u32 offset,
+					      u32 dma_sync_size)
+{
+	page_pool_dma_sync_netmem_for_cpu(pool, page_to_netmem(page), offset,
+					  dma_sync_size);
+}
+
 static inline bool page_pool_put(struct page_pool *pool)
 {
 	return refcount_dec_and_test(&pool->user_cnt);
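Existing page-based callers keep working unchanged through the page_pool_dma_sync_for_cpu() wrapper; converting a driver to netmem is then a small change at the call site, roughly (variable names hypothetical):

	/* Before: page-based Rx buffer, always synced. */
	page_pool_dma_sync_for_cpu(pool, page, 0, len);

	/* After: netmem-based Rx buffer; the sync is skipped automatically
	 * for dmabuf-backed pools.
	 */
	page_pool_dma_sync_netmem_for_cpu(pool, netmem, 0, len);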