On Thu, 7 Dec 2023 16:52:39 -0800, Mina Almasry wrote: <...>
> +static int mp_dmabuf_devmem_init(struct page_pool *pool)
> +{
> +	struct netdev_dmabuf_binding *binding = pool->mp_priv;
> +
> +	if (!binding)
> +		return -EINVAL;
> +
> +	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
> +		return -EOPNOTSUPP;
> +
> +	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> +		return -EOPNOTSUPP;
> +
> +	netdev_dmabuf_binding_get(binding);
> +	return 0;
> +}
> +
> +static struct page *mp_dmabuf_devmem_alloc_pages(struct page_pool *pool,
> +						 gfp_t gfp)
> +{
> +	struct netdev_dmabuf_binding *binding = pool->mp_priv;
> +	struct page_pool_iov *ppiov;
> +
> +	ppiov = netdev_alloc_dmabuf(binding);

Since this provider only supports single-page (order-0) allocation, we'd better
add a check in ops->init() that pool->p.order is 0.
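
Maybe something like the below in mp_dmabuf_devmem_init() (untested sketch,
and -E2BIG is only one possible errno choice here):

	/* dma-buf backed memory is always order-0; reject higher-order pools */
	if (pool->p.order != 0)
		return -E2BIG;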

> +	if (!ppiov)
> +		return NULL;
> +
> +	ppiov->pp = pool;
> +	pool->pages_state_hold_cnt++;
> +	trace_page_pool_state_hold(pool, (struct page *)ppiov,
> +				   pool->pages_state_hold_cnt);
> +
> +	return (struct page *)((unsigned long)ppiov | PP_IOV);
> +}
<...>