some nits
On Wed, Apr 23, 2025 at 03:11:11AM +0000, Mina Almasry wrote:
> @@ -189,43 +200,44 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
>  	}
> 
>  	binding->dev = dev;
> -
> -	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
> -			      binding, xa_limit_32b, &id_alloc_next,
> -			      GFP_KERNEL);
> -	if (err < 0)
> -		goto err_free_binding;
> -
>  	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);
> -
>  	refcount_set(&binding->ref, 1);
> -
>  	binding->dmabuf = dmabuf;
given you keep iterating, please don't tweak whitespace in the same
patch as the functional changes; it will make review a tiny bit easier.
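for instance, the blank lines dropped around xa_init_flags() /
refcount_set() above, and this re-wrap further down, are pure reflow:

	-	binding->chunk_pool =
	-		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
	+	binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
	+					      dev_to_node(&dev->dev));

if you want those, a separate cleanup patch is a better place.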
> 
>  	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
>  	if (IS_ERR(binding->attachment)) {
>  		err = PTR_ERR(binding->attachment);
>  		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
> -		goto err_free_id;
> +		goto err_free_binding;
>  	}
> 
>  	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
> -						       DMA_FROM_DEVICE);
> +						       direction);
>  	if (IS_ERR(binding->sgt)) {
>  		err = PTR_ERR(binding->sgt);
>  		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
>  		goto err_detach;
>  	}
> 
> +	if (direction == DMA_TO_DEVICE) {
> +		binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
> +						 sizeof(struct net_iov *),
> +						 GFP_KERNEL);
> +		if (!binding->tx_vec) {
> +			err = -ENOMEM;
> +			goto err_unmap;
> +		}
> +	}
> +
>  	/* For simplicity we expect to make PAGE_SIZE allocations, but the
>  	 * binding can be much more flexible than that. We may be able to
>  	 * allocate MTU sized chunks here. Leave that for future work...
>  	 */
> -	binding->chunk_pool =
> -		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
> +	binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
> +					      dev_to_node(&dev->dev));
>  	if (!binding->chunk_pool) {
>  		err = -ENOMEM;
> -		goto err_unmap;
> +		goto err_tx_vec;
>  	}
> 
>  	virtual = 0;
> @@ -270,24 +282,34 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
>  			niov->owner = &owner->area;
>  			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
>  						      net_devmem_get_dma_addr(niov));
> +			if (direction == DMA_TO_DEVICE)
> +				binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
>  		}
> 
>  		virtual += len;
>  	}
> 
> +	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
> +			      binding, xa_limit_32b, &id_alloc_next,
> +			      GFP_KERNEL);
> +	if (err < 0)
> +		goto err_free_id;
> +
>  	return binding;
> 
> +err_free_id:
> +	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
>  err_free_chunks:
>  	gen_pool_for_each_chunk(binding->chunk_pool,
>  				net_devmem_dmabuf_free_chunk_owner, NULL);
>  	gen_pool_destroy(binding->chunk_pool);
> +err_tx_vec:
> +	kvfree(binding->tx_vec);
>  err_unmap:
>  	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
>  					  DMA_FROM_DEVICE);
>  err_detach:
>  	dma_buf_detach(dmabuf, binding->attachment);
> -err_free_id:
> -	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
>  err_free_binding:
>  	kfree(binding);
>  err_put_dmabuf:
> @@ -295,6 +317,21 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
>  	return ERR_PTR(err);
>  }
> 
> +struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
> +{
> +	struct net_devmem_dmabuf_binding *binding;
> +
> +	rcu_read_lock();
> +	binding = xa_load(&net_devmem_dmabuf_bindings, id);
> +	if (binding) {
> +		if (!net_devmem_dmabuf_binding_get(binding))
> +			binding = NULL;
> +	}
> +	rcu_read_unlock();
> +
> +	return binding;
> +}
> +
>  void net_devmem_get_net_iov(struct net_iov *niov)
>  {
>  	net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
> @@ -305,6 +342,53 @@ void net_devmem_put_net_iov(struct net_iov *niov)
>  	net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
>  }
> 
> +struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
> +							 unsigned int dmabuf_id)
> +{
> +	struct net_devmem_dmabuf_binding *binding;
> +	struct dst_entry *dst = __sk_dst_get(sk);
> +	int err = 0;
> +
> +	binding = net_devmem_lookup_dmabuf(dmabuf_id);
why not initialize 'binding' together with its declaration?
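something like this (untested) would keep the lookup next to the
declaration:

	struct net_devmem_dmabuf_binding *binding =
		net_devmem_lookup_dmabuf(dmabuf_id);
	struct dst_entry *dst = __sk_dst_get(sk);
	int err = 0;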
> +	if (!binding || !binding->tx_vec) {
> +		err = -EINVAL;
> +		goto out_err;
> +	}
> +
> +	/* The dma-addrs in this binding are only reachable to the corresponding
> +	 * net_device.
> +	 */
> +	if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
> +		err = -ENODEV;
> +		goto out_err;
> +	}
> +
> +	return binding;
> +
> +out_err:
> +	if (binding)
> +		net_devmem_dmabuf_binding_put(binding);
> +
> +	return ERR_PTR(err);
> +}
> +
> +struct net_iov *
> +net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding,
> +		       size_t virt_addr, size_t *off, size_t *size)
> +{
> +	size_t idx;
> +
> +	if (virt_addr >= binding->dmabuf->size)
> +		return NULL;
> +
> +	idx = virt_addr / PAGE_SIZE;
init this where it's declared? or compute it where it's used?
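i.e. either 'size_t idx = virt_addr / PAGE_SIZE;' at the top of the
function, or drop the variable entirely, something like (untested):

	return binding->tx_vec[virt_addr / PAGE_SIZE];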
> +	*off = virt_addr % PAGE_SIZE;
> +	*size = PAGE_SIZE - *off;
> +
> +	return binding->tx_vec[idx];
> +}
/*** "Dmabuf devmem memory provider" ***/ int mp_dmabuf_devmem_init(struct page_pool *pool)