Hi Bobby,
kernel test robot noticed the following build warnings:
[auto build test WARNING on 255d75ef029f33f75fcf5015052b7302486f7ad2]
url:    https://github.com/intel-lab-lkp/linux/commits/Bobby-Eshleman/net-devmem-ren...
base:   255d75ef029f33f75fcf5015052b7302486f7ad2
patch link:    https://lore.kernel.org/r/20251104-scratch-bobbyeshleman-devmem-tcp-token-up...
patch subject: [PATCH net-next v6 3/6] net: devmem: prepare for autorelease rx token management
config: arc-nsimosci_hs_defconfig (https://download.01.org/0day-ci/archive/20251106/202511060345.AQs0FTNg-lkp@i...)
compiler: arc-linux-gcc (GCC) 15.1.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251106/202511060345.AQs0FTNg-lkp@i...)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202511060345.AQs0FTNg-lkp@intel.com/
All warnings (new ones prefixed by >>):
net/ipv4/tcp.c: In function 'tcp_recvmsg_dmabuf':
>> net/ipv4/tcp.c:2661:12: warning: 'refs' is used uninitialized [-Wuninitialized]
    2661 |         if (refs > 0)
         |            ^
   net/ipv4/tcp.c:2496:13: note: 'refs' was declared here
    2496 |         int refs;
         |             ^~~~
vim +/refs +2661 net/ipv4/tcp.c
  2481  
  2482  /* On error, returns the -errno. On success, returns number of bytes sent to the
  2483   * user. May not consume all of @remaining_len.
  2484   */
  2485  static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
  2486                                unsigned int offset, struct msghdr *msg,
  2487                                int remaining_len)
  2488  {
  2489          struct net_devmem_dmabuf_binding *binding = NULL;
  2490          struct dmabuf_cmsg dmabuf_cmsg = { 0 };
  2491          struct tcp_xa_pool tcp_xa_pool;
  2492          unsigned int start;
  2493          int i, copy, n;
  2494          int sent = 0;
  2495          int err = 0;
  2496          int refs;
  2497  
  2498          tcp_xa_pool.max = 0;
  2499          tcp_xa_pool.idx = 0;
  2500          do {
  2501                  start = skb_headlen(skb);
  2502  
  2503                  if (skb_frags_readable(skb)) {
  2504                          err = -ENODEV;
  2505                          goto out;
  2506                  }
  2507  
  2508                  /* Copy header. */
  2509                  copy = start - offset;
  2510                  if (copy > 0) {
  2511                          copy = min(copy, remaining_len);
  2512  
  2513                          n = copy_to_iter(skb->data + offset, copy,
  2514                                           &msg->msg_iter);
  2515                          if (n != copy) {
  2516                                  err = -EFAULT;
  2517                                  goto out;
  2518                          }
  2519  
  2520                          offset += copy;
  2521                          remaining_len -= copy;
  2522  
  2523                          /* First a dmabuf_cmsg for # bytes copied to user
  2524                           * buffer.
  2525                           */
  2526                          memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
  2527                          dmabuf_cmsg.frag_size = copy;
  2528                          err = put_cmsg_notrunc(msg, SOL_SOCKET,
  2529                                                 SO_DEVMEM_LINEAR,
  2530                                                 sizeof(dmabuf_cmsg),
  2531                                                 &dmabuf_cmsg);
  2532                          if (err)
  2533                                  goto out;
  2534  
  2535                          sent += copy;
  2536  
  2537                          if (remaining_len == 0)
  2538                                  goto out;
  2539                  }
  2540  
  2541                  /* after that, send information of dmabuf pages through a
  2542                   * sequence of cmsg
  2543                   */
  2544                  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  2545                          skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2546                          struct net_iov *niov;
  2547                          u64 frag_offset;
  2548                          u32 token;
  2549                          int end;
  2550  
  2551                          /* !skb_frags_readable() should indicate that ALL the
  2552                           * frags in this skb are dmabuf net_iovs. We're checking
  2553                           * for that flag above, but also check individual frags
  2554                           * here. If the tcp stack is not setting
  2555                           * skb_frags_readable() correctly, we still don't want
  2556                           * to crash here.
  2557                           */
  2558                          if (!skb_frag_net_iov(frag)) {
  2559                                  net_err_ratelimited("Found non-dmabuf skb with net_iov");
  2560                                  err = -ENODEV;
  2561                                  goto out;
  2562                          }
  2563  
  2564                          niov = skb_frag_net_iov(frag);
  2565                          if (!net_is_devmem_iov(niov)) {
  2566                                  err = -ENODEV;
  2567                                  goto out;
  2568                          }
  2569  
  2570                          end = start + skb_frag_size(frag);
  2571                          copy = end - offset;
  2572  
  2573                          if (copy > 0) {
  2574                                  copy = min(copy, remaining_len);
  2575  
  2576                                  frag_offset = net_iov_virtual_addr(niov) +
  2577                                                skb_frag_off(frag) + offset -
  2578                                                start;
  2579                                  dmabuf_cmsg.frag_offset = frag_offset;
  2580                                  dmabuf_cmsg.frag_size = copy;
  2581  
  2582                                  binding = net_devmem_iov_binding(niov);
  2583  
  2584                                  if (!sk->sk_devmem_info.binding)
  2585                                          sk->sk_devmem_info.binding = binding;
  2586  
  2587                                  if (sk->sk_devmem_info.binding != binding) {
  2588                                          err = -EFAULT;
  2589                                          goto out;
  2590                                  }
  2591  
  2592                                  if (sk->sk_devmem_info.autorelease) {
  2593                                          err = tcp_xa_pool_refill(sk, &tcp_xa_pool,
  2594                                                                   skb_shinfo(skb)->nr_frags - i);
  2595                                          if (err)
  2596                                                  goto out;
  2597  
  2598                                          dmabuf_cmsg.frag_token =
  2599                                                  tcp_xa_pool.tokens[tcp_xa_pool.idx];
  2600                                  } else {
  2601                                          token = net_iov_virtual_addr(niov) >> PAGE_SHIFT;
  2602                                          dmabuf_cmsg.frag_token = token;
  2603                                  }
  2604  
  2605  
  2606                                  /* Will perform the exchange later */
  2607                                  dmabuf_cmsg.dmabuf_id = net_devmem_iov_binding_id(niov);
  2608  
  2609                                  offset += copy;
  2610                                  remaining_len -= copy;
  2611  
  2612                                  err = put_cmsg_notrunc(msg, SOL_SOCKET,
  2613                                                         SO_DEVMEM_DMABUF,
  2614                                                         sizeof(dmabuf_cmsg),
  2615                                                         &dmabuf_cmsg);
  2616                                  if (err)
  2617                                          goto out;
  2618  
  2619                                  if (sk->sk_devmem_info.autorelease) {
  2620                                          atomic_long_inc(&niov->pp_ref_count);
  2621                                          tcp_xa_pool.netmems[tcp_xa_pool.idx++] =
  2622                                                  skb_frag_netmem(frag);
  2623                                  } else {
  2624                                          if (atomic_inc_return(&niov->uref) == 1)
  2625                                                  atomic_long_inc(&niov->pp_ref_count);
  2626                                          refs++;
  2627                                  }
  2628  
  2629                                  sent += copy;
  2630  
  2631                                  if (remaining_len == 0)
  2632                                          goto out;
  2633                          }
  2634                          start = end;
  2635                  }
  2636  
  2637                  tcp_xa_pool_commit(sk, &tcp_xa_pool);
  2638  
  2639                  if (!remaining_len)
  2640                          goto out;
  2641  
  2642                  /* if remaining_len is not satisfied yet, we need to go to the
  2643                   * next frag in the frag_list to satisfy remaining_len.
  2644                   */
  2645                  skb = skb_shinfo(skb)->frag_list ?: skb->next;
  2646  
  2647                  offset = offset - start;
  2648          } while (skb);
  2649  
  2650          if (remaining_len) {
  2651                  err = -EFAULT;
  2652                  goto out;
  2653          }
  2654  
  2655  out:
  2656          tcp_xa_pool_commit(sk, &tcp_xa_pool);
  2657  
  2658          if (!sent)
  2659                  sent = err;
  2660  
> 2661          if (refs > 0)
  2662                  atomic_add(refs, &sk->sk_devmem_info.outstanding_urefs);
  2663  
  2664          return sent;
  2665  }
  2666  
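
The warning looks legitimate: 'refs' is declared without an initializer at line
2496 and is only ever incremented in the non-autorelease branch at line 2626,
yet the "if (refs > 0)" check at line 2661 runs on every exit through the out:
label. Any path that never reaches that branch (the early -ENODEV/-EFAULT
exits, or a socket using the autorelease token pool throughout) therefore reads
an indeterminate value. A minimal sketch of one possible fix, untested here and
not necessarily how the series will resolve it, is to zero-initialize the
counter at its declaration:

--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2493,7 +2493,7 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
 	int i, copy, n;
 	int sent = 0;
 	int err = 0;
-	int refs;
+	int refs = 0;
 
 	tcp_xa_pool.max = 0;
 	tcp_xa_pool.idx = 0;

With refs starting at 0, the guard before atomic_add() stays false on every
path that takes no user reference, so sk_devmem_info.outstanding_urefs is only
updated when the refs++ branch actually ran.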