From: Yang Yingliang yangyingliang@huawei.com
[ Upstream commit 996af62167d0e0ec69b938a3561e96f84ffff1aa ]
I got a kmemleak report when doing fuzz testing:
BUG: memory leak
unreferenced object 0xffff88810c239500 (size 64):
  comm "syz-executor940", pid 882, jiffies 4294712870 (age 14.631s)
  hex dump (first 32 bytes):
    01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
    00 00 00 00 00 00 00 00 01 00 00 00 01 02 00 04  ................
  backtrace:
    [<00000000a323afa4>] slab_alloc_node mm/slub.c:2972 [inline]
    [<00000000a323afa4>] slab_alloc mm/slub.c:2980 [inline]
    [<00000000a323afa4>] __kmalloc+0x167/0x340 mm/slub.c:4130
    [<000000005034ca11>] kmalloc include/linux/slab.h:595 [inline]
    [<000000005034ca11>] mrp_attr_create net/802/mrp.c:276 [inline]
    [<000000005034ca11>] mrp_request_join+0x265/0x550 net/802/mrp.c:530
    [<00000000fcfd81f3>] vlan_mvrp_request_join+0x145/0x170 net/8021q/vlan_mvrp.c:40
    [<000000009258546e>] vlan_dev_open+0x477/0x890 net/8021q/vlan_dev.c:292
    [<0000000059acd82b>] __dev_open+0x281/0x410 net/core/dev.c:1609
    [<000000004e6dc695>] __dev_change_flags+0x424/0x560 net/core/dev.c:8767
    [<00000000471a09af>] rtnl_configure_link+0xd9/0x210 net/core/rtnetlink.c:3122
    [<0000000037a4672b>] __rtnl_newlink+0xe08/0x13e0 net/core/rtnetlink.c:3448
    [<000000008d5d0fda>] rtnl_newlink+0x64/0xa0 net/core/rtnetlink.c:3488
    [<000000004882fe39>] rtnetlink_rcv_msg+0x369/0xa10 net/core/rtnetlink.c:5552
    [<00000000907e6c54>] netlink_rcv_skb+0x134/0x3d0 net/netlink/af_netlink.c:2504
    [<00000000e7d7a8c4>] netlink_unicast_kernel net/netlink/af_netlink.c:1314 [inline]
    [<00000000e7d7a8c4>] netlink_unicast+0x4a0/0x6a0 net/netlink/af_netlink.c:1340
    [<00000000e0645d50>] netlink_sendmsg+0x78e/0xc90 net/netlink/af_netlink.c:1929
    [<00000000c24559b7>] sock_sendmsg_nosec net/socket.c:654 [inline]
    [<00000000c24559b7>] sock_sendmsg+0x139/0x170 net/socket.c:674
    [<00000000fc210bc2>] ____sys_sendmsg+0x658/0x7d0 net/socket.c:2350
    [<00000000be4577b5>] ___sys_sendmsg+0xf8/0x170 net/socket.c:2404
When mrp_request_leave() is called after mrp_request_join(), attr->state is set to MRP_APPLICANT_VO, so mrp_attr_destroy() won't be called for the last TX event in mrp_uninit_applicant() and the applicant's attributes are leaked. To fix this leak, iterate over and free each attr of the applicant before returning from mrp_uninit_applicant().
Reported-by: Hulk Robot hulkci@huawei.com
Signed-off-by: Yang Yingliang yangyingliang@huawei.com
Signed-off-by: David S. Miller davem@davemloft.net
Signed-off-by: Sasha Levin sashal@kernel.org
---
 net/802/mrp.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/net/802/mrp.c b/net/802/mrp.c
index bea6e43d45a0..35e04cc5390c 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
 	kfree(attr);
 }
 
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct mrp_attr *attr;
+
+	for (node = rb_first(&app->mad);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct mrp_attr, node);
+		mrp_attr_destroy(app, attr);
+	}
+}
+
 static int mrp_pdu_init(struct mrp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
 
 	spin_lock_bh(&app->lock);
 	mrp_mad_event(app, MRP_EVENT_TX);
+	mrp_attr_destroy_all(app);
 	mrp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
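For readers less familiar with the rbtree API, the new helper is the usual "fetch the next node before destroying the current one" idiom. Below is a minimal, hypothetical sketch of the same pattern with invented names; it is not code from this patch, only an illustration of why rb_next() has to be evaluated before the current node is freed.

#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
	struct rb_node node;
	int key;
};

/* Free every element of an rbtree. rb_next() is read before the current
 * node is erased and freed, so the walk never touches freed memory. */
static void item_destroy_all(struct rb_root *root)
{
	struct rb_node *node, *next;

	for (node = rb_first(root); node; node = next) {
		struct item *it = rb_entry(node, struct item, node);

		next = rb_next(node);
		rb_erase(node, root);
		kfree(it);
	}
}

The mrp/garp helpers follow the same shape, except that the erase-and-free step is the existing mrp_attr_destroy()/garp_attr_destroy().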
From: Yang Yingliang yangyingliang@huawei.com
[ Upstream commit 42ca63f980842918560b25f0244307fd83b4777c ]
I got a kmemleak report when doing fuzz testing:
BUG: memory leak
unreferenced object 0xffff88810c909b80 (size 64):
  comm "syz", pid 957, jiffies 4295220394 (age 399.090s)
  hex dump (first 32 bytes):
    01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
    00 00 00 00 00 00 00 00 08 00 00 00 01 02 00 04  ................
  backtrace:
    [<00000000ca1f2e2e>] garp_request_join+0x285/0x3d0
    [<00000000bf153351>] vlan_gvrp_request_join+0x15b/0x190
    [<0000000024005e72>] vlan_dev_open+0x706/0x980
    [<00000000dc20c4d4>] __dev_open+0x2bb/0x460
    [<0000000066573004>] __dev_change_flags+0x501/0x650
    [<0000000035b42f83>] rtnl_configure_link+0xee/0x280
    [<00000000a5e69de0>] __rtnl_newlink+0xed5/0x1550
    [<00000000a5258f4a>] rtnl_newlink+0x66/0x90
    [<00000000506568ee>] rtnetlink_rcv_msg+0x439/0xbd0
    [<00000000b7eaeae1>] netlink_rcv_skb+0x14d/0x420
    [<00000000c373ce66>] netlink_unicast+0x550/0x750
    [<00000000ec74ce74>] netlink_sendmsg+0x88b/0xda0
    [<00000000381ff246>] sock_sendmsg+0xc9/0x120
    [<000000008f6a2db3>] ____sys_sendmsg+0x6e8/0x820
    [<000000008d9c1735>] ___sys_sendmsg+0x145/0x1c0
    [<00000000aa39dd8b>] __sys_sendmsg+0xfe/0x1d0
When garp_request_leave() is called after garp_request_join(), attr->state is set to GARP_APPLICANT_VO, so garp_attr_destroy() won't be called for the last transmit event in garp_uninit_applicant() and the applicant's attributes are leaked. To fix this leak, iterate over and free each attr of the applicant before returning from garp_uninit_applicant().
Reported-by: Hulk Robot hulkci@huawei.com
Signed-off-by: Yang Yingliang yangyingliang@huawei.com
Signed-off-by: David S. Miller davem@davemloft.net
Signed-off-by: Sasha Levin sashal@kernel.org
---
 net/802/garp.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/net/802/garp.c b/net/802/garp.c
index 400bd857e5f5..f6012f8e59f0 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr)
 	kfree(attr);
 }
 
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct garp_attr *attr;
+
+	for (node = rb_first(&app->gid);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct garp_attr, node);
+		garp_attr_destroy(app, attr);
+	}
+}
+
 static int garp_pdu_init(struct garp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 
 	spin_lock_bh(&app->lock);
 	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+	garp_attr_destroy_all(app);
 	garp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
From: Eric Dumazet edumazet@google.com
[ Upstream commit 0dbffbb5335a1e3aa6855e4ee317e25e669dd302 ]
sk_ll_usec is read locklessly from sk_can_busy_loop() while another thread can change its value in sock_setsockopt().
This is correct but needs annotations.
BUG: KCSAN: data-race in __skb_try_recv_datagram / sock_setsockopt
write to 0xffff88814eb5f904 of 4 bytes by task 14011 on cpu 0:
 sock_setsockopt+0x1287/0x2090 net/core/sock.c:1175
 __sys_setsockopt+0x14f/0x200 net/socket.c:2100
 __do_sys_setsockopt net/socket.c:2115 [inline]
 __se_sys_setsockopt net/socket.c:2112 [inline]
 __x64_sys_setsockopt+0x62/0x70 net/socket.c:2112
 do_syscall_64+0x4a/0x90 arch/x86/entry/common.c:47
 entry_SYSCALL_64_after_hwframe+0x44/0xae
read to 0xffff88814eb5f904 of 4 bytes by task 14001 on cpu 1:
 sk_can_busy_loop include/net/busy_poll.h:41 [inline]
 __skb_try_recv_datagram+0x14f/0x320 net/core/datagram.c:273
 unix_dgram_recvmsg+0x14c/0x870 net/unix/af_unix.c:2101
 unix_seqpacket_recvmsg+0x5a/0x70 net/unix/af_unix.c:2067
 ____sys_recvmsg+0x15d/0x310 include/linux/uio.h:244
 ___sys_recvmsg net/socket.c:2598 [inline]
 do_recvmmsg+0x35c/0x9f0 net/socket.c:2692
 __sys_recvmmsg net/socket.c:2771 [inline]
 __do_sys_recvmmsg net/socket.c:2794 [inline]
 __se_sys_recvmmsg net/socket.c:2787 [inline]
 __x64_sys_recvmmsg+0xcf/0x150 net/socket.c:2787
 do_syscall_64+0x4a/0x90 arch/x86/entry/common.c:47
 entry_SYSCALL_64_after_hwframe+0x44/0xae
value changed: 0x00000000 -> 0x00000101
Reported by Kernel Concurrency Sanitizer on:
CPU: 1 PID: 14001 Comm: syz-executor.3 Not tainted 5.13.0-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Signed-off-by: Eric Dumazet edumazet@google.com
Reported-by: syzbot syzkaller@googlegroups.com
Signed-off-by: David S. Miller davem@davemloft.net
Signed-off-by: Sasha Levin sashal@kernel.org
---
 include/net/busy_poll.h | 2 +-
 net/core/sock.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 73af4a64a599..40296ed976a9 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void)
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-	return sk->sk_ll_usec && !signal_pending(current);
+	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
 }
 
 bool sk_busy_loop_end(void *p, unsigned long start_time);

diff --git a/net/core/sock.c b/net/core/sock.c
index 946888afef88..d27fdc2adf9c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1172,7 +1172,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 			if (val < 0)
 				ret = -EINVAL;
 			else
-				sk->sk_ll_usec = val;
+				WRITE_ONCE(sk->sk_ll_usec, val);
 		}
 		break;
 	case SO_PREFER_BUSY_POLL:
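The annotation pattern itself is generic: a field updated under a lock but read locklessly elsewhere gets a WRITE_ONCE()/READ_ONCE() pair so the compiler cannot tear the access and KCSAN knows the race is intentional. A small hypothetical sketch with an invented struct and field (not code from net/core):

#include <linux/compiler.h>
#include <linux/spinlock.h>

struct conn {
	spinlock_t lock;
	unsigned int budget;	/* written under lock, read locklessly */
};

static void conn_set_budget(struct conn *c, unsigned int val)
{
	spin_lock(&c->lock);
	WRITE_ONCE(c->budget, val);	/* pairs with READ_ONCE() below */
	spin_unlock(&c->lock);
}

static bool conn_has_budget(const struct conn *c)
{
	/* Lockless reader: the annotation prevents load tearing and
	 * documents the intentional data race for KCSAN. */
	return READ_ONCE(c->budget) != 0;
}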
From: Xin Long lucien.xin@gmail.com
[ Upstream commit 1d11fa231cabeae09a95cb3e4cf1d9dd34e00f08 ]
The doc draft-stewart-tsvwg-sctp-ipv4-00, which restricts the 198 addresses, was never published. As private addresses, they should be allowed for use in SCTP.
As Michael Tuexen suggested, this patch moves the 198 addresses from the unusable to the private scope.
Reported-by: Sérgio surkamp@gmail.com
Signed-off-by: Xin Long lucien.xin@gmail.com
Acked-by: Marcelo Ricardo Leitner marcelo.leitner@gmail.com
Signed-off-by: David S. Miller davem@davemloft.net
Signed-off-by: Sasha Levin sashal@kernel.org
---
 include/net/sctp/constants.h | 4 +---
 net/sctp/protocol.c | 3 ++-
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 14a0d22c9113..bf23a2ed92da 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -342,8 +342,7 @@ enum {
 #define SCTP_SCOPE_POLICY_MAX	SCTP_SCOPE_POLICY_LINK
 
 /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
  * addresses.
  */
@@ -351,7 +350,6 @@ enum {
 	((htonl(INADDR_BROADCAST) == a) ||	\
 	 ipv4_is_multicast(a) ||		\
 	 ipv4_is_zeronet(a) ||			\
-	 ipv4_is_test_198(a) ||			\
 	 ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions. */

diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 6f2bbfeec3a4..603049aa4950 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -393,7 +393,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
 		retval = SCTP_SCOPE_LINK;
 	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
 		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
-		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
 		retval = SCTP_SCOPE_PRIVATE;
 	} else {
 		retval = SCTP_SCOPE_GLOBAL;
From: "Paul E. McKenney" paulmck@kernel.org
[ Upstream commit 1d10bf55d85d34eb73dd8263635f43fd72135d2d ]
As Yanfei pointed out, although invoking trc_del_holdout() is safe from the viewpoint of the integrity of the holdout list itself, the put_task_struct() invoked by trc_del_holdout() can result in use-after-free errors due to later accesses to this task_struct structure by the RCU Tasks Trace grace-period kthread.
This commit therefore removes this call to trc_del_holdout() from trc_inspect_reader() in favor of the grace-period thread's existing call to trc_del_holdout(), thus eliminating that particular class of use-after-free errors.
Reported-by: "Xu, Yanfei" yanfei.xu@windriver.com
Signed-off-by: Paul E. McKenney paulmck@kernel.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 kernel/rcu/tasks.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 350ebf5051f9..71e9d625371a 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -908,10 +908,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 		in_qs = likely(!t->trc_reader_nesting);
 	}
 
-	// Mark as checked.  Because this is called from the grace-period
-	// kthread, also remove the task from the holdout list.
+	// Mark as checked so that the grace-period kthread will
+	// remove it from the holdout list.
 	t->trc_reader_checked = true;
-	trc_del_holdout(t);
 
 	if (in_qs)
 		return true;  // Already in quiescent state, done!!!
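The rule this patch and the next one converge on is a plain refcounting one: a task is pinned with get_task_struct() while it sits on the holdout list, and only the code that removes it from the list (now always the grace-period kthread) drops that reference. Below is a hypothetical sketch of that rule, assuming CONFIG_TASKS_TRACE_RCU so that task_struct has a trc_holdout_list field; these are not the real trc_add_holdout()/trc_del_holdout() bodies, just an illustration.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

static void example_add_holdout(struct task_struct *t, struct list_head *list)
{
	get_task_struct(t);			/* pin while it is on the list */
	list_add(&t->trc_holdout_list, list);
}

static void example_del_holdout(struct task_struct *t)
{
	list_del_init(&t->trc_holdout_list);
	put_task_struct(t);			/* may free t; no further access allowed */
}

With only the grace-period kthread performing the del side, the later accesses described above can no longer hit freed memory.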
From: "Paul E. McKenney" paulmck@kernel.org
[ Upstream commit a9ab9cce9367a2cc02a3c7eb57a004dc0b8f380d ]
Invoking trc_del_holdout() from within trc_wait_for_one_reader() is only a performance optimization because the RCU Tasks Trace grace-period kthread will eventually do this within check_all_holdout_tasks_trace(). But it is not a particularly important performance optimization because it only applies to the grace-period kthread, of which there is but one. This commit therefore removes this invocation of trc_del_holdout() in favor of the one in check_all_holdout_tasks_trace() in the grace-period kthread.
Reported-by: "Xu, Yanfei" yanfei.xu@windriver.com
Signed-off-by: Paul E. McKenney paulmck@kernel.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 kernel/rcu/tasks.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 71e9d625371a..fcef5f0c60b8 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -937,7 +937,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 	// The current task had better be in a quiescent state.
 	if (t == current) {
 		t->trc_reader_checked = true;
-		trc_del_holdout(t);
 		WARN_ON_ONCE(t->trc_reader_nesting);
 		return;
 	}
From: Vasily Averin vvs@virtuozzo.com
[ Upstream commit 5796015fa968a3349027a27dcd04c71d95c53ba5 ]
When the TEE target mirrors traffic to another interface, the sk_buff may not have enough headroom to be processed correctly. ip_finish_output2() detects this situation for IPv4 and allocates a new skb with enough headroom. However, IPv6 lacks this logic in ip6_finish_output2(), which leads to skb_under_panic:
skbuff: skb_under_panic: text:ffffffffc0866ad4 len:96 put:24
head:ffff97be85e31800 data:ffff97be85e317f8 tail:0x58 end:0xc0 dev:gre0
------------[ cut here ]------------
kernel BUG at net/core/skbuff.c:110!
invalid opcode: 0000 [#1] SMP PTI
CPU: 2 PID: 393 Comm: kworker/2:2 Tainted: G OE 5.13.0 #13
Hardware name: Virtuozzo KVM, BIOS 1.11.0-2.vz7.4 04/01/2014
Workqueue: ipv6_addrconf addrconf_dad_work
RIP: 0010:skb_panic+0x48/0x4a
Call Trace:
 skb_push.cold.111+0x10/0x10
 ipgre_header+0x24/0xf0 [ip_gre]
 neigh_connected_output+0xae/0xf0
 ip6_finish_output2+0x1a8/0x5a0
 ip6_output+0x5c/0x110
 nf_dup_ipv6+0x158/0x1000 [nf_dup_ipv6]
 tee_tg6+0x2e/0x40 [xt_TEE]
 ip6t_do_table+0x294/0x470 [ip6_tables]
 nf_hook_slow+0x44/0xc0
 nf_hook.constprop.34+0x72/0xe0
 ndisc_send_skb+0x20d/0x2e0
 ndisc_send_ns+0xd1/0x210
 addrconf_dad_work+0x3c8/0x540
 process_one_work+0x1d1/0x370
 worker_thread+0x30/0x390
 kthread+0x116/0x130
 ret_from_fork+0x22/0x30
Signed-off-by: Vasily Averin vvs@virtuozzo.com
Signed-off-by: David S. Miller davem@davemloft.net
Signed-off-by: Sasha Levin sashal@kernel.org
---
 net/ipv6/ip6_output.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ff4f9ebcf7f6..0efcb9b04151 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+	int delta = hh_len - skb_headroom(skb);
 	const struct in6_addr *nexthop;
 	struct neighbour *neigh;
 	int ret;
 
+	/* Be paranoid, rather than too clever. */
+	if (unlikely(delta > 0) && dev->header_ops) {
+		/* pskb_expand_head() might crash, if skb is shared */
+		if (skb_shared(skb)) {
+			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+			if (likely(nskb)) {
+				if (skb->sk)
+					skb_set_owner_w(skb, skb->sk);
+				consume_skb(skb);
+			} else {
+				kfree_skb(skb);
+			}
+			skb = nskb;
+		}
+		if (skb &&
+		    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			skb = NULL;
+		}
+		if (!skb) {
+			IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+			return -ENOMEM;
+		}
+	}
+
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
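Stripped of the shared-skb handling, the core of the change is the standard "make sure there is LL_RESERVED_SPACE(dev) of headroom before a link-layer header gets pushed" check. A minimal, hypothetical helper showing just that check (the function name is invented; the real patch also has to deal with shared skbs and statistics):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int ensure_ll_headroom(struct sk_buff *skb, struct net_device *dev)
{
	int delta = LL_RESERVED_SPACE(dev) - skb_headroom(skb);

	if (delta <= 0)
		return 0;		/* enough headroom already */

	/* Reallocate the skb head with the missing (aligned) headroom. */
	return pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC);
}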
this patch is incomplete, and requires following fixup https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
Please backport it too.
Thank you, Vasily Averin
On Fri, Jul 23, 2021 at 09:03:33AM +0300, Vasily Averin wrote:
this patch is incomplete, and requires following fixup https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
I've grabbed it too, thanks!
From: Casey Chen cachen@purestorage.com
[ Upstream commit e4b9852a0f4afe40604afb442e3af4452722050a ]
The two paths below could overlap if we power off a drive quickly after powering it on. There are multiple races in nvme_setup_io_queues() because shutdown_lock is not taken and the NVMEQ_ENABLED bit is used improperly.
nvme_reset_work()                                nvme_remove()
  nvme_setup_io_queues()                           nvme_dev_disable()
  ...                                              ...
A1  clear NVMEQ_ENABLED bit for admin queue          lock
    retry:                                       B1  nvme_suspend_io_queues()
A2    pci_free_irq() admin queue                 B2  nvme_suspend_queue() admin queue
A3    pci_free_irq_vectors()                         nvme_pci_disable()
A4    nvme_setup_irqs();                         B3    pci_free_irq_vectors()
      ...                                            unlock
A5    queue_request_irq() for admin queue
      set NVMEQ_ENABLED bit
      ...
      nvme_create_io_queues()
A6      result = queue_request_irq();
        set NVMEQ_ENABLED bit
      ...
      fail to allocate enough IO queues:
A7      nvme_suspend_io_queues()
      goto retry
If B3 runs in between A1 and A2, it will crash because the irqaction hasn't been freed by A2 yet. B2 is supposed to free the admin queue IRQ, but it simply can't do the job because A1 has already cleared the NVMEQ_ENABLED bit.
Fix: combine A1 and A2 so the IRQ gets freed as soon as the NVMEQ_ENABLED bit gets cleared.
With the above solved, A2 could still race with B3 if A2 is freeing the IRQ while B3 is checking the irqaction. A3 could likewise race with B2 if B2 is freeing the IRQ while A3 is checking the irqaction.
Fix: A2 and A3 take the lock for mutual exclusion.
A3 could race with B3 since they could run free_msi_irqs() in parallel.
Fix: A3 takes the lock for mutual exclusion.
A4 could fail to allocate all needed IRQ vectors if A3 and A4 are interrupted by B3.
Fix: A4 takes the lock for mutual exclusion.
If A5/A6 happen after B2/B1, B3 will crash since the irqaction is not NULL; it was just allocated by A5/A6.
Fix: take the lock around queue_request_irq() and the setting of the NVMEQ_ENABLED bit.
A7 could get a chance to call pci_free_irq() for a certain IO queue while B3 is checking the irqaction.
Fix: A7 takes the lock.
nvme_dev->online_queues needs to be protected by shutdown_lock; since it is not atomic, both paths could otherwise modify it using their own copies.
Co-developed-by: Yuanyuan Zhong yzhong@purestorage.com
Signed-off-by: Casey Chen cachen@purestorage.com
Reviewed-by: Keith Busch kbusch@kernel.org
Signed-off-by: Christoph Hellwig hch@lst.de
Signed-off-by: Sasha Levin sashal@kernel.org
---
 drivers/nvme/host/pci.c | 66 ++++++++++++++++++++++++++++++++++++-----
 1 file changed, 58 insertions(+), 8 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 42ad75ff1348..1e704c63f1e2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1562,6 +1562,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	wmb(); /* ensure the first interrupt sees the initialization */
 }
 
+/*
+ * Try getting shutdown_lock while setting up IO queues.
+ */
+static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
+{
+	/*
+	 * Give up if the lock is being held by nvme_dev_disable.
+	 */
+	if (!mutex_trylock(&dev->shutdown_lock))
+		return -ENODEV;
+
+	/*
+	 * Controller is in wrong state, fail early.
+	 */
+	if (dev->ctrl.state != NVME_CTRL_CONNECTING) {
+		mutex_unlock(&dev->shutdown_lock);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 {
 	struct nvme_dev *dev = nvmeq->dev;
@@ -1590,8 +1612,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 		goto release_cq;
 
 	nvmeq->cq_vector = vector;
-	nvme_init_queue(nvmeq, qid);
+	result = nvme_setup_io_queues_trylock(dev);
+	if (result)
+		return result;
+	nvme_init_queue(nvmeq, qid);
 
 	if (!polled) {
 		result = queue_request_irq(nvmeq);
 		if (result < 0)
@@ -1599,10 +1624,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 	}
 
 	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
+	mutex_unlock(&dev->shutdown_lock);
 	return result;
 
 release_sq:
 	dev->online_queues--;
+	mutex_unlock(&dev->shutdown_lock);
 	adapter_delete_sq(dev, qid);
 release_cq:
 	adapter_delete_cq(dev, qid);
@@ -2176,7 +2203,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (nr_io_queues == 0)
 		return 0;
 
-	clear_bit(NVMEQ_ENABLED, &adminq->flags);
+	/*
+	 * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
+	 * from set to unset. If there is a window to it is truely freed,
+	 * pci_free_irq_vectors() jumping into this window will crash.
+	 * And take lock to avoid racing with pci_free_irq_vectors() in
+	 * nvme_dev_disable() path.
+	 */
+	result = nvme_setup_io_queues_trylock(dev);
+	if (result)
+		return result;
+	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+		pci_free_irq(pdev, 0, adminq);
 
 	if (dev->cmb_use_sqes) {
 		result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -2192,14 +2230,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 		result = nvme_remap_bar(dev, size);
 		if (!result)
 			break;
-		if (!--nr_io_queues)
-			return -ENOMEM;
+		if (!--nr_io_queues) {
+			result = -ENOMEM;
+			goto out_unlock;
+		}
 	} while (1);
 	adminq->q_db = dev->dbs;
 
 retry:
 	/* Deregister the admin queue's interrupt */
-	pci_free_irq(pdev, 0, adminq);
+	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
+		pci_free_irq(pdev, 0, adminq);
 
 	/*
 	 * If we enable msix early due to not intx, disable it again before
@@ -2208,8 +2249,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	pci_free_irq_vectors(pdev);
 
 	result = nvme_setup_irqs(dev, nr_io_queues);
-	if (result <= 0)
-		return -EIO;
+	if (result <= 0) {
+		result = -EIO;
+		goto out_unlock;
+	}
 
 	dev->num_vecs = result;
 	result = max(result - 1, 1);
@@ -2223,8 +2266,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 */
 	result = queue_request_irq(adminq);
 	if (result)
-		return result;
+		goto out_unlock;
 	set_bit(NVMEQ_ENABLED, &adminq->flags);
+	mutex_unlock(&dev->shutdown_lock);
 
 	result = nvme_create_io_queues(dev);
 	if (result || dev->online_queues < 2)
@@ -2233,6 +2277,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (dev->online_queues - 1 < dev->max_qid) {
 		nr_io_queues = dev->online_queues - 1;
 		nvme_disable_io_queues(dev);
+		result = nvme_setup_io_queues_trylock(dev);
+		if (result)
+			return result;
 		nvme_suspend_io_queues(dev);
 		goto retry;
 	}
@@ -2241,6 +2288,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 		 dev->io_queues[HCTX_TYPE_READ],
 		 dev->io_queues[HCTX_TYPE_POLL]);
 	return 0;
+out_unlock:
+	mutex_unlock(&dev->shutdown_lock);
+	return result;
 }
 
 static void nvme_del_queue_end(struct request *req, blk_status_t error)
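The "combine A1 and A2" part of the fix boils down to freeing the IRQ only on a set-to-clear transition of the queue's enabled bit, so two racing paths can never both perform the free. A hypothetical sketch of that idiom in isolation (not a function in the driver; the bit number and name are placeholders):

#include <linux/bitops.h>
#include <linux/pci.h>

static void example_release_admin_irq(struct pci_dev *pdev,
				      unsigned long *flags, void *dev_id)
{
	/* Only the path that actually clears bit 0 performs the free; a
	 * second caller observes the bit already clear and does nothing. */
	if (test_and_clear_bit(0, flags))
		pci_free_irq(pdev, 0, dev_id);
}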
From: Íñigo Huguet ihuguet@redhat.com
[ Upstream commit 788bc000d4c2f25232db19ab3a0add0ba4e27671 ]
Commit 99ba0ea616aa ("sfc: adjust efx->xdp_tx_queue_count with the real number of initialized queues") intended to fix a problem caused by a round up when calculating the number of XDP channels and queues. However, this was not the real problem. The real problem was that the number of XDP TX queues had been reduced to half in commit e26ca4b53582 ("sfc: reduce the number of requested xdp ev queues"), but the variable xdp_tx_queue_count had remained the same.
Once the correct number of XDP TX queues is created again in the previous patch of this series, this also can be reverted since the error doesn't actually exist.
Only if there is a bug in the code can xdp_queue_number and efx->xdp_tx_queue_count end up with different values. Because of this, and per Edward Cree's suggestion, I instead add a WARN_ON to catch it if it happens again in the future.
Note that the number of allocated queues can be higher than the number of used ones due to the round up, as explained in the existing comment in the code. That's why we also have to stop increasing xdp_queue_number beyond efx->xdp_tx_queue_count.
Signed-off-by: Íñigo Huguet ihuguet@redhat.com
Signed-off-by: David S. Miller davem@davemloft.net
Signed-off-by: Sasha Levin sashal@kernel.org
---
 drivers/net/ethernet/sfc/efx_channels.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index a3ca406a3561..bea0b27baf4b 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -891,18 +891,20 @@ int efx_set_channels(struct efx_nic *efx)
 		if (efx_channel_is_xdp_tx(channel)) {
 			efx_for_each_channel_tx_queue(tx_queue, channel) {
 				tx_queue->queue = next_queue++;
-				netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-					  channel->channel, tx_queue->label,
-					  xdp_queue_number, tx_queue->queue);
+
 				/* We may have a few left-over XDP TX
 				 * queues owing to xdp_tx_queue_count
 				 * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
 				 * We still allocate and probe those
 				 * TXQs, but never use them.
 				 */
-				if (xdp_queue_number < efx->xdp_tx_queue_count)
+				if (xdp_queue_number < efx->xdp_tx_queue_count) {
+					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+						  channel->channel, tx_queue->label,
+						  xdp_queue_number, tx_queue->queue);
 					efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-				xdp_queue_number++;
+					xdp_queue_number++;
+				}
 			}
 		} else {
 			efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -914,8 +916,7 @@ int efx_set_channels(struct efx_nic *efx)
 			}
 		}
 	}
-	if (xdp_queue_number)
-		efx->xdp_tx_queue_count = xdp_queue_number;
+	WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
 
 	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
 	if (rc)
Hi,
On Fri, Jul 23, 2021 at 5:57 AM Sasha Levin sashal@kernel.org wrote:
From: Íñigo Huguet ihuguet@redhat.com
[ Upstream commit 788bc000d4c2f25232db19ab3a0add0ba4e27671 ]
Applying this commit alone could be problematic, even leading to a kernel crash in certain situations.
The real fix is the previous one of the series: f43a24f446da sfc: fix lack of XDP TX queues - error XDP TX failed (-22)
This one can be applied too, but not really a must-have.
On Fri, Jul 23, 2021 at 12:12 PM Íñigo Huguet ihuguet@redhat.com wrote:
This one can be applied too, but not really a must-have.
Sorry, I have to correct myself. Both must be applied:
58e3bb77bf1b sfc: ensure correct number of XDP queues
f43a24f446da sfc: fix lack of XDP TX queues - error XDP TX failed (-22)
Otherwise, if there are some left-over TXQs because of the round up, xdp_tx_queue_count could be set to a wrong value, higher than it should be.
Regards
From: Zheyu Ma zheyuma97@gmail.com
[ Upstream commit 9e5c772954406829e928dbe59891d08938ead04b ]
When calling ttm_range_man_fini(), 'man' may be uninitialized, which may cause a null pointer dereference bug.
Fix this by checking if it is a null pointer.
This log reveals it:
[ 7.902580 ] BUG: kernel NULL pointer dereference, address: 0000000000000058
[ 7.905721 ] RIP: 0010:ttm_range_man_fini+0x40/0x160
[ 7.911826 ] Call Trace:
[ 7.911826 ]  radeon_ttm_fini+0x167/0x210
[ 7.911826 ]  radeon_bo_fini+0x15/0x40
[ 7.913767 ]  rs400_fini+0x55/0x80
[ 7.914358 ]  radeon_device_fini+0x3c/0x140
[ 7.914358 ]  radeon_driver_unload_kms+0x5c/0xe0
[ 7.914358 ]  radeon_driver_load_kms+0x13a/0x200
[ 7.914358 ]  ? radeon_driver_unload_kms+0xe0/0xe0
[ 7.914358 ]  drm_dev_register+0x1db/0x290
[ 7.914358 ]  radeon_pci_probe+0x16a/0x230
[ 7.914358 ]  local_pci_probe+0x4a/0xb0
Signed-off-by: Zheyu Ma zheyuma97@gmail.com
Reviewed-by: Christian König christian.koenig@amd.com
Link: https://patchwork.freedesktop.org/patch/msgid/1626274459-8148-1-git-send-ema...
Signed-off-by: Christian König christian.koenig@amd.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 drivers/gpu/drm/ttm/ttm_range_manager.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 707e5c152896..ed053fd15c90 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -146,6 +146,9 @@ int ttm_range_man_fini(struct ttm_device *bdev,
 	struct drm_mm *mm = &rman->mm;
 	int ret;
 
+	if (!man)
+		return 0;
+
 	ttm_resource_manager_set_used(man, false);
 
 	ret = ttm_resource_manager_evict_all(bdev, man);
From: Desmond Cheong Zhi Xi desmondcheongzx@gmail.com
[ Upstream commit 16ee572eaf0d09daa4c8a755fdb71e40dbf8562d ]
Patch series "hfs: fix various errors", v2.
This series ultimately aims to address a lockdep warning in hfs_find_init reported by Syzbot [1].
The work done for this led to the discovery of another bug, and the Syzkaller repro test also reveals an invalid memory access error after clearing the lockdep warning. Hence, this series is broken up into three patches:
1. Add a missing call to hfs_find_exit for an error path in hfs_fill_super
2. Fix memory mapping in hfs_bnode_read by fixing calls to kmap
3. Add lock nesting notation to tell lockdep that the observed locking hierarchy is safe
This patch (of 3):
Before exiting hfs_fill_super, the struct hfs_find_data used in hfs_find_init should be passed to hfs_find_exit to be cleaned up, and to release the lock held on the btree.
The call to hfs_find_exit is missing from an error path. We add it back in by consolidating calls to hfs_find_exit for error paths.
Link: https://syzkaller.appspot.com/bug?id=f007ef1d7a31a469e3be7aeb0fde0769b18585d... [1]
Link: https://lkml.kernel.org/r/20210701030756.58760-1-desmondcheongzx@gmail.com
Link: https://lkml.kernel.org/r/20210701030756.58760-2-desmondcheongzx@gmail.com
Signed-off-by: Desmond Cheong Zhi Xi desmondcheongzx@gmail.com
Reviewed-by: Viacheslav Dubeyko slava@dubeyko.com
Cc: Gustavo A. R. Silva gustavoars@kernel.org
Cc: Al Viro viro@zeniv.linux.org.uk
Cc: Shuah Khan skhan@linuxfoundation.org
Cc: Greg Kroah-Hartman gregkh@linuxfoundation.org
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Signed-off-by: Linus Torvalds torvalds@linux-foundation.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/hfs/super.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 44d07c9e3a7f..12d9bae39363 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;
 
+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:
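The fix is an instance of the usual kernel error-unwind pattern: one bail-out label per acquired resource, ordered so that jumping to a label releases everything acquired before the failure. A hypothetical skeleton of the pattern (the acquire/release helpers are stand-ins, not hfs functions):

static int acquire_a(void) { return 0; }	/* stand-in helpers, illustration only */
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int example_setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;		/* nothing to undo yet */

	err = acquire_b();
	if (err)
		goto bail_a;

	err = acquire_c();
	if (err)
		goto bail_b;		/* undo b, then a */

	return 0;

bail_b:
	release_b();
bail_a:
	release_a();
	return err;
}

The missing hfs_find_exit() call was exactly one rung of such a ladder; the new bail_hfs_find label restores it for every error path that reaches it.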
From: Desmond Cheong Zhi Xi desmondcheongzx@gmail.com
[ Upstream commit 54a5ead6f5e2b47131a7385d0c0af18e7b89cb02 ]
Pages that we read in hfs_bnode_read need to be kmapped into kernel address space. However, currently only the 0th page is kmapped. If the given offset + length exceeds this 0th page, then we have an invalid memory access.
To fix this, we kmap relevant pages one by one and copy their relevant portions of data.
An example of invalid memory access occurring without this fix can be seen in the following crash report:
==================================================================
BUG: KASAN: use-after-free in memcpy include/linux/fortify-string.h:191 [inline]
BUG: KASAN: use-after-free in hfs_bnode_read+0xc4/0xe0 fs/hfs/bnode.c:26
Read of size 2 at addr ffff888125fdcffe by task syz-executor5/4634

CPU: 0 PID: 4634 Comm: syz-executor5 Not tainted 5.13.0-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:79 [inline]
 dump_stack+0x195/0x1f8 lib/dump_stack.c:120
 print_address_description.constprop.0+0x1d/0x110 mm/kasan/report.c:233
 __kasan_report mm/kasan/report.c:419 [inline]
 kasan_report.cold+0x7b/0xd4 mm/kasan/report.c:436
 check_region_inline mm/kasan/generic.c:180 [inline]
 kasan_check_range+0x154/0x1b0 mm/kasan/generic.c:186
 memcpy+0x24/0x60 mm/kasan/shadow.c:65
 memcpy include/linux/fortify-string.h:191 [inline]
 hfs_bnode_read+0xc4/0xe0 fs/hfs/bnode.c:26
 hfs_bnode_read_u16 fs/hfs/bnode.c:34 [inline]
 hfs_bnode_find+0x880/0xcc0 fs/hfs/bnode.c:365
 hfs_brec_find+0x2d8/0x540 fs/hfs/bfind.c:126
 hfs_brec_read+0x27/0x120 fs/hfs/bfind.c:165
 hfs_cat_find_brec+0x19a/0x3b0 fs/hfs/catalog.c:194
 hfs_fill_super+0xc13/0x1460 fs/hfs/super.c:419
 mount_bdev+0x331/0x3f0 fs/super.c:1368
 hfs_mount+0x35/0x40 fs/hfs/super.c:457
 legacy_get_tree+0x10c/0x220 fs/fs_context.c:592
 vfs_get_tree+0x93/0x300 fs/super.c:1498
 do_new_mount fs/namespace.c:2905 [inline]
 path_mount+0x13f5/0x20e0 fs/namespace.c:3235
 do_mount fs/namespace.c:3248 [inline]
 __do_sys_mount fs/namespace.c:3456 [inline]
 __se_sys_mount fs/namespace.c:3433 [inline]
 __x64_sys_mount+0x2b8/0x340 fs/namespace.c:3433
 do_syscall_64+0x37/0xc0 arch/x86/entry/common.c:47
 entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x45e63a
Code: 48 c7 c2 bc ff ff ff f7 d8 64 89 02 b8 ff ff ff ff eb d2 e8 88 04 00 00 0f 1f 84 00 00 00 00 00 49 89 ca b8 a5 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f9404d410d8 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
RAX: ffffffffffffffda RBX: 0000000020000248 RCX: 000000000045e63a
RDX: 0000000020000000 RSI: 0000000020000100 RDI: 00007f9404d41120
RBP: 00007f9404d41120 R08: 00000000200002c0 R09: 0000000020000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000003
R13: 0000000000000003 R14: 00000000004ad5d8 R15: 0000000000000000

The buggy address belongs to the page:
page:00000000dadbcf3e refcount:0 mapcount:0 mapping:0000000000000000 index:0x1 pfn:0x125fdc
flags: 0x2fffc0000000000(node=0|zone=2|lastcpupid=0x3fff)
raw: 02fffc0000000000 ffffea000497f748 ffffea000497f6c8 0000000000000000
raw: 0000000000000001 0000000000000000 00000000ffffffff 0000000000000000
page dumped because: kasan: bad access detected

Memory state around the buggy address:
 ffff888125fdce80: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
 ffff888125fdcf00: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
>ffff888125fdcf80: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
                                                                ^
 ffff888125fdd000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
 ffff888125fdd080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
==================================================================
Link: https://lkml.kernel.org/r/20210701030756.58760-3-desmondcheongzx@gmail.com
Signed-off-by: Desmond Cheong Zhi Xi desmondcheongzx@gmail.com
Reviewed-by: Viacheslav Dubeyko slava@dubeyko.com
Cc: Al Viro viro@zeniv.linux.org.uk
Cc: Greg Kroah-Hartman gregkh@linuxfoundation.org
Cc: Gustavo A. R. Silva gustavoars@kernel.org
Cc: Shuah Khan skhan@linuxfoundation.org
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Signed-off-by: Linus Torvalds torvalds@linux-foundation.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/hfs/bnode.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index b63a4df7327b..c0a73a6ffb28 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -15,16 +15,31 @@
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;
 
 	off += node->page_offset;
-	page = node->page[0];
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */
 
-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
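The per-page loop generalizes to any (off, len) request that may straddle page boundaries; with 4096-byte pages, for instance, a 16-byte read at offset 4090 has to touch pages 0 and 1. A hypothetical standalone version of the same walk over an array of pages (not the hfs function itself; names invented):

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>

static void read_across_pages(void *dst, struct page **pages, int npages,
			      int off, int len)
{
	int pagenum = off >> PAGE_SHIFT;
	int done = 0;

	off &= ~PAGE_MASK;		/* offset applies to the first page only */
	while (done < len && pagenum < npages) {
		int chunk = min_t(int, len - done, PAGE_SIZE - off);
		void *vaddr = kmap_atomic(pages[pagenum]);

		memcpy(dst + done, vaddr + off, chunk);
		kunmap_atomic(vaddr);

		done += chunk;
		pagenum++;
		off = 0;		/* later pages start at offset 0 */
	}
}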
From: Desmond Cheong Zhi Xi desmondcheongzx@gmail.com
[ Upstream commit b3b2177a2d795e35dc11597b2609eb1e7e57e570 ]
Syzbot reports a possible recursive lock in [1].
This happens due to missing lock nesting information. From the logs, we see that a call to hfs_fill_super is made to mount the hfs filesystem. While searching for the root inode, the lock on the catalog btree is grabbed. Then, when the parent of the root isn't found, a call to __hfs_bnode_create is made to create the parent of the root. This eventually leads to a call to hfs_ext_read_extent which grabs a lock on the extents btree.
Since the order of locking is catalog btree -> extents btree, this lock hierarchy does not lead to a deadlock.
To tell lockdep that this locking is safe, we add nesting notation to distinguish between catalog btrees, extents btrees, and attributes btrees (for HFS+). This has already been done in hfsplus.
Link: https://syzkaller.appspot.com/bug?id=f007ef1d7a31a469e3be7aeb0fde0769b18585d... [1]
Link: https://lkml.kernel.org/r/20210701030756.58760-4-desmondcheongzx@gmail.com
Signed-off-by: Desmond Cheong Zhi Xi desmondcheongzx@gmail.com
Reported-by: syzbot+b718ec84a87b7e73ade4@syzkaller.appspotmail.com
Tested-by: syzbot+b718ec84a87b7e73ade4@syzkaller.appspotmail.com
Reviewed-by: Viacheslav Dubeyko slava@dubeyko.com
Cc: Al Viro viro@zeniv.linux.org.uk
Cc: Greg Kroah-Hartman gregkh@linuxfoundation.org
Cc: Gustavo A. R. Silva gustavoars@kernel.org
Cc: Shuah Khan skhan@linuxfoundation.org
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Signed-off-by: Linus Torvalds torvalds@linux-foundation.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/hfs/bfind.c | 14 +++++++++++++-
 fs/hfs/btree.h | 7 +++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 4af318fbda77..ef9498a6e88a 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 

diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index 4ba45caf5939..0e6baee93245 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE	256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;
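mutex_lock_nested() does not change the locking behaviour at all; the subclass argument only tells lockdep which role a particular lock instance plays, so the fixed catalog-then-extents acquisition order is no longer reported as recursive locking. A tiny hypothetical illustration of subclass annotation with invented names:

#include <linux/mutex.h>

enum { EX_OUTER_TREE_MUTEX, EX_INNER_TREE_MUTEX };

/* Two locks of the same lock class, always taken in this fixed order.
 * Without distinct subclasses, lockdep would flag the second acquisition
 * as possible recursive locking, exactly as in the syzbot report. */
static void example_lock_tree_pair(struct mutex *outer, struct mutex *inner)
{
	mutex_lock_nested(outer, EX_OUTER_TREE_MUTEX);
	mutex_lock_nested(inner, EX_INNER_TREE_MUTEX);
}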
From: Sudeep Holla sudeep.holla@arm.com
[ Upstream commit 7a691f16ccad05d770f813d9c4b4337a30c6d63f ]
The scmi_linux_errmap buffer access index is supposed to depend on the array size to prevent out-of-bounds element access. It uses SCMI_ERR_MAX to check the bounds, but that can mismatch the array size. It also turns success (SCMI_SUCCESS) into -EIO; although scmi_linux_errmap is never used in the success case, it is expected to work for that case too.
The code is slightly confusing because the negative of the error code is used as the index into the buffer. Fix it by negating the code at the start and making the lookup more readable.
Link: https://lore.kernel.org/r/20210707135028.1869642-1-sudeep.holla@arm.com
Reported-by: kernel test robot lkp@intel.com
Reported-by: Dan Carpenter dan.carpenter@oracle.com
Reviewed-by: Cristian Marussi cristian.marussi@arm.com
Signed-off-by: Sudeep Holla sudeep.holla@arm.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 drivers/firmware/arm_scmi/driver.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 66eb3f0e5daf..ce81ed053876 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -47,7 +47,6 @@ enum scmi_error_codes {
 	SCMI_ERR_GENERIC = -8,	/* Generic Error */
 	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
 	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-	SCMI_ERR_MAX
 };
 
 /* List of all SCMI devices active in system */
@@ -166,8 +165,10 @@ static const int scmi_linux_errmap[] = {
 
 static inline int scmi_to_linux_errno(int errno)
 {
-	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
-		return scmi_linux_errmap[-errno];
+	int err_idx = -errno;
+
+	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+		return scmi_linux_errmap[err_idx];
 	return -EIO;
 }
 
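The resulting lookup pattern is: negate the firmware error code once, then bounds-check the index against ARRAY_SIZE() of the translation table. A hypothetical cut-down version with an invented table (the real scmi_linux_errmap has more entries and different ordering):

#include <linux/errno.h>
#include <linux/kernel.h>

static const int example_errmap[] = {
	0,		/* success */
	-EOPNOTSUPP,	/* not supported */
	-EINVAL,	/* invalid parameters */
	-EACCES,	/* denied */
};

static int example_to_errno(int fw_err)
{
	int err_idx = -fw_err;

	/* The table size, not a separate constant, bounds the access. */
	if (err_idx >= 0 && err_idx < ARRAY_SIZE(example_errmap))
		return example_errmap[err_idx];
	return -EIO;	/* unknown or out-of-range code */
}

With this invented table, example_to_errno(-3) returns -EACCES, example_to_errno(0) returns 0, and any unknown or positive code falls back to -EIO.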
From: Cristian Marussi cristian.marussi@arm.com
[ Upstream commit bdb8742dc6f7c599c3d61959234fe4c23638727b ]
SCMI message headers carry a sequence number and such field is sized to allow for MSG_TOKEN_MAX distinct numbers; moreover zero is not really an acceptable maximum number of pending in-flight messages.
Fix the checks performed on the value exported by transports in scmi_desc.max_msg accordingly.
Link: https://lore.kernel.org/r/20210712141833.6628-3-cristian.marussi@arm.com
Reported-by: Vincent Guittot vincent.guittot@linaro.org
Signed-off-by: Cristian Marussi cristian.marussi@arm.com
[sudeep.holla: updated the patch title and error message]
Signed-off-by: Sudeep Holla sudeep.holla@arm.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 drivers/firmware/arm_scmi/driver.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index ce81ed053876..b342490e032b 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1022,8 +1022,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
 	const struct scmi_desc *desc = sinfo->desc;
 
 	/* Pre-allocated messages, no more than what hdr.seq can support */
-	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
-		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+		dev_err(dev,
+			"Invalid maximum messages %d, not in range [1 - %lu]\n",
 			desc->max_msg, MSG_TOKEN_MAX);
 		return -EINVAL;
 	}
From: Hyunchul Lee hyc.lee@gmail.com
[ Upstream commit c9c9c6815f9004ee1ec87401ed0796853bd70f1b ]
Because out-of-range assignments to bit fields are compiler-dependent, the fields could end up holding wrong values.
Signed-off-by: Hyunchul Lee hyc.lee@gmail.com
Signed-off-by: Steve French stfrench@microsoft.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/cifs/smb2ops.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 903de7449aa3..1e5b707833aa 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -555,8 +555,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
 	p = buf;
 	while (bytes_left >= sizeof(*p)) {
 		info->speed = le64_to_cpu(p->LinkSpeed);
-		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
-		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
 
 		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
 		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
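The problem being fixed: masking the capability word yields the mask value itself rather than 0 or 1, and storing a multi-bit value into a one-bit field typically keeps only the low bit (and is implementation-defined for non-unsigned fields), so a set capability flag can read back as 0. A hypothetical illustration with invented mask values, not the cifs structures:

#include <linux/types.h>

#define EX_RSS_CAPABLE	0x00000001	/* invented masks, illustration only */
#define EX_RDMA_CAPABLE	0x00000002

struct iface_caps {
	u32 rdma_capable : 1;
	u32 rss_capable : 1;
};

static void set_caps(struct iface_caps *c, u32 capability)
{
	/* (capability & EX_RDMA_CAPABLE) evaluates to 0x2; assigned directly
	 * to a one-bit field only the low bit survives and the flag would
	 * read back as 0. Normalise to 0/1 first, as the patch does. */
	c->rdma_capable = (capability & EX_RDMA_CAPABLE) ? 1 : 0;
	c->rss_capable = (capability & EX_RSS_CAPABLE) ? 1 : 0;
}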
From: Christoph Hellwig hch@lst.de
[ Upstream commit 3ac1d426510f97ace05093ae9f2f710d9cbe6215 ]
The length variable is rather pointless given that it can be trivially deduced from offset and size. Also the initial calculation can lead to KASAN warnings.
Signed-off-by: Christoph Hellwig hch@lst.de
Reported-by: Leizhen (ThunderTown) thunder.leizhen@huawei.com
Reviewed-by: Darrick J. Wong djwong@kernel.org
Signed-off-by: Darrick J. Wong djwong@kernel.org
Reviewed-by: Matthew Wilcox (Oracle) willy@infradead.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/iomap/seek.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index dab1b02eba5b..50b8f1418f26 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -83,27 +83,23 @@ loff_t
 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;
 
 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;
 
-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_data_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_data_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
-			break;
-
+			return offset;
 		offset += ret;
-		length -= ret;
 	}
 
-	if (length <= 0)
-		return -ENXIO;
-	return offset;
+	/* We've reached the end of the file without finding data */
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(iomap_seek_data);
From: Christoph Hellwig hch@lst.de
[ Upstream commit 49694d14ff68fa4b5f86019dbcfb44a8bd213e58 ]
The length variable is rather pointless given that it can be trivially deduced from offset and size. Also the initial calculation can lead to KASAN warnings.
Signed-off-by: Christoph Hellwig hch@lst.de
Reported-by: Leizhen (ThunderTown) thunder.leizhen@huawei.com
Reviewed-by: Darrick J. Wong djwong@kernel.org
Signed-off-by: Darrick J. Wong djwong@kernel.org
Reviewed-by: Matthew Wilcox (Oracle) willy@infradead.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/iomap/seek.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index 50b8f1418f26..ce6fb810854f 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -35,23 +35,20 @@ loff_t
 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;
 
 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;
 
-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_hole_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_hole_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
 			break;
-
 		offset += ret;
-		length -= ret;
 	}
 
 	return offset;
From: Sudeep Holla sudeep.holla@arm.com
[ Upstream commit 82a1c67554dff610d6be4e1982c425717b3c6a23 ]
Once the new schema interrupt-controller/arm,vic.yaml is added, we get the below warnings:
arch/arm/boot/dts/versatile-ab.dt.yaml: intc@10140000: $nodename:0: 'intc@10140000' does not match '^interrupt-controller(@[0-9a-f,]+)*$'
arch/arm/boot/dts/versatile-ab.dt.yaml: intc@10140000: 'clear-mask' does not match any of the regexes
Fix the node names for the interrupt controller to conform to the standard node name interrupt-controller@.. Also drop invalid clear-mask property.
Signed-off-by: Sudeep Holla sudeep.holla@arm.com
Acked-by: Linus Walleij linus.walleij@linaro.org
Link: https://lore.kernel.org/r/20210701132118.759454-1-sudeep.holla@arm.com'
Signed-off-by: Arnd Bergmann arnd@arndb.de
Signed-off-by: Sasha Levin sashal@kernel.org
---
 arch/arm/boot/dts/versatile-ab.dts | 5 ++---
 arch/arm/boot/dts/versatile-pb.dts | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 37bd41ff8dff..151c0220047d 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -195,16 +195,15 @@ amba {
 		#size-cells = <1>;
 		ranges;
 
-		vic: intc@10140000 {
+		vic: interrupt-controller@10140000 {
 			compatible = "arm,versatile-vic";
 			interrupt-controller;
 			#interrupt-cells = <1>;
 			reg = <0x10140000 0x1000>;
-			clear-mask = <0xffffffff>;
 			valid-mask = <0xffffffff>;
 		};
 
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			compatible = "arm,versatile-sic";
 			interrupt-controller;
 			#interrupt-cells = <1>;

diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 06a0fdf24026..e7e751a858d8 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -7,7 +7,7 @@ / {
 
 	amba {
 		/* The Versatile PB is using more SIC IRQ lines than the AB */
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			clear-mask = <0xffffffff>;
 			/*
 			 * Valid interrupt lines mask according to