From: Sven Eckelmann <sven@narfation.org>
[ Upstream commit fa3a03da549a889fc9dbc0d3c5908eb7882cac8f ]
The bucket variable is only updated outside the loop over the mcast_flags buckets. It is only advanced during a dump run when the dump has to be interrupted and a new message has to be started. The inner loop must therefore pass the current index, bucket_tmp, to the bucket dump function instead of the stale *bucket cursor.

This could result in repeated or missing entries when the multicast flags are dumped to userspace.
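A minimal plain-C sketch of the resumable-dump pattern at issue (illustrative names, not the batman-adv code): the saved cursor only advances between dump runs, so the inner loop has to work with the live index and write the cursor back at the end.

#include <stdbool.h>
#include <stdio.h>

#define HASH_SIZE 4

/* Pretend a bucket dump can fail when the message buffer fills up. */
static bool dump_bucket(unsigned int bucket, int *budget)
{
        if ((*budget)-- <= 0)
                return true;    /* interrupted; caller resumes here later */
        printf("dumped bucket %u\n", bucket);
        return false;
}

static void dump_run(unsigned int *bucket, int budget)
{
        unsigned int bucket_tmp = *bucket;

        while (bucket_tmp < HASH_SIZE) {
                /* Passing *bucket here instead of bucket_tmp would
                 * re-dump or skip buckets -- the bug being fixed. */
                if (dump_bucket(bucket_tmp, &budget))
                        break;
                bucket_tmp++;
        }
        *bucket = bucket_tmp;   /* persist the cursor for the next run */
}

int main(void)
{
        unsigned int cursor = 0;

        dump_run(&cursor, 2);   /* first run gets interrupted */
        dump_run(&cursor, 8);   /* second run resumes at bucket 2 */
        return 0;
}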
Fixes: d2d489b7d851 ("batman-adv: Add inconsistent multicast netlink dump detection")
Signed-off-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/batman-adv/multicast.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index ec54e236e3454..50fe9dfb088b6 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1653,7 +1653,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,

        while (bucket_tmp < hash->size) {
                if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
-                                                  *bucket, &idx_tmp))
+                                                  bucket_tmp, &idx_tmp))
                        break;

                bucket_tmp++;
From: Andrii Nakryiko <andriin@fb.com>
[ Upstream commit 5d01ab7bac467edfc530e6ccf953921def935c62 ]
Libbpf stores the associated BTF FD in each instance of bpf_program. When a program is unloaded, that FD is closed. This is wrong because it leads to a race, and possibly to the closing of unrelated files, if the application opens new files while bpf_programs are being unloaded.

It's also unnecessary, because struct btf "owns" that FD, and btf__free(), called from bpf_object__close(), will close it. Thus the fix is to never keep a per-program BTF FD and to fetch it from obj->btf when necessary.
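The ownership rule the fix enforces can be sketched as follows (hypothetical names, not the real libbpf API): exactly one object owns and closes the FD, and every user fetches it from the owner on demand instead of caching a copy.

#include <stdio.h>

struct btf_sketch { int fd; };                  /* sole owner of the FD */
struct object_sketch { struct btf_sketch *btf; };
struct program_sketch { struct object_sketch *obj; };   /* no btf_fd field */

static int object_btf_fd(const struct object_sketch *obj)
{
        return obj->btf ? obj->btf->fd : -1;
}

static int program_load_btf_fd(const struct program_sketch *prog)
{
        int btf_fd = object_btf_fd(prog->obj);  /* fetched, never cached */

        return btf_fd >= 0 ? btf_fd : 0;        /* 0 means "no BTF" */
}

int main(void)
{
        struct btf_sketch btf = { .fd = 3 };
        struct object_sketch obj = { .btf = &btf };
        struct program_sketch prog = { .obj = &obj };

        /* Unloading a program closes nothing; only the btf owner
         * (btf__free() in libbpf) ever closes the FD, exactly once. */
        printf("prog_btf_fd = %d\n", program_load_btf_fd(&prog));
        return 0;
}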
Fixes: 2993e0515bb4 ("tools/bpf: add support to read .BTF.ext sections")
Reported-by: Andrey Ignatov <rdna@fb.com>
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 tools/lib/bpf/libbpf.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 3865a5d272514..e308fcf16cdd0 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -178,7 +178,6 @@ struct bpf_program {
        bpf_program_clear_priv_t clear_priv;

        enum bpf_attach_type expected_attach_type;
-       int btf_fd;
        void *func_info;
        __u32 func_info_rec_size;
        __u32 func_info_cnt;
@@ -305,7 +304,6 @@ void bpf_program__unload(struct bpf_program *prog)
        prog->instances.nr = -1;
        zfree(&prog->instances.fds);

-       zclose(prog->btf_fd);
        zfree(&prog->func_info);
        zfree(&prog->line_info);
 }
@@ -382,7 +380,6 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
        prog->instances.fds = NULL;
        prog->instances.nr = -1;
        prog->type = BPF_PROG_TYPE_UNSPEC;
-       prog->btf_fd = -1;

        return 0;
 errout:
@@ -1883,9 +1880,6 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
                prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
        }

-       if (!insn_offset)
-               prog->btf_fd = btf__fd(obj->btf);
-
        return 0;
 }

@@ -2060,7 +2054,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
        char *cp, errmsg[STRERR_BUFSIZE];
        int log_buf_size = BPF_LOG_BUF_SIZE;
        char *log_buf;
-       int ret;
+       int btf_fd, ret;

        memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
        load_attr.prog_type = prog->type;
@@ -2072,7 +2066,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
        load_attr.license = license;
        load_attr.kern_version = kern_version;
        load_attr.prog_ifindex = prog->prog_ifindex;
-       load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
+       btf_fd = bpf_object__btf_fd(prog->obj);
+       load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
        load_attr.func_info = prog->func_info;
        load_attr.func_info_rec_size = prog->func_info_rec_size;
        load_attr.func_info_cnt = prog->func_info_cnt;
From: Andrii Nakryiko <andriin@fb.com>
[ Upstream commit 3415ec643e7bd644b03026efbe2f2b36cbe9b34b ]
5d01ab7bac46 ("libbpf: fix erroneous multi-closing of BTF FD") introduced a backwards-compatibility issue, manifesting itself as an -E2BIG error returned on program load, due to an unknown non-zero btf_fd attribute value for the BPF_PROG_LOAD sys_bpf() sub-command.

This patch fixes the bug by ensuring that we only ever associate a BTF FD with a program if there is .BTF.ext data that was successfully loaded into the kernel, which automatically means the kernel supports func_info/line_info and an associated BTF FD for programs (also checked and ensured by the BTF sanitization code).
Fixes: 5d01ab7bac46 ("libbpf: fix erroneous multi-closing of BTF FD")
Reported-by: Andrey Ignatov <rdna@fb.com>
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 tools/lib/bpf/libbpf.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e308fcf16cdd0..ce579e3654d60 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2066,7 +2066,11 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
        load_attr.license = license;
        load_attr.kern_version = kern_version;
        load_attr.prog_ifindex = prog->prog_ifindex;
-       btf_fd = bpf_object__btf_fd(prog->obj);
+       /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
+       if (prog->obj->btf_ext)
+               btf_fd = bpf_object__btf_fd(prog->obj);
+       else
+               btf_fd = -1;
        load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
        load_attr.func_info = prog->func_info;
        load_attr.func_info_rec_size = prog->func_info_rec_size;
From: Florian Westphal <fw@strlen.de>
[ Upstream commit 589b474a4b7ce409d6821ef17234a995841bd131 ]
This makes the previously added 'encap test' pass. Because it's possible that the xfrm dst entry becomes stale while such a flow is offloaded, we need to call dst_check() -- the notifier that handles this for non-tunneled traffic isn't sufficient, because SAs or policies might have changed.
If the dst becomes stale, the flow offload entry is tagged for teardown and packets are passed to the 'classic' forwarding path.

Removing the entry right away is problematic, as this would introduce a race condition with the gc worker.

If the flow is long-lived, it can eventually be offloaded again once the gc worker has removed the entry from the flow table.
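A rough standalone sketch of the resulting fast-path decision (illustrative names, not the actual hook): validate the cached route first; on failure, flag the flow for teardown and hand the packet back to the normal forwarding path so the gc worker can retire the entry safely.

#include <stdbool.h>
#include <stdio.h>

struct dst_sketch { bool is_xfrm; bool valid; };
struct flow_sketch { bool teardown; };

enum verdict { SLOW_PATH, FAST_PATH };

/* Only xfrm dsts can go stale behind the flowtable's back. */
static int dst_check_offload(const struct dst_sketch *dst)
{
        if (dst->is_xfrm && !dst->valid)
                return -1;
        return 0;
}

static enum verdict offload_hook(struct flow_sketch *flow,
                                 struct dst_sketch *dst)
{
        if (dst_check_offload(dst)) {
                flow->teardown = true;  /* gc worker removes it later */
                return SLOW_PATH;       /* classic path handles the pkt */
        }
        return FAST_PATH;               /* offloaded transmit */
}

int main(void)
{
        struct dst_sketch stale = { .is_xfrm = true, .valid = false };
        struct flow_sketch flow = { .teardown = false };

        printf("verdict=%d teardown=%d\n",
               offload_hook(&flow, &stale), flow.teardown);
        return 0;
}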
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/netfilter/nf_flow_table_ip.c | 43 ++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index cdfc33517e85b..d68c801dd614b 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -214,6 +214,25 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        return true;
 }

+static int nf_flow_offload_dst_check(struct dst_entry *dst)
+{
+       if (unlikely(dst_xfrm(dst)))
+               return dst_check(dst, 0) ? 0 : -1;
+
+       return 0;
+}
+
+static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+                                     const struct nf_hook_state *state,
+                                     struct dst_entry *dst)
+{
+       skb_orphan(skb);
+       skb_dst_set_noref(skb, dst);
+       skb->tstamp = 0;
+       dst_output(state->net, state->sk, skb);
+       return NF_STOLEN;
+}
+
 unsigned int
 nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
                        const struct nf_hook_state *state)
@@ -254,6 +273,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
        if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
                return NF_ACCEPT;

+       if (nf_flow_offload_dst_check(&rt->dst)) {
+               flow_offload_teardown(flow);
+               return NF_ACCEPT;
+       }
+
        if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
                return NF_DROP;

@@ -261,6 +285,13 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
        iph = ip_hdr(skb);
        ip_decrease_ttl(iph);

+       if (unlikely(dst_xfrm(&rt->dst))) {
+               memset(skb->cb, 0, sizeof(struct inet_skb_parm));
+               IPCB(skb)->iif = skb->dev->ifindex;
+               IPCB(skb)->flags = IPSKB_FORWARDED;
+               return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+       }
+
        skb->dev = outdev;
        nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
        skb_dst_set_noref(skb, &rt->dst);
@@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
                                sizeof(*ip6h)))
                return NF_ACCEPT;

+       if (nf_flow_offload_dst_check(&rt->dst)) {
+               flow_offload_teardown(flow);
+               return NF_ACCEPT;
+       }
+
        if (skb_try_make_writable(skb, sizeof(*ip6h)))
                return NF_DROP;

@@ -477,6 +513,13 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
        ip6h = ipv6_hdr(skb);
        ip6h->hop_limit--;

+       if (unlikely(dst_xfrm(&rt->dst))) {
+               memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+               IP6CB(skb)->iif = skb->dev->ifindex;
+               IP6CB(skb)->flags = IP6SKB_FORWARDED;
+               return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+       }
+
        skb->dev = outdev;
        nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
        skb_dst_set_noref(skb, &rt->dst);
From: Aya Levin <ayal@mellanox.com>
[ Upstream commit 276d197e70bcc47153592f4384675b51c7d83aba ]
The CQE recovery function begins by testing and setting the recovery bit. Add an error flow that ensures this bit is cleared when leaving the recovery function, so that further recoveries can take place. This allows the clearing of the recovery bit on SQ activation to be removed.
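The invariant after this change, sketched with illustrative names (not the actual mlx5 code; test_and_set_bit()/clear_bit() are the real kernel primitives): the recovery bit taken on entry is cleared on every exit path, so a failed attempt can never lock out the next one.

static int err_cqe_recover_sketch(struct sq_sketch *sq)
{
        int err;

        if (test_and_set_bit(RECOVERING, &sq->state))
                return 0;               /* recovery already in flight */

        err = query_sq_state(sq);       /* hypothetical helper */
        if (err)
                goto out;

        err = reset_sq(sq);             /* hypothetical helper */
        if (err)
                goto out;

        clear_bit(RECOVERING, &sq->state);
        activate_sq(sq);                /* activation no longer clears it */
        return 0;
out:
        clear_bit(RECOVERING, &sq->state);
        return err;
}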
Fixes: de8650a82071 ("net/mlx5e: Add tx reporter support")
Signed-off-by: Aya Levin <ayal@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 .../net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 12 ++++++++----
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c    |  1 -
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index c1caf14bc3346..c7f86453c6384 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -80,17 +80,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
        if (err) {
                netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
                           sq->sqn, err);
-               return err;
+               goto out;
        }

        if (state != MLX5_SQC_STATE_ERR)
-               return 0;
+               goto out;

        mlx5e_tx_disable_queue(sq->txq);

        err = mlx5e_wait_for_sq_flush(sq);
        if (err)
-               return err;
+               goto out;

        /* At this point, no new packets will arrive from the stack as TXQ is
         * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
@@ -99,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)

        err = mlx5e_sq_to_ready(sq, state);
        if (err)
-               return err;
+               goto out;

        mlx5e_reset_txqsq_cc_pc(sq);
        sq->stats->recover++;
+       clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
        mlx5e_activate_txqsq(sq);

        return 0;
+out:
+       clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+       return err;
 }

 static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 882d26b8095da..bbdfdaf06391a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1279,7 +1279,6 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
 {
        sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
-       clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);
From: Sylwester Nawrocki <s.nawrocki@samsung.com>
[ Upstream commit bf32e7dbfce87d518c0ca77af890eae9ab8d6ab9 ]
In order to make it easier, in a subsequent patch, to create different sub-CMU lists for the Exynos5420 and Exynos5800 SoCs, rewrite the code so that an array of pointers is passed to the sub-CMU initialization function.
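A short standalone sketch of the indirection this buys (illustrative names, not the driver code): with an array of pointers, SoC variants can share the common descriptors, and a later variant can append entries without duplicating any struct.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct subcmu_info { const char *pd_name; };

static const struct subcmu_info disp = { .pd_name = "DISP" };
static const struct subcmu_info gsc  = { .pd_name = "GSC" };
static const struct subcmu_info mau  = { .pd_name = "MAU" };

/* A shared base list and a variant extending it, with no struct copies. */
static const struct subcmu_info *base_subcmus[]    = { &disp, &gsc };
static const struct subcmu_info *variant_subcmus[] = { &disp, &gsc, &mau };

static void subcmus_init(const struct subcmu_info **cmu, int nr)
{
        for (; nr--; cmu++)     /* note the extra dereference */
                printf("registering sub-CMU %s\n", (*cmu)->pd_name);
}

int main(void)
{
        subcmus_init(base_subcmus, ARRAY_SIZE(base_subcmus));
        subcmus_init(variant_subcmus, ARRAY_SIZE(variant_subcmus));
        return 0;
}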
Fixes: b06a532bf1fa ("clk: samsung: Add Exynos5 sub-CMU clock driver")
Tested-by: Jaafar Ali <jaafarkhalaf@gmail.com>
Signed-off-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
Link: https://lkml.kernel.org/r/20190808144929.18685-1-s.nawrocki@samsung.com
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/clk/samsung/clk-exynos5-subcmu.c | 16 +++----
 drivers/clk/samsung/clk-exynos5-subcmu.h |  2 +-
 drivers/clk/samsung/clk-exynos5250.c     |  7 ++-
 drivers/clk/samsung/clk-exynos5420.c     | 60 ++++++++++++++----------
 4 files changed, 49 insertions(+), 36 deletions(-)

diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 91db7894125df..65c82d922b05c 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -14,7 +14,7 @@
 #include "clk-exynos5-subcmu.h"

 static struct samsung_clk_provider *ctx;
-static const struct exynos5_subcmu_info *cmu;
+static const struct exynos5_subcmu_info **cmu;
 static int nr_cmus;

 static void exynos5_subcmu_clk_save(void __iomem *base,
@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
  * when OF-core populates all device-tree nodes.
  */
 void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
-                         const struct exynos5_subcmu_info *_cmu)
+                         const struct exynos5_subcmu_info **_cmu)
 {
        ctx = _ctx;
        cmu = _cmu;
        nr_cmus = _nr_cmus;

        for (; _nr_cmus--; _cmu++) {
-               exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
-                                         _cmu->nr_gate_clks);
-               exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
-                                       _cmu->nr_suspend_regs);
+               exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
+                                         (*_cmu)->nr_gate_clks);
+               exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
+                                       (*_cmu)->nr_suspend_regs);
        }
 }

@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
                if (of_property_read_string(np, "label", &name) < 0)
                        continue;
                for (i = 0; i < nr_cmus; i++)
-                       if (strcmp(cmu[i].pd_name, name) == 0)
+                       if (strcmp(cmu[i]->pd_name, name) == 0)
                                exynos5_clk_register_subcmu(&pdev->dev,
-                                                           &cmu[i], np);
+                                                           cmu[i], np);
        }
        return 0;
 }
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
index 755ee8aaa3de5..9ae5356f25aa4 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
 };

 void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
-                         const struct exynos5_subcmu_info *cmu);
+                         const struct exynos5_subcmu_info **cmu);

 #endif
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f2b8968817682..931c70a4da196 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
        .pd_name        = "DISP1",
 };

+static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
+       &exynos5250_disp_subcmu,
+};
+
 static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
        /* sorted in descending order */
        /* PLL_36XX_RATE(rate, m, p, s, k) */
@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)

        samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
                               ARRAY_SIZE(exynos5250_clk_regs));
-       exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
+       exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
+                            exynos5250_subcmus);

        samsung_clk_of_add_provider(np, ctx);

diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 12d800fd95286..a6ea5d7e63d02 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -1232,32 +1232,40 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
        { DIV4_RATIO, 0, 0x3 },         /* DIV dout_mfc_blk */
 };

-static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
-       {
-               .div_clks       = exynos5x_disp_div_clks,
-               .nr_div_clks    = ARRAY_SIZE(exynos5x_disp_div_clks),
-               .gate_clks      = exynos5x_disp_gate_clks,
-               .nr_gate_clks   = ARRAY_SIZE(exynos5x_disp_gate_clks),
-               .suspend_regs   = exynos5x_disp_suspend_regs,
-               .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
-               .pd_name        = "DISP",
-       }, {
-               .div_clks       = exynos5x_gsc_div_clks,
-               .nr_div_clks    = ARRAY_SIZE(exynos5x_gsc_div_clks),
-               .gate_clks      = exynos5x_gsc_gate_clks,
-               .nr_gate_clks   = ARRAY_SIZE(exynos5x_gsc_gate_clks),
-               .suspend_regs   = exynos5x_gsc_suspend_regs,
-               .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
-               .pd_name        = "GSC",
-       }, {
-               .div_clks       = exynos5x_mfc_div_clks,
-               .nr_div_clks    = ARRAY_SIZE(exynos5x_mfc_div_clks),
-               .gate_clks      = exynos5x_mfc_gate_clks,
-               .nr_gate_clks   = ARRAY_SIZE(exynos5x_mfc_gate_clks),
-               .suspend_regs   = exynos5x_mfc_suspend_regs,
-               .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
-               .pd_name        = "MFC",
-       },
+static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
+       .div_clks       = exynos5x_disp_div_clks,
+       .nr_div_clks    = ARRAY_SIZE(exynos5x_disp_div_clks),
+       .gate_clks      = exynos5x_disp_gate_clks,
+       .nr_gate_clks   = ARRAY_SIZE(exynos5x_disp_gate_clks),
+       .suspend_regs   = exynos5x_disp_suspend_regs,
+       .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+       .pd_name        = "DISP",
+};
+
+static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
+       .div_clks       = exynos5x_gsc_div_clks,
+       .nr_div_clks    = ARRAY_SIZE(exynos5x_gsc_div_clks),
+       .gate_clks      = exynos5x_gsc_gate_clks,
+       .nr_gate_clks   = ARRAY_SIZE(exynos5x_gsc_gate_clks),
+       .suspend_regs   = exynos5x_gsc_suspend_regs,
+       .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+       .pd_name        = "GSC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
+       .div_clks       = exynos5x_mfc_div_clks,
+       .nr_div_clks    = ARRAY_SIZE(exynos5x_mfc_div_clks),
+       .gate_clks      = exynos5x_mfc_gate_clks,
+       .nr_gate_clks   = ARRAY_SIZE(exynos5x_mfc_gate_clks),
+       .suspend_regs   = exynos5x_mfc_suspend_regs,
+       .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+       .pd_name        = "MFC",
+};
+
+static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
+       &exynos5x_disp_subcmu,
+       &exynos5x_gsc_subcmu,
+       &exynos5x_mfc_subcmu,
 };

 static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
From: Sylwester Nawrocki <s.nawrocki@samsung.com>
[ Upstream commit b6adeb6bc61c2567b9efd815d61a61b34a2e51a6 ]
This patch fixes broken sound on Exynos5422/5800 platforms after a system suspend/resume cycle in cases where the audio root clock is derived from MAU_EPLL_CLK.

In order to preserve the state of the USER_MUX_MAU_EPLL_CLK clock mux during the system suspend/resume cycle on Exynos5800, we group the MAU block input clocks in a "MAU" sub-CMU and add the clock mux control bit to .suspend_regs. This ensures that the user configuration of the mux is not lost after the PMU block changes the mux setting to OSC_DIV when switching off the MAU power domain.

Adding the SRC_TOP9 register to the exynos5800_clk_regs[] array is not sufficient: at the time of the syscore_ops suspend call, the MAU power domain is already turned off, so we would save and subsequently restore an incorrect register value.
Fixes: b06a532bf1fa ("clk: samsung: Add Exynos5 sub-CMU clock driver")
Reported-by: Jaafar Ali <jaafarkhalaf@gmail.com>
Suggested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Jaafar Ali <jaafarkhalaf@gmail.com>
Signed-off-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
Link: https://lkml.kernel.org/r/20190808144929.18685-2-s.nawrocki@samsung.com
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/clk/samsung/clk-exynos5420.c | 54 ++++++++++++++++++++++------
 1 file changed, 43 insertions(+), 11 deletions(-)

diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index a6ea5d7e63d02..5eb0ce4b2648b 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -524,8 +524,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
                GATE_BUS_TOP, 24, 0, 0),
        GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
                GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
-       GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
-               SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
 };

 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -567,8 +565,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {

 static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
        GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+       /* Maudio Block */
        GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
                        SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+                       GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+                       GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
 };

 static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
@@ -994,12 +997,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
                        GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),

-       /* Maudio Block */
-       GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
-               GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
-               GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
-
        /* FSYS Block */
        GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
        GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
@@ -1232,6 +1229,20 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
        { DIV4_RATIO, 0, 0x3 },         /* DIV dout_mfc_blk */
 };

+
+static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
+       GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
+                       SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+                       GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+                       GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
+       { SRC_TOP9, 0, BIT(8) },        /* MUX mout_user_mau_epll */
+};
+
 static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
        .div_clks       = exynos5x_disp_div_clks,
        .nr_div_clks    = ARRAY_SIZE(exynos5x_disp_div_clks),
@@ -1262,12 +1273,27 @@ static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
        .pd_name        = "MFC",
 };

+static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
+       .gate_clks      = exynos5800_mau_gate_clks,
+       .nr_gate_clks   = ARRAY_SIZE(exynos5800_mau_gate_clks),
+       .suspend_regs   = exynos5800_mau_suspend_regs,
+       .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
+       .pd_name        = "MAU",
+};
+
 static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
        &exynos5x_disp_subcmu,
        &exynos5x_gsc_subcmu,
        &exynos5x_mfc_subcmu,
 };

+static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
+       &exynos5x_disp_subcmu,
+       &exynos5x_gsc_subcmu,
+       &exynos5x_mfc_subcmu,
+       &exynos5800_mau_subcmu,
+};
+
 static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
        PLL_35XX_RATE(24 * MHZ, 2000000000, 250, 3, 0),
        PLL_35XX_RATE(24 * MHZ, 1900000000, 475, 6, 0),
@@ -1483,11 +1509,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
        samsung_clk_extended_sleep_init(reg_base,
                exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
                exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
-       if (soc == EXYNOS5800)
+
+       if (soc == EXYNOS5800) {
                samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
                                       ARRAY_SIZE(exynos5800_clk_regs));
-       exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
-                            exynos5x_subcmus);
+
+               exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
+                                    exynos5800_subcmus);
+       } else {
+               exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+                                    exynos5x_subcmus);
+       }

        samsung_clk_of_add_provider(np, ctx);
 }
From: Marek Szyprowski <m.szyprowski@samsung.com>
[ Upstream commit baf7b79e1ad79a41fafd8ab8597b9a96962d822d ]
The M2M scaler clocks require special handling of their parent bus clock during power domain on/off sequences. The MSCL clocks were not initially added to the sub-CMU handler because, at that time, there was no driver for the M2M scaler device and it was not possible to test it.

This patch fixes that issue. The parent clock of the M2M scaler devices is now properly preserved across the MSC power domain on/off sequence. This gives the M2M scaler devices proper performance: a test of 1000 rotations of a full-HD XRGB32 image takes 3.17s instead of 45.08s.
Fixes: b06a532bf1fa ("clk: samsung: Add Exynos5 sub-CMU clock driver")
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lkml.kernel.org/r/20190808121839.23892-1-m.szyprowski@samsung.com
Acked-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/clk/samsung/clk-exynos5420.c | 48 ++++++++++++++++++++--------
 1 file changed, 34 insertions(+), 14 deletions(-)

diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 5eb0ce4b2648b..893697e00d2aa 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -870,9 +870,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
        /* GSCL Block */
        DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),

-       /* MSCL Block */
-       DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
-
        /* PSGEN */
        DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
        DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
@@ -1136,17 +1133,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
                        GATE_IP_GSCL1, 17, 0, 0),

-       /* MSCL Block */
-       GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
-       GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
-       GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
-       GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
-                       GATE_IP_MSCL, 8, 0, 0),
-       GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
-                       GATE_IP_MSCL, 9, 0, 0),
-       GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
-                       GATE_IP_MSCL, 10, 0, 0),
-
        /* ISP */
        GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
                        GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1229,6 +1215,28 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
        { DIV4_RATIO, 0, 0x3 },         /* DIV dout_mfc_blk */
 };

+static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
+       /* MSCL Block */
+       GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+       GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+       GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+       GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+                       GATE_IP_MSCL, 8, 0, 0),
+       GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+                       GATE_IP_MSCL, 9, 0, 0),
+       GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+                       GATE_IP_MSCL, 10, 0, 0),
+};
+
+static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
+       DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
+       { GATE_IP_MSCL, 0xffffffff, 0xffffffff },       /* MSCL gates */
+       { SRC_TOP3, 0, BIT(4) },        /* MUX mout_user_aclk400_mscl */
+       { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
+};
+
 static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
        GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
                        SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
@@ -1273,6 +1281,16 @@ static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
        .pd_name        = "MFC",
 };

+static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
+       .div_clks       = exynos5x_mscl_div_clks,
+       .nr_div_clks    = ARRAY_SIZE(exynos5x_mscl_div_clks),
+       .gate_clks      = exynos5x_mscl_gate_clks,
+       .nr_gate_clks   = ARRAY_SIZE(exynos5x_mscl_gate_clks),
+       .suspend_regs   = exynos5x_mscl_suspend_regs,
+       .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
+       .pd_name        = "MSC",
+};
+
 static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
        .gate_clks      = exynos5800_mau_gate_clks,
        .nr_gate_clks   = ARRAY_SIZE(exynos5800_mau_gate_clks),
@@ -1285,12 +1303,14 @@ static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
        &exynos5x_disp_subcmu,
        &exynos5x_gsc_subcmu,
        &exynos5x_mfc_subcmu,
+       &exynos5x_mscl_subcmu,
 };

 static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
        &exynos5x_disp_subcmu,
        &exynos5x_gsc_subcmu,
        &exynos5x_mfc_subcmu,
+       &exynos5x_mscl_subcmu,
        &exynos5800_mau_subcmu,
 };
From: Fuqian Huang <huangfq.daxian@gmail.com>
[ Upstream commit 8c25d0887a8bd0e1ca2074ac0c6dff173787a83b ]
spin_unlock_irq() unconditionally enables interrupts. The function tsi108_stat_carry() is called from the interrupt handler tsi108_irq(), so interrupts would be re-enabled inside the interrupt handler. Use spin_lock_irqsave()/spin_unlock_irqrestore() instead of spin_lock_irq()/spin_unlock_irq() in IRQ context to avoid this.
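A sketch of the locking rule involved (illustrative, not the tsi108 driver code; the locking primitives are the real kernel API): in a context where interrupts may already be disabled, only the save/restore variants preserve the previous interrupt state.

static DEFINE_SPINLOCK(misclock);

static void stat_carry_sketch(void)
{
        unsigned long flags;

        /* spin_unlock_irq() would unconditionally re-enable IRQs on
         * unlock -- wrong when called from a hard-IRQ handler. The
         * irqsave/irqrestore pair restores whatever state we entered
         * with. */
        spin_lock_irqsave(&misclock, flags);
        /* ... read the carry registers ... */
        spin_unlock_irqrestore(&misclock, flags);
}

static irqreturn_t some_irq(int irq, void *dev_id)
{
        stat_carry_sketch();    /* interrupts are already disabled here */
        return IRQ_HANDLED;
}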
Signed-off-by: Fuqian Huang <huangfq.daxian@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/ethernet/tundra/tsi108_eth.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 78a7de3fb622f..c62f474b6d08e 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
 static void tsi108_stat_carry(struct net_device *dev)
 {
        struct tsi108_prv_data *data = netdev_priv(dev);
+       unsigned long flags;
        u32 carry1, carry2;

-       spin_lock_irq(&data->misclock);
+       spin_lock_irqsave(&data->misclock, flags);

        carry1 = TSI_READ(TSI108_STAT_CARRY1);
        carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev)
                              TSI108_STAT_TXPAUSEDROP_CARRY,
                              &data->tx_pause_drop);

-       spin_unlock_irq(&data->misclock);
+       spin_unlock_irqrestore(&data->misclock, flags);
 }

 /* Read a stat counter atomically with respect to carries.
From: Pablo Neira Ayuso <pablo@netfilter.org>
[ Upstream commit 6a0a8d10a3661a036b55af695542a714c429ab7c ]
If a rule that already has a bound anonymous set fails to be added, the preparation phase releases the rule and the bound set. However, the transaction object from the abort path still has a reference to the now-stale set object, leading to a use-after-free when checking the set->bound field. Add a new field to the transaction that specifies whether the set is bound, so the abort path can skip releasing it; the rule command owns it and takes care of releasing it. After this update, the set->bound field is removed.
[   24.649883] Unable to handle kernel paging request at virtual address 0000000000040434
[   24.657858] Mem abort info:
[   24.660686]   ESR = 0x96000004
[   24.663769]   Exception class = DABT (current EL), IL = 32 bits
[   24.669725]   SET = 0, FnV = 0
[   24.672804]   EA = 0, S1PTW = 0
[   24.675975] Data abort info:
[   24.678880]   ISV = 0, ISS = 0x00000004
[   24.682743]   CM = 0, WnR = 0
[   24.685723] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000428952000
[   24.692207] [0000000000040434] pgd=0000000000000000
[   24.697119] Internal error: Oops: 96000004 [#1] SMP
[...]
[   24.889414] Call trace:
[   24.891870]  __nf_tables_abort+0x3f0/0x7a0
[   24.895984]  nf_tables_abort+0x20/0x40
[   24.899750]  nfnetlink_rcv_batch+0x17c/0x588
[   24.904037]  nfnetlink_rcv+0x13c/0x190
[   24.907803]  netlink_unicast+0x18c/0x208
[   24.911742]  netlink_sendmsg+0x1b0/0x350
[   24.915682]  sock_sendmsg+0x4c/0x68
[   24.919185]  ___sys_sendmsg+0x288/0x2c8
[   24.923037]  __sys_sendmsg+0x7c/0xd0
[   24.926628]  __arm64_sys_sendmsg+0x2c/0x38
[   24.930744]  el0_svc_common.constprop.0+0x94/0x158
[   24.935556]  el0_svc_handler+0x34/0x90
[   24.939322]  el0_svc+0x8/0xc
[   24.942216] Code: 37280300 f9404023 91014262 aa1703e0 (f9401863)
[   24.948336] ---[ end trace cebbb9dcbed3b56f ]---
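Reduced to a sketch (hypothetical names, not the nf_tables code), the rule the patch applies is: on abort, consult only state that the transaction record itself owns, never a field of an object that may already have been released.

struct nft_set_sketch;

struct trans_set_sketch {
        struct nft_set_sketch *set;     /* may already be freed on abort */
        bool bound;                     /* lives in the transaction itself */
};

static void abort_newset(struct trans_set_sketch *t)
{
        /* The old code read t->set->bound here: a use-after-free when
         * the failed rule had already released the bound set. The flag
         * stored in the transaction stays valid for the whole abort
         * walk. */
        if (t->bound)
                return;                 /* rule owns and releases the set */
        release_set(t->set);            /* hypothetical teardown helper */
}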
Fixes: f6ac85858976 ("netfilter: nf_tables: unbind set in rule from commit path")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/net/netfilter/nf_tables.h |  9 +++++++--
 net/netfilter/nf_tables_api.c     | 15 ++++++++++-----
 2 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5b8624ae4a27f..930d062940b7d 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -419,8 +419,7 @@ struct nft_set {
        unsigned char *udata;
        /* runtime data below here */
        const struct nft_set_ops *ops ____cacheline_aligned;
-       u16 flags:13,
-           bound:1,
+       u16 flags:14,
            genmask:2;
        u8 klen;
        u8 dlen;
@@ -1333,12 +1332,15 @@ struct nft_trans_rule {
 struct nft_trans_set {
        struct nft_set *set;
        u32 set_id;
+       bool bound;
 };

 #define nft_trans_set(trans)   \
        (((struct nft_trans_set *)trans->data)->set)
 #define nft_trans_set_id(trans)        \
        (((struct nft_trans_set *)trans->data)->set_id)
+#define nft_trans_set_bound(trans)     \
+       (((struct nft_trans_set *)trans->data)->bound)

 struct nft_trans_chain {
        bool update;
@@ -1369,12 +1371,15 @@ struct nft_trans_table {
 struct nft_trans_elem {
        struct nft_set *set;
        struct nft_set_elem elem;
+       bool bound;
 };

 #define nft_trans_elem_set(trans)      \
        (((struct nft_trans_elem *)trans->data)->set)
 #define nft_trans_elem(trans)  \
        (((struct nft_trans_elem *)trans->data)->elem)
+#define nft_trans_elem_set_bound(trans)        \
+       (((struct nft_trans_elem *)trans->data)->bound)

 struct nft_trans_obj {
        struct nft_object *obj;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index bcf17fb46d965..8e4cdae2c4f14 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -136,9 +136,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
                return;

        list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
-               if (trans->msg_type == NFT_MSG_NEWSET &&
-                   nft_trans_set(trans) == set) {
-                       set->bound = true;
+               switch (trans->msg_type) {
+               case NFT_MSG_NEWSET:
+                       if (nft_trans_set(trans) == set)
+                               nft_trans_set_bound(trans) = true;
+                       break;
+               case NFT_MSG_NEWSETELEM:
+                       if (nft_trans_elem_set(trans) == set)
+                               nft_trans_elem_set_bound(trans) = true;
                        break;
                }
        }
@@ -6849,7 +6854,7 @@ static int __nf_tables_abort(struct net *net)
                        break;
                case NFT_MSG_NEWSET:
                        trans->ctx.table->use--;
-                       if (nft_trans_set(trans)->bound) {
+                       if (nft_trans_set_bound(trans)) {
                                nft_trans_destroy(trans);
                                break;
                        }
@@ -6861,7 +6866,7 @@ static int __nf_tables_abort(struct net *net)
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWSETELEM:
-                       if (nft_trans_elem_set(trans)->bound) {
+                       if (nft_trans_elem_set_bound(trans)) {
                                nft_trans_destroy(trans);
                                break;
                        }
From: Pablo Neira Ayuso <pablo@netfilter.org>
[ Upstream commit 3e68db2f6422d711550a32cbc87abd97bb6efab3 ]
Update the conntrack entry when picking up an expired flow, otherwise the conntrack entry gets stuck with the internal offload timeout (one day). The TCP state also needs to be adjusted to ESTABLISHED and tracking set to liberal mode in order to give conntrack a chance to pick up the expired flow.
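The expiry check this relies on uses the flowtable's wraparound-safe timestamp arithmetic; a small standalone demo of that arithmetic (deadline and clock as u32 ticks, as with jiffies):

#include <stdint.h>
#include <stdio.h>

/* Unsigned subtraction, signed interpretation: correct across wrap. */
static int expired(uint32_t deadline, uint32_t now)
{
        return (int32_t)(deadline - now) <= 0;
}

int main(void)
{
        uint32_t now = 0xfffffff0u;     /* clock about to wrap */

        printf("%d\n", expired(now + 100, now));        /* 0: in the future */
        printf("%d\n", expired(now - 100, now));        /* 1: already past */
        return 0;
}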
Fixes: ac2a66665e23 ("netfilter: add generic flow table infrastructure")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/netfilter/nf_flow_table_core.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 948b4ebbe3fbd..4254e42605135 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -112,7 +112,7 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT        (120 * HZ)
 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT        (30 * HZ)

-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+static void flow_offload_fixup_ct(struct nf_conn *ct)
 {
        const struct nf_conntrack_l4proto *l4proto;
        unsigned int timeout;
@@ -209,6 +209,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);

+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+{
+       return (__s32)(flow->timeout - (u32)jiffies) <= 0;
+}
+
 static void flow_offload_del(struct nf_flowtable *flow_table,
                             struct flow_offload *flow)
 {
@@ -224,6 +229,9 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
        e = container_of(flow, struct flow_offload_entry, flow);
        clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);

+       if (nf_flow_has_expired(flow))
+               flow_offload_fixup_ct(e->ct);
+
        flow_offload_free(flow);
 }

@@ -234,7 +242,7 @@ void flow_offload_teardown(struct flow_offload *flow)
        flow->flags |= FLOW_OFFLOAD_TEARDOWN;

        e = container_of(flow, struct flow_offload_entry, flow);
-       flow_offload_fixup_ct_state(e->ct);
+       flow_offload_fixup_ct(e->ct);
 }
 EXPORT_SYMBOL_GPL(flow_offload_teardown);

@@ -299,11 +307,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
        return err;
 }

-static inline bool nf_flow_has_expired(const struct flow_offload *flow)
-{
-       return (__s32)(flow->timeout - (u32)jiffies) <= 0;
-}
-
 static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 {
        struct nf_flowtable *flow_table = data;
From: Pablo Neira Ayuso <pablo@netfilter.org>
[ Upstream commit 1e5b2471bcc4838df298080ae1ec042c2cbc9ce9 ]
Flows that are in teardown state (due to a TCP RST/FIN packet) still have their offload flag set. Hence, the conntrack garbage collector may race to undo the timeout adjustment that the fixup routine performs, leaving the conntrack entry in place with the internal offload timeout (one day).

Update the flow state on teardown to ESTABLISHED and set tracking to liberal; then, once the offload bit is cleared, adjust the timeout if it is more than the default fixup timeout (conntrack might already have set a lower timeout from the packet path).
Fixes: da5984e51063 ("netfilter: nf_flow_table: add support for sending flows back to the slow path")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/netfilter/nf_flow_table_core.c | 34 ++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 4254e42605135..49248fe5847a1 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -112,15 +112,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT        (120 * HZ)
 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT        (30 * HZ)

-static void flow_offload_fixup_ct(struct nf_conn *ct)
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+       return (__s32)(timeout - (u32)jiffies);
+}
+
+static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 {
        const struct nf_conntrack_l4proto *l4proto;
+       int l4num = nf_ct_protonum(ct);
        unsigned int timeout;
-       int l4num;
-
-       l4num = nf_ct_protonum(ct);
-       if (l4num == IPPROTO_TCP)
-               flow_offload_fixup_tcp(&ct->proto.tcp);

        l4proto = nf_ct_l4proto_find(l4num);
        if (!l4proto)
@@ -133,7 +134,20 @@ static void flow_offload_fixup_ct(struct nf_conn *ct)
        else
                return;

-       ct->timeout = nfct_time_stamp + timeout;
+       if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
+               ct->timeout = nfct_time_stamp + timeout;
+}
+
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+       if (nf_ct_protonum(ct) == IPPROTO_TCP)
+               flow_offload_fixup_tcp(&ct->proto.tcp);
+}
+
+static void flow_offload_fixup_ct(struct nf_conn *ct)
+{
+       flow_offload_fixup_ct_state(ct);
+       flow_offload_fixup_ct_timeout(ct);
 }

 void flow_offload_free(struct flow_offload *flow)
@@ -211,7 +225,7 @@ EXPORT_SYMBOL_GPL(flow_offload_add);

 static inline bool nf_flow_has_expired(const struct flow_offload *flow)
 {
-       return (__s32)(flow->timeout - (u32)jiffies) <= 0;
+       return nf_flow_timeout_delta(flow->timeout) <= 0;
 }

 static void flow_offload_del(struct nf_flowtable *flow_table,
@@ -231,6 +245,8 @@ static void flow_offload_del(struct nf_flowtable *flow_table,

        if (nf_flow_has_expired(flow))
                flow_offload_fixup_ct(e->ct);
+       else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+               flow_offload_fixup_ct_timeout(e->ct);

        flow_offload_free(flow);
 }
@@ -242,7 +258,7 @@ void flow_offload_teardown(struct flow_offload *flow)
        flow->flags |= FLOW_OFFLOAD_TEARDOWN;

        e = container_of(flow, struct flow_offload_entry, flow);
-       flow_offload_fixup_ct(e->ct);
+       flow_offload_fixup_ct_state(e->ct);
 }
 EXPORT_SYMBOL_GPL(flow_offload_teardown);
From: David Howells <dhowells@redhat.com>
[ Upstream commit 730c5fd42c1e3652a065448fd235cb9fafb2bd10 ]
The object lifetime management on the rxrpc_local struct is broken in that the rxrpc_local_processor() function is expected to clean up and remove an object - but it may get requeued by packets coming in on the backing UDP socket once it starts running.
This may result in the assertion in rxrpc_local_rcu() firing because the memory has been scheduled for RCU destruction whilst still queued:
rxrpc: Assertion failed
------------[ cut here ]------------
kernel BUG at net/rxrpc/local_object.c:468!
Note that if the processor comes around before the RCU free function, it will just do nothing because ->dead is true.
Fix this by adding a separate refcount to count active users of the endpoint that causes the endpoint to be destroyed when it reaches 0.
The original refcount can then be used to refcount objects through the work processor and cause the memory to be rcu freed when that reaches 0.
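A condensed sketch of the two-counter scheme (simplified names; atomic_fetch_add_unless() and atomic_dec_return() are the real kernel primitives, the helpers are placeholders): active_users gates the endpoint's useful life, while usage keeps the memory alive until every holder, including queued work, has let go.

struct local_sketch {
        atomic_t active_users;  /* endpoint torn down when this hits 0 */
        atomic_t usage;         /* memory RCU-freed when this hits 0 */
};

static struct local_sketch *use_local(struct local_sketch *l)
{
        if (!get_local_maybe(l))        /* memory reference first */
                return NULL;
        /* refuse to resurrect an endpoint already shutting down */
        if (atomic_fetch_add_unless(&l->active_users, 1, 0) == 0) {
                put_local(l);
                return NULL;
        }
        return l;
}

static void unuse_local(struct local_sketch *l)
{
        if (atomic_dec_return(&l->active_users) == 0)
                queue_destroyer(l);     /* work item consumes our ref */
        else
                put_local(l);
}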
Fixes: 4f95dd78a77e ("rxrpc: Rework local endpoint management")
Reported-by: syzbot+1e0edc4b8b7494c28450@syzkaller.appspotmail.com
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 net/rxrpc/af_rxrpc.c     |  4 +-
 net/rxrpc/ar-internal.h  |  5 ++-
 net/rxrpc/input.c        | 16 ++++++--
 net/rxrpc/local_object.c | 86 +++++++++++++++++++++++++---------------
 4 files changed, 72 insertions(+), 39 deletions(-)

diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d09eaf1535441..8c9bd3ae9edf7 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)

 service_in_use:
        write_unlock(&local->services_lock);
-       rxrpc_put_local(local);
+       rxrpc_unuse_local(local);
        ret = -EADDRINUSE;
 error_unlock:
        release_sock(&rx->sk);
@@ -901,7 +901,7 @@ static int rxrpc_release_sock(struct sock *sk)
        rxrpc_queue_work(&rxnet->service_conn_reaper);
        rxrpc_queue_work(&rxnet->client_conn_reaper);

-       rxrpc_put_local(rx->local);
+       rxrpc_unuse_local(rx->local);
        rx->local = NULL;
        key_put(rx->key);
        rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80335b4ee4fd6..6a231c8f43066 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -254,7 +254,8 @@ struct rxrpc_security {
  */
 struct rxrpc_local {
        struct rcu_head rcu;
-       atomic_t usage;
+       atomic_t active_users;  /* Number of users of the local endpoint */
+       atomic_t usage;         /* Number of references to the structure */
        struct rxrpc_net *rxnet;        /* The network ns in which this resides */
        struct list_head link;
        struct socket *socket;          /* my UDP socket */
@@ -1002,6 +1003,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
 void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);

diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5bd6f1546e5c6..ee95d1cd1cdf2 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1108,8 +1108,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
 {
        _enter("%p,%p", local, skb);

-       skb_queue_tail(&local->event_queue, skb);
-       rxrpc_queue_local(local);
+       if (rxrpc_get_local_maybe(local)) {
+               skb_queue_tail(&local->event_queue, skb);
+               rxrpc_queue_local(local);
+       } else {
+               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       }
 }

 /*
@@ -1119,8 +1123,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 {
        CHECK_SLAB_OKAY(&local->usage);

-       skb_queue_tail(&local->reject_queue, skb);
-       rxrpc_queue_local(local);
+       if (rxrpc_get_local_maybe(local)) {
+               skb_queue_tail(&local->reject_queue, skb);
+               rxrpc_queue_local(local);
+       } else {
+               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+       }
 }

 /*
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b1c71bad510b7..9798159ee65fa 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
        local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
        if (local) {
                atomic_set(&local->usage, 1);
+               atomic_set(&local->active_users, 1);
                local->rxnet = rxnet;
                INIT_LIST_HEAD(&local->link);
                INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
                 * bind the transport socket may still fail if we're attempting
                 * to use a local address that the dying object is still using.
                 */
-               if (!rxrpc_get_local_maybe(local)) {
-                       cursor = cursor->next;
-                       list_del_init(&local->link);
+               if (!rxrpc_use_local(local))
                        break;
-               }

                age = "old";
                goto found;
@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
        if (ret < 0)
                goto sock_error;

-       list_add_tail(&local->link, cursor);
+       if (cursor != &rxnet->local_endpoints)
+               list_replace(cursor, &local->link);
+       else
+               list_add_tail(&local->link, cursor);
        age = "new";

 found:
@@ -342,7 +343,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 }

 /*
- * Queue a local endpoint.
+ * Queue a local endpoint unless it has become unreferenced and pass the
+ * caller's reference to the work item.
  */
 void rxrpc_queue_local(struct rxrpc_local *local)
 {
@@ -351,15 +353,8 @@ void rxrpc_queue_local(struct rxrpc_local *local)
        if (rxrpc_queue_work(&local->processor))
                trace_rxrpc_local(local, rxrpc_local_queued,
                                  atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
-       _enter("%d", local->debug_id);
-       rxrpc_queue_work(&local->processor);
+       else
+               rxrpc_put_local(local);
 }

 /*
@@ -375,10 +370,45 @@ void rxrpc_put_local(struct rxrpc_local *local)
                trace_rxrpc_local(local, rxrpc_local_put, n, here);

                if (n == 0)
-                       __rxrpc_put_local(local);
+                       call_rcu(&local->rcu, rxrpc_local_rcu);
        }
 }

+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+       unsigned int au;
+
+       local = rxrpc_get_local_maybe(local);
+       if (!local)
+               return NULL;
+
+       au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+       if (au == 0) {
+               rxrpc_put_local(local);
+               return NULL;
+       }
+
+       return local;
+}
+
+/*
+ * Cease using a local endpoint. Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+       unsigned int au;
+
+       au = atomic_dec_return(&local->active_users);
+       if (au == 0)
+               rxrpc_queue_local(local);
+       else
+               rxrpc_put_local(local);
+}
+
 /*
  * Destroy a local endpoint's socket and then hand the record to RCU to dispose
  * of.
@@ -393,16 +423,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)

        _enter("%d", local->debug_id);

-       /* We can get a race between an incoming call packet queueing the
-        * processor again and the work processor starting the destruction
-        * process which will shut down the UDP socket.
-        */
-       if (local->dead) {
-               _leave(" [already dead]");
-               return;
-       }
-       local->dead = true;
-
        mutex_lock(&rxnet->local_mutex);
        list_del_init(&local->link);
        mutex_unlock(&rxnet->local_mutex);
@@ -422,13 +442,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
         */
        rxrpc_purge_queue(&local->reject_queue);
        rxrpc_purge_queue(&local->event_queue);
-
-       _debug("rcu local %d", local->debug_id);
-       call_rcu(&local->rcu, rxrpc_local_rcu);
 }

 /*
- * Process events on an endpoint
+ * Process events on an endpoint. The work item carries a ref which
+ * we must release.
  */
 static void rxrpc_local_processor(struct work_struct *work)
 {
@@ -441,8 +459,10 @@ static void rxrpc_local_processor(struct work_struct *work)

        do {
                again = false;
-               if (atomic_read(&local->usage) == 0)
-                       return rxrpc_local_destroyer(local);
+               if (atomic_read(&local->active_users) == 0) {
+                       rxrpc_local_destroyer(local);
+                       break;
+               }

                if (!skb_queue_empty(&local->reject_queue)) {
                        rxrpc_reject_packets(local);
@@ -454,6 +474,8 @@ static void rxrpc_local_processor(struct work_struct *work)
                        again = true;
                }
        } while (again);
+
+       rxrpc_put_local(local);
 }

 /*
From: Jakub Kicinski <jakub.kicinski@netronome.com>
[ Upstream commit b3e78adcbf991a4e8b2ebb23c9889e968ec76c5f ]
Change an error message to work for any object being pinned, not just programs.
Fixes: 71bb428fe2c1 ("tools: bpf: add bpftool")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 tools/bpf/bpftool/common.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index f7261fad45c19..647d8a4044fbd 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -236,7 +236,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))

        fd = get_fd_by_id(id);
        if (fd < 0) {
-               p_err("can't get prog by id (%u): %s", id, strerror(errno));
+               p_err("can't open object by id (%u): %s", id, strerror(errno));
                return -1;
        }
From: Taehee Yoo <ap420073@gmail.com>
[ Upstream commit 8b6381600d59871fbe44d36522272f961ab42410 ]
ixgbe_service_task() calls unregister_netdev() under rtnl_lock(), but unregister_netdev() internally takes rtnl_lock() itself, so a deadlock occurs. Call unregister_netdev() without holding rtnl_lock().
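The deadlock in sketch form (the calls are real kernel APIs; the snippet is illustrative, not the driver code) -- rtnl_lock() is not recursive:

        rtnl_lock();
        unregister_netdev(netdev);      /* takes rtnl_lock() itself: deadlock */
        rtnl_unlock();

        /* Fix: let unregister_netdev() do its own locking ... */
        unregister_netdev(netdev);

        /* ... or, where RTNL really must already be held, use the
         * variant that expects the caller to hold it: */
        rtnl_lock();
        unregister_netdevice(netdev);
        rtnl_unlock();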
Fixes: 59dd45d550c5 ("ixgbe: firmware recovery mode")
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 57fd9ee6de665..f7c049559c1a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7893,11 +7893,8 @@ static void ixgbe_service_task(struct work_struct *work)
                return;
        }
        if (ixgbe_check_fw_error(adapter)) {
-               if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-                       rtnl_lock();
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        unregister_netdev(adapter->netdev);
-                       rtnl_unlock();
-               }
                ixgbe_service_event_complete(adapter);
                return;
        }
From: Dexuan Cui <decui@microsoft.com>
[ Upstream commit 6d0d779dca73cd5acb649c54f81401f93098b298 ]
This fixes a warning of "suspicious rcu_dereference_check() usage" when nload runs.
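rcu_dereference_rtnl() is only legal under RTNL or inside an RCU read-side section, and .ndo_get_stats64 guarantees neither; a plain read-side section is the minimal fix. In sketch form (real kernel APIs, illustrative surroundings):

        rcu_read_lock();
        nvdev = rcu_dereference(ndev_ctx->nvdev);
        if (nvdev) {
                /* ... sum up the per-channel counters ... */
        }
        rcu_read_unlock();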
Fixes: 776e726bfb34 ("netvsc: fix RCU warning in get_stats")
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/hyperv/netvsc_drv.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3544e19915792..e8fce6d715ef0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net,
                               struct rtnl_link_stats64 *t)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
-       struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+       struct netvsc_device *nvdev;
        struct netvsc_vf_pcpu_stats vf_tot;
        int i;

+       rcu_read_lock();
+
+       nvdev = rcu_dereference(ndev_ctx->nvdev);
        if (!nvdev)
-               return;
+               goto out;

        netdev_stats_to_stats64(t, &net->stats);

@@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net,
                t->rx_packets += packets;
                t->multicast += multicast;
        }
+out:
+       rcu_read_unlock();
 }

 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
From: Nathan Chancellor <natechancellor@gmail.com>
[ Upstream commit 125b7e0949d4e72b15c2b1a1590f8cece985a918 ]
clang warns:
drivers/net/ethernet/toshiba/tc35815.c:1507:30: warning: use of logical
'&&' with constant operand [-Wconstant-logical-operand]
                if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
                                          ^  ~~~~~~~~~~~~
drivers/net/ethernet/toshiba/tc35815.c:1507:30: note: use '&' for a
bitwise operation
                if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
                                          ^~
                                          &
drivers/net/ethernet/toshiba/tc35815.c:1507:30: note: remove constant to
silence this warning
                if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
                                         ~^~~~~~~~~~~~~~~
1 warning generated.
Explicitly check that NET_IP_ALIGN is not zero, which matches how this is checked in other parts of the tree. Because NET_IP_ALIGN is a build time constant, this check will be constant folded away during optimization.
Fixes: 82a9928db560 ("tc35815: Enable StripCRC feature")
Link: https://github.com/ClangBuiltLinux/linux/issues/608
Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/ethernet/toshiba/tc35815.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index c50a9772f4aff..3b5a26b05295f 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit)
                        pci_unmap_single(lp->pci_dev,
                                         lp->rx_skbs[cur_bd].skb_dma,
                                         RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-                       if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+                       if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
                                memmove(skb->data, skb->data - NET_IP_ALIGN,
                                        pkt_len);
                        data = skb_put(skb, pkt_len);
From: Matthias Kaehlcke <mka@chromium.org>
[ Upstream commit 8059ba0bd0e4694e51c2ee6438a77b325f06c0d5 ]
On WCN3990 downloading the NVM sometimes fails with a "TLV response size mismatch" error:
[  174.949955] Bluetooth: btqca.c:qca_download_firmware() hci0: QCA Downloading qca/crnv21.bin
[  174.958718] Bluetooth: btqca.c:qca_tlv_send_segment() hci0: QCA TLV response size mismatch
It seems the controller needs a short time after downloading the firmware before it is ready for the NVM. A delay as short as 1 ms seems sufficient, make it 10 ms just in case. No event is received during the delay, hence we don't just silently drop an extra event.
Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/bluetooth/btqca.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index aff1d22223bd4..0ee5acb685a10 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -350,6 +350,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
                return err;
        }

+       /* Give the controller some time to get ready to receive the NVM */
+       msleep(10);
+
        /* Download NVM configuration */
        config.type = TLV_TYPE_NVM;
        if (qca_is_wcn399x(soc_type))
From: Harish Bandi <c-hbandi@codeaurora.org>
[ Upstream commit a2780889e247561744dd8efbd3478a1999b72ae3 ]
The WCN399x chips are coex chips and need a vendor-specific (VS) pre-shutdown command while turning off Bluetooth, so that the chip can inform other active clients that BT is off.
Signed-off-by: Harish Bandi <c-hbandi@codeaurora.org>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/bluetooth/btqca.c   | 21 +++++++++++++++++++++
 drivers/bluetooth/btqca.h   |  7 +++++++
 drivers/bluetooth/hci_qca.c |  3 +++
 3 files changed, 31 insertions(+)

diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 0ee5acb685a10..ee25e6ae1a098 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
        return 0;
 }

+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       int err;
+
+       bt_dev_dbg(hdev, "QCA pre shutdown cmd");
+
+       skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
+                            NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
+               return err;
+       }
+
+       kfree_skb(skb);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+
 static void qca_tlv_check_data(struct rome_config *config,
                               const struct firmware *fw)
 {
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index e9c9999596035..f2a9e576a86ce 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -13,6 +13,7 @@
 #define EDL_PATCH_TLV_REQ_CMD          (0x1E)
 #define EDL_NVM_ACCESS_SET_REQ_CMD     (0x01)
 #define MAX_SIZE_PER_TLV_SEGMENT       (243)
+#define QCA_PRE_SHUTDOWN_CMD           (0xFC08)

 #define EDL_CMD_REQ_RES_EVT            (0x00)
 #define EDL_PATCH_VER_RES_EVT          (0x19)
@@ -130,6 +131,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
                   enum qca_btsoc_type soc_type, u32 soc_ver);
 int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
 int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
 static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
 {
        return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
@@ -161,4 +163,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
 {
        return false;
 }
+
+static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+       return -EOPNOTSUPP;
+}
 #endif
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index f41fb2c02e4fd..d88b024eaf566 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1319,6 +1319,9 @@ static int qca_power_off(struct hci_dev *hdev)
 {
        struct hci_uart *hu = hci_get_drvdata(hdev);

+       /* Perform pre shutdown command */
+       qca_send_pre_shutdown_cmd(hdev);
+
        qca_power_shutdown(hu);
        return 0;
 }
From: Fabian Henneke fabian.henneke@gmail.com
[ Upstream commit 48d9cc9d85dde37c87abb7ac9bbec6598ba44b56 ]
Let hidp_send_message return the number of successfully queued bytes instead of an unconditional 0.
With the return value fixed to 0, other drivers relying on hidp, such as hidraw, cannot return meaningful values from their respective implementations of write(). In particular, with the current behavior, a hidraw device's write() will have different return values depending on whether the device is connected via USB or Bluetooth, which makes it harder to abstract away the transport layer.
Signed-off-by: Fabian Henneke fabian.henneke@gmail.com Signed-off-by: Marcel Holtmann marcel@holtmann.org Signed-off-by: Sasha Levin sashal@kernel.org --- net/bluetooth/hidp/core.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 5abd423b55fa9..8d889969ae7ed 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -101,6 +101,7 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock, { struct sk_buff *skb; struct sock *sk = sock->sk; + int ret;
BT_DBG("session %p data %p size %d", session, data, size);
@@ -114,13 +115,17 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock, }
skb_put_u8(skb, hdr); - if (data && size > 0) + if (data && size > 0) { skb_put_data(skb, data, size); + ret = size; + } else { + ret = 0; + }
skb_queue_tail(transmit, skb); wake_up_interruptible(sk_sleep(sk));
- return 0; + return ret; }
static int hidp_send_ctrl_message(struct hidp_session *session,
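With the fix in place, a transport-agnostic caller can propagate the queued byte count the same way the USB HID transport already does. A hypothetical caller sketch (function name and wrapper assumed, not part of the patch):

static ssize_t demo_output_report(struct hidp_session *session,
				  struct socket *sock,
				  struct sk_buff_head *transmit,
				  unsigned char hdr,
				  const unsigned char *data, int size)
{
	int ret = hidp_send_message(session, sock, transmit, hdr, data, size);

	if (ret < 0)
		return ret;	/* allocation or queueing failed */

	return ret;		/* number of payload bytes queued */
}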
From: Julian Wiedmann jwi@linux.ibm.com
[ Upstream commit 072f79400032f74917726cf76f4248367ea2b5b8 ]
Callbacks for a cmd reply run outside the protection of card->lock, to allow for additional cmds to be issued & enqueued in parallel.
When qeth_send_control_data() bails out for a cmd without having received a reply (e.g. due to timeout), its callback may concurrently be processing a reply that just arrived. In this case, the callback potentially accesses a stale reply->reply_param area that e.g. was on-stack and has already been released.
To avoid this race, add some locking so that qeth_send_control_data() can (1) wait for a concurrently running callback, and (2) zap any pending callback that still wants to run.
Signed-off-by: Julian Wiedmann jwi@linux.ibm.com Signed-off-by: Jakub Kicinski jakub.kicinski@netronome.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/s390/net/qeth_core.h | 1 + drivers/s390/net/qeth_core_main.c | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 784a2e76a1b04..c5f60f95e8db9 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -640,6 +640,7 @@ struct qeth_seqno { struct qeth_reply { struct list_head list; struct completion received; + spinlock_t lock; int (*callback)(struct qeth_card *, struct qeth_reply *, unsigned long); u32 seqno; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index b1823d75dd35c..6b8f99e7d8a81 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -548,6 +548,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) if (reply) { refcount_set(&reply->refcnt, 1); init_completion(&reply->received); + spin_lock_init(&reply->lock); } return reply; } @@ -832,6 +833,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
if (!reply->callback) { rc = 0; + goto no_callback; + } + + spin_lock_irqsave(&reply->lock, flags); + if (reply->rc) { + /* Bail out when the requestor has already left: */ + rc = reply->rc; } else { if (cmd) { reply->offset = (u16)((char *)cmd - (char *)iob->data); @@ -840,7 +848,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, rc = reply->callback(card, reply, (unsigned long)iob); } } + spin_unlock_irqrestore(&reply->lock, flags);
+no_callback: if (rc <= 0) qeth_notify_reply(reply, rc); qeth_put_reply(reply); @@ -1880,6 +1890,16 @@ static int qeth_send_control_data(struct qeth_card *card, int len, rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_reply(card, reply); + + if (reply_cb) { + /* Wait until the callback for a late reply has completed: */ + spin_lock_irq(&reply->lock); + if (rc) + /* Zap any callback that's still pending: */ + reply->rc = rc; + spin_unlock_irq(&reply->lock); + } + if (!rc) rc = reply->rc; qeth_put_reply(reply);
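The patch is an instance of a common pattern: a requestor and an asynchronously running callback synchronize through a small per-reply lock, and the requestor "zaps" a pending callback by recording its error code under that lock. A generic sketch of the pattern (structure and names hypothetical, not the qeth code):

#include <linux/spinlock.h>

struct demo_reply {
	spinlock_t lock;
	int rc;				/* non-zero once the requestor gave up */
	int (*callback)(void *priv, void *data);
	void *priv;			/* may point at the requestor's stack */
};

/* Reply path: run the callback only if the requestor is still waiting. */
static void demo_reply_complete(struct demo_reply *r, void *data)
{
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	if (!r->rc)			/* not zapped: priv is still valid */
		r->callback(r->priv, data);
	spin_unlock_irqrestore(&r->lock, flags);
}

/* Requestor timeout path: wait out a running callback, then zap it. */
static void demo_reply_cancel(struct demo_reply *r, int rc)
{
	spin_lock_irq(&r->lock);
	r->rc = rc;			/* any later callback will bail out */
	spin_unlock_irq(&r->lock);
}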
From: Thomas Falcon tlfalcon@linux.ibm.com
[ Upstream commit 66cf4710b23ab2adda11155684a2c8826f4fe732 ]
The ibm,mac-address-filters property defines the maximum number of addresses the hypervisor's multicast filter list can support. It is encoded as a big-endian integer in the OF device tree, but the virtual ethernet driver does not convert it for use by little-endian systems. As a result, the driver is not behaving as it should on affected systems when a large number of multicast addresses are assigned to the device.
Reported-by: Hangbin Liu liuhangbin@gmail.com Signed-off-by: Thomas Falcon tlfalcon@linux.ibm.com Signed-off-by: Jakub Kicinski jakub.kicinski@netronome.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/ibm/ibmveth.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index d654c234aaf75..c5be4ebd84373 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) struct net_device *netdev; struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; - unsigned int *mcastFilterSize_p; + __be32 *mcastFilterSize_p; long ret; unsigned long ret_attr;
@@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) return -EINVAL; }
- mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, - VETH_MCAST_FILTER_SIZE, NULL); + mcastFilterSize_p = (__be32 *)vio_get_attribute(dev, + VETH_MCAST_FILTER_SIZE, + NULL); if (!mcastFilterSize_p) { dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " "attribute\n"); @@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev; adapter->netdev = netdev; - adapter->mcastFilterSize = *mcastFilterSize_p; + adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); adapter->pool_config = 0;
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
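Device-tree property cells are always stored big-endian, so a raw 4-byte value read on a little-endian CPU must be byte-swapped, which is exactly what the added be32_to_cpu() does. A standalone demo of the conversion (not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_be32_to_cpu(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* A property value of 256 as it appears in the device tree. */
	const uint8_t prop[4] = { 0x00, 0x00, 0x01, 0x00 };

	/*
	 * Reinterpreting the bytes directly is what the bug did; on a
	 * little-endian CPU that yields 65536 instead of 256.
	 */
	printf("correct: %u\n", demo_be32_to_cpu(prop));
	return 0;
}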
From: YueHaibing yuehaibing@huawei.com
[ Upstream commit 68e03b85474a51ec1921b4d13204782594ef7223 ]
When doing randconfig builds, I got this error:
In file included from drivers/hwmon/pmbus/ucd9000.c:19:0:
./include/linux/gpio/driver.h:576:1: error: redefinition of gpiochip_add_pin_range
 gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
 ^~~~~~~~~~~~~~~~~~~~~~
In file included from drivers/hwmon/pmbus/ucd9000.c:18:0:
./include/linux/gpio.h:245:1: note: previous definition of gpiochip_add_pin_range was here
 gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
 ^~~~~~~~~~~~~~~~~~~~~~
Reported-by: Hulk Robot hulkci@huawei.com Fixes: 964cb341882f ("gpio: move pincontrol calls to <linux/gpio/driver.h>") Signed-off-by: YueHaibing yuehaibing@huawei.com Link: https://lore.kernel.org/r/20190731123814.46624-1-yuehaibing@huawei.com Signed-off-by: Linus Walleij linus.walleij@linaro.org Signed-off-by: Sasha Levin sashal@kernel.org --- include/linux/gpio.h | 24 ------------------------ 1 file changed, 24 deletions(-)
diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 39745b8bdd65d..b3115d1a7d494 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -240,30 +240,6 @@ static inline int irq_to_gpio(unsigned irq) return -EINVAL; }
-static inline int -gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, - unsigned int gpio_offset, unsigned int pin_offset, - unsigned int npins) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline int -gpiochip_add_pingroup_range(struct gpio_chip *chip, - struct pinctrl_dev *pctldev, - unsigned int gpio_offset, const char *pin_group) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline void -gpiochip_remove_pin_ranges(struct gpio_chip *chip) -{ - WARN_ON(1); -} - static inline int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) {
From: Pablo Neira Ayuso pablo@netfilter.org
[ Upstream commit dfe42be15fde16232340b8b2a57c359f51cc10d9 ]
TCP rst and fin packets do not qualify to place a flow into the flowtable. Most likely there will be no more packets after connection closure. Without this patch, this flow entry expires and connection tracking picks up the entry in ESTABLISHED state using the fixup timeout, which makes this look inconsistent to the user for a connection that is actually already closed.
Signed-off-by: Pablo Neira Ayuso pablo@netfilter.org Signed-off-by: Sasha Levin sashal@kernel.org --- net/netfilter/nft_flow_offload.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index aa5f571d43619..060a4ed46d5e6 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, { struct nft_flow_offload *priv = nft_expr_priv(expr); struct nf_flowtable *flowtable = &priv->flowtable->data; + struct tcphdr _tcph, *tcph = NULL; enum ip_conntrack_info ctinfo; struct nf_flow_route route; struct flow_offload *flow; enum ip_conntrack_dir dir; - bool is_tcp = false; struct nf_conn *ct; int ret;
@@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) { case IPPROTO_TCP: - is_tcp = true; + tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, + sizeof(_tcph), &_tcph); + if (unlikely(!tcph || tcph->fin || tcph->rst)) + goto out; break; case IPPROTO_UDP: break; @@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, if (!flow) goto err_flow_alloc;
- if (is_tcp) { + if (tcph) { ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; }
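skb_header_pointer() is the standard helper for reading a header that may not be linear in the skb. An annotated sketch of its contract as used above (illustrative wrapper, not from the patch):

#include <linux/skbuff.h>
#include <linux/tcp.h>

static bool demo_flow_is_offloadable(struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr _tcph;
	const struct tcphdr *tcph;

	/*
	 * Returns a pointer to the header bytes, copying them into
	 * _tcph when the skb data is fragmented; returns NULL when
	 * thoff + sizeof(_tcph) lies beyond the end of the packet.
	 */
	tcph = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);

	/* A fin/rst means the connection is closing: don't offload. */
	return tcph && !tcph->fin && !tcph->rst;
}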
From: David Howells dhowells@redhat.com
[ Upstream commit b00df840fb4004b7087940ac5f68801562d0d2de ]
When a local endpoint (struct rxrpc_local) ceases to be in use by any AF_RXRPC sockets, it starts the process of being destroyed, but this doesn't cause it to be removed from the namespace endpoint list immediately as tearing it down isn't trivial and can't be done in softirq context, so it gets deferred.
If a new socket comes along that wants to bind to the same endpoint, a new rxrpc_local object will be allocated and rxrpc_lookup_local() will use list_replace() to substitute the new one for the old.
Then, when the dying object gets to rxrpc_local_destroyer(), it is removed unconditionally from whatever list it is on by calling list_del_init().
However, list_replace() doesn't reset the pointers in the replaced list_head and so the list_del_init() will likely corrupt the local endpoints list.
Fix this by using list_replace_init() instead.
Fixes: 730c5fd42c1e ("rxrpc: Fix local endpoint refcounting") Reported-by: syzbot+193e29e9387ea5837f1d@syzkaller.appspotmail.com Signed-off-by: David Howells dhowells@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- net/rxrpc/local_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 9798159ee65fa..918bffca3ddb6 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -283,7 +283,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net, goto sock_error;
if (cursor != &rxnet->local_endpoints) - list_replace(cursor, &local->link); + list_replace_init(cursor, &local->link); else list_add_tail(&local->link, cursor); age = "new";
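The distinction matters because of how the two list helpers leave the old entry behind. An annotated sketch mirroring their definitions in include/linux/list.h:

#include <linux/list.h>

static inline void demo_list_replace(struct list_head *old,
				     struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	/* old->next and old->prev still point into the list! */
}

static inline void demo_list_replace_init(struct list_head *old,
					  struct list_head *new)
{
	demo_list_replace(old, new);
	INIT_LIST_HEAD(old);	/* a later list_del_init(old) is now a no-op */
}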
From: David Howells dhowells@redhat.com
[ Upstream commit 06d9532fa6b34f12a6d75711162d47c17c1add72 ]
rxrpc_queue_local() attempts to queue the local endpoint it is given and then, if successful, prints a trace line. The trace line includes the current usage count - but we're not allowed to look at the local endpoint at this point as we passed our ref on it to the workqueue.
Fix this by reading the usage count before queuing the work item.
Also fix the reading of local->debug_id for trace lines, which must be done with the same consideration as reading the usage count.
Fixes: 09d2bf595db4 ("rxrpc: Add a tracepoint to track rxrpc_local refcounting") Reported-by: syzbot+78e71c5bab4f76a6a719@syzkaller.appspotmail.com Signed-off-by: David Howells dhowells@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- include/trace/events/rxrpc.h | 6 +++--- net/rxrpc/local_object.c | 19 ++++++++++--------- 2 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index cc1d060cbf133..fa06b528c73c5 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -498,10 +498,10 @@ rxrpc_tx_points; #define E_(a, b) { a, b }
TRACE_EVENT(rxrpc_local, - TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op, + TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op, int usage, const void *where),
- TP_ARGS(local, op, usage, where), + TP_ARGS(local_debug_id, op, usage, where),
TP_STRUCT__entry( __field(unsigned int, local ) @@ -511,7 +511,7 @@ TRACE_EVENT(rxrpc_local, ),
TP_fast_assign( - __entry->local = local->debug_id; + __entry->local = local_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 918bffca3ddb6..68e9342fd4335 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -93,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, local->debug_id = atomic_inc_return(&rxrpc_debug_id); memcpy(&local->srx, srx, sizeof(*srx)); local->srx.srx_service = 0; - trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); + trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL); }
_leave(" = %p", local); @@ -321,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) int n;
n = atomic_inc_return(&local->usage); - trace_rxrpc_local(local, rxrpc_local_got, n, here); + trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here); return local; }
@@ -335,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) if (local) { int n = atomic_fetch_add_unless(&local->usage, 1, 0); if (n > 0) - trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); + trace_rxrpc_local(local->debug_id, rxrpc_local_got, + n + 1, here); else local = NULL; } @@ -343,16 +344,16 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) }
/* - * Queue a local endpoint unless it has become unreferenced and pass the - * caller's reference to the work item. + * Queue a local endpoint and pass the caller's reference to the work item. */ void rxrpc_queue_local(struct rxrpc_local *local) { const void *here = __builtin_return_address(0); + unsigned int debug_id = local->debug_id; + int n = atomic_read(&local->usage);
if (rxrpc_queue_work(&local->processor)) - trace_rxrpc_local(local, rxrpc_local_queued, - atomic_read(&local->usage), here); + trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here); else rxrpc_put_local(local); } @@ -367,7 +368,7 @@ void rxrpc_put_local(struct rxrpc_local *local)
if (local) { n = atomic_dec_return(&local->usage); - trace_rxrpc_local(local, rxrpc_local_put, n, here); + trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
if (n == 0) call_rcu(&local->rcu, rxrpc_local_rcu); @@ -454,7 +455,7 @@ static void rxrpc_local_processor(struct work_struct *work) container_of(work, struct rxrpc_local, processor); bool again;
- trace_rxrpc_local(local, rxrpc_local_processing, + trace_rxrpc_local(local->debug_id, rxrpc_local_processing, atomic_read(&local->usage), NULL);
do {
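The underlying rule is general: once a reference is handed to another execution context, the object may be freed at any time, so anything needed afterwards must be snapshotted first. A generic sketch (structure and names hypothetical, not the rxrpc code):

#include <linux/atomic.h>
#include <linux/workqueue.h>

struct demo_obj {
	unsigned int debug_id;
	atomic_t usage;
	struct work_struct work;
};

static struct workqueue_struct *demo_wq;	/* assumed to exist */

static void demo_put_object(struct demo_obj *obj);	/* drops a reference */

static void demo_queue_object(struct demo_obj *obj)
{
	/*
	 * Snapshot everything needed for tracing before the handoff:
	 * once the work item owns our reference, obj may be freed by
	 * the worker at any time.
	 */
	unsigned int debug_id = obj->debug_id;
	int usage = atomic_read(&obj->usage);

	if (queue_work(demo_wq, &obj->work))
		pr_debug("queued %u usage=%d\n", debug_id, usage);
	else
		demo_put_object(obj);	/* handoff failed: drop our ref */
}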
From: Alexandre Courbot acourbot@chromium.org
[ Upstream commit 4c6f3196e6ea111c456c6086dc3f57d4706b0b2d ]
PRIME buffers should be imported using the DMA device. To this end, use a custom import function that mimics drm_gem_prime_import() but passes the correct device.
Fixes: 119f5173628aa ("drm/mediatek: Add DRM Driver for Mediatek SoC MT8173.") Signed-off-by: Alexandre Courbot acourbot@chromium.org Signed-off-by: CK Hu ck.hu@mediatek.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 95fdbd0fbcace..8b18a00a58c7e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -320,6 +320,18 @@ static const struct file_operations mtk_drm_fops = { .compat_ioctl = drm_compat_ioctl, };
+/* + * We need to override this because the device used to import the memory is + * not dev->dev, as drm_gem_prime_import() expects. + */ +struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct mtk_drm_private *private = dev->dev_private; + + return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev); +} + static struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, @@ -331,7 +343,7 @@ static struct drm_driver mtk_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = drm_gem_prime_import, + .gem_prime_import = mtk_drm_gem_prime_import, .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .gem_prime_mmap = mtk_drm_gem_mmap_buf,
From: Alexandre Courbot acourbot@chromium.org
[ Upstream commit 070955558e820b9a89c570b91b1f21762f62b288 ]
This driver requires imported PRIME buffers to appear contiguously in its IO address space. Make sure this is the case by setting the maximum DMA segment size to a more suitable value than the default 64KB.
Signed-off-by: Alexandre Courbot acourbot@chromium.org Reviewed-by: Tomasz Figa tfiga@chromium.org Signed-off-by: CK Hu ck.hu@mediatek.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 35 ++++++++++++++++++++++++-- drivers/gpu/drm/mediatek/mtk_drm_drv.h | 2 ++ 2 files changed, 35 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 8b18a00a58c7e..c021d4c8324f5 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -213,6 +213,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) struct mtk_drm_private *private = drm->dev_private; struct platform_device *pdev; struct device_node *np; + struct device *dma_dev; int ret;
if (!iommu_present(&platform_bus_type)) @@ -275,7 +276,29 @@ static int mtk_drm_kms_init(struct drm_device *drm) goto err_component_unbind; }
- private->dma_dev = &pdev->dev; + dma_dev = &pdev->dev; + private->dma_dev = dma_dev; + + /* + * Configure the DMA segment size to make sure we get contiguous IOVA + * when importing PRIME buffers. + */ + if (!dma_dev->dma_parms) { + private->dma_parms_allocated = true; + dma_dev->dma_parms = + devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms), + GFP_KERNEL); + } + if (!dma_dev->dma_parms) { + ret = -ENOMEM; + goto err_component_unbind; + } + + ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32)); + if (ret) { + dev_err(dma_dev, "Failed to set DMA segment size\n"); + goto err_unset_dma_parms; + }
/* * We don't use the drm_irq_install() helpers provided by the DRM @@ -285,13 +308,16 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->irq_enabled = true; ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) - goto err_component_unbind; + goto err_unset_dma_parms;
drm_kms_helper_poll_init(drm); drm_mode_config_reset(drm);
return 0;
+err_unset_dma_parms: + if (private->dma_parms_allocated) + dma_dev->dma_parms = NULL; err_component_unbind: component_unbind_all(drm->dev, drm); err_config_cleanup: @@ -302,9 +328,14 @@ static int mtk_drm_kms_init(struct drm_device *drm)
static void mtk_drm_kms_deinit(struct drm_device *drm) { + struct mtk_drm_private *private = drm->dev_private; + drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm);
+ if (private->dma_parms_allocated) + private->dma_dev->dma_parms = NULL; + component_unbind_all(drm->dev, drm); drm_mode_config_cleanup(drm); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index 598ff3e704465..e03fea12ff598 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -51,6 +51,8 @@ struct mtk_drm_private { } commit;
struct drm_atomic_state *suspend_state; + + bool dma_parms_allocated; };
extern struct platform_driver mtk_ddp_driver;
From: Bill Kuzeja William.Kuzeja@stratus.com
[ Upstream commit 26fa656e9a0cbccddf7db132ea020d2169dbe46e ]
If HBA initialization fails unexpectedly (exiting via probe_failed:), we may fail to free vha->gnl.l. To avoid attempting a double free, set this pointer to NULL after each free, and check it for NULL at probe_failed: so we know whether or not to call dma_free_coherent.
Signed-off-by: Bill Kuzeja william.kuzeja@stratus.com Acked-by: Himanshu Madhani hmadhani@marvell.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/qla2xxx/qla_attr.c | 2 ++ drivers/scsi/qla2xxx/qla_os.c | 11 ++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 8d560c562e9c0..6b7b390b2e522 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma);
+ vha->gnl.l = NULL; + vfree(vha->scan.l);
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index d056f5e7cf930..794478e5f7ec8 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3440,6 +3440,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0;
probe_failed: + if (base_vha->gnl.l) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + } + if (base_vha->timer_active) qla2x00_stop_timer(base_vha); base_vha->flags.online = 0; @@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev) if (!atomic_read(&pdev->enable_cnt)) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); - + base_vha->gnl.l = NULL; scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); @@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev) dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+ base_vha->gnl.l = NULL; + vfree(base_vha->scan.l);
if (IS_QLAFX00(ha)) @@ -4817,6 +4825,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, "Alloc failed for scan database.\n"); dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; scsi_remove_host(vha->host); return NULL; }
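The fix uses the common free-and-NULL idiom, which lets shared teardown paths test the pointer instead of having to track where it was already freed. A standalone illustration (not driver code):

#include <stdlib.h>

struct ctx {
	void *buf;
};

static void ctx_release_buf(struct ctx *c)
{
	free(c->buf);
	c->buf = NULL;	/* mark as freed for any later cleanup path */
}

static void ctx_teardown(struct ctx *c)
{
	if (c->buf)	/* safe even if ctx_release_buf() already ran */
		ctx_release_buf(c);
}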
From: Dmitry Fomichev dmitry.fomichev@wdc.com
[ Upstream commit a86a75865ff4d8c05f355d1750a5250aec89ab15 ]
In the tcmu_handle_completion() function, the variable read_len is always initialized with a value taken from the se_cmd structure. If this function is called to complete an expired (timed out) command, the session command pointed to by se_cmd is likely to have been deallocated by the target core by that moment. As a result, this access triggers a use-after-free warning from KASAN.
This patch fixes the code not to touch se_cmd when completing timed-out TCMU commands. It also resets the se_cmd pointer at the time the TCMU_CMD_BIT_EXPIRED flag is set, because the pointer becomes invalid after target_complete_cmd() is called later in the same function, tcmu_check_expired_cmd().
Signed-off-by: Dmitry Fomichev dmitry.fomichev@wdc.com Acked-by: Mike Christie mchristi@redhat.com Reviewed-by: Damien Le Moal damien.lemoal@wdc.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/target/target_core_user.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index b43d6385a1a09..95b2371fb67b6 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * struct se_cmd *se_cmd = cmd->se_cmd; struct tcmu_dev *udev = cmd->tcmu_dev; bool read_len_valid = false; - uint32_t read_len = se_cmd->data_length; + uint32_t read_len;
/* * cmd has been completed already from timeout, just reclaim * data area space and free cmd */ - if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON_ONCE(se_cmd); goto out; + }
list_del_init(&cmd->queue_entry);
@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * goto done; }
+ read_len = se_cmd->data_length; if (se_cmd->data_direction == DMA_FROM_DEVICE && (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { read_len_valid = true; @@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) */ scsi_status = SAM_STAT_CHECK_CONDITION; list_del_init(&cmd->queue_entry); + cmd->se_cmd = NULL; } else { list_del_init(&cmd->queue_entry); idr_remove(&udev->commands, id); @@ -2024,6 +2028,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
idr_remove(&udev->commands, i); if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON(!cmd->se_cmd); list_del_init(&cmd->queue_entry); if (err_level == 1) { /*
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit c554336efa9bbc28d6ec14efbee3c7d63c61a34f ]
In blocked_fl_write(), 't' is not deallocated if bitmap_parse_user() fails, leading to a memory leak bug. To fix this issue, free t before returning the error.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 02959035ed3f2..d692251ee252c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf, return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); - if (err) + if (err) { + kvfree(t); return err; + }
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); kvfree(t);
From: Paolo Bonzini pbonzini@redhat.com
[ Upstream commit 92cd0f0be3d7adb63611c28693ec0399beded837 ]
This test is only covering various edge cases of the KVM_SET_NESTED_STATE ioctl. Running the VM does not really add anything.
Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../kvm/x86_64/vmx_set_nested_state_test.c | 15 --------------- 1 file changed, 15 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c index ed7218d166da6..a99fc66dafeb6 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c @@ -27,22 +27,13 @@
void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) { - volatile struct kvm_run *run; - vcpu_nested_state_set(vm, VCPU_ID, state, false); - run = vcpu_state(vm, VCPU_ID); - vcpu_run(vm, VCPU_ID); - TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, - "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); }
void test_nested_state_expect_errno(struct kvm_vm *vm, struct kvm_nested_state *state, int expected_errno) { - volatile struct kvm_run *run; int rv;
rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); @@ -50,12 +41,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm, "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)", strerror(expected_errno), expected_errno, rv, strerror(errno), errno); - run = vcpu_state(vm, VCPU_ID); - vcpu_run(vm, VCPU_ID); - TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, - "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); }
void test_nested_state_expect_einval(struct kvm_vm *vm,
From: Paolo Bonzini pbonzini@redhat.com
[ Upstream commit 65efa61dc0d536d5f0602c33ee805a57cc07e9dc ]
There are two tests already enabling eVMCS and a third is coming. Add a function that enables the capability and tests the result.
Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/testing/selftests/kvm/include/evmcs.h | 2 ++ tools/testing/selftests/kvm/lib/x86_64/vmx.c | 20 +++++++++++++++++++ .../testing/selftests/kvm/x86_64/evmcs_test.c | 15 ++------------ .../selftests/kvm/x86_64/hyperv_cpuid.c | 12 ++++------- 4 files changed, 28 insertions(+), 21 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h index 4059014d93ea1..4912d23844bc6 100644 --- a/tools/testing/selftests/kvm/include/evmcs.h +++ b/tools/testing/selftests/kvm/include/evmcs.h @@ -220,6 +220,8 @@ struct hv_enlightened_vmcs { struct hv_enlightened_vmcs *current_evmcs; struct hv_vp_assist_page *current_vp_assist;
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id); + static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) { u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c index fe56d159d65fd..52b6491ed7061 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -14,6 +14,26 @@
bool enable_evmcs;
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id) +{ + uint16_t evmcs_ver; + + struct kvm_enable_cap enable_evmcs_cap = { + .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, + .args[0] = (unsigned long)&evmcs_ver + }; + + vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap); + + /* KVM should return supported EVMCS version range */ + TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && + (evmcs_ver & 0xff) > 0, + "Incorrect EVMCS version range: %x:%x\n", + evmcs_ver & 0xff, evmcs_ver >> 8); + + return evmcs_ver; +} + /* Allocate memory regions for nested VMX tests. * * Input Args: diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index 241919ef1eaca..9f250c39c9bb8 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c @@ -79,11 +79,6 @@ int main(int argc, char *argv[]) struct kvm_x86_state *state; struct ucall uc; int stage; - uint16_t evmcs_ver; - struct kvm_enable_cap enable_evmcs_cap = { - .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, - .args[0] = (unsigned long)&evmcs_ver - };
/* Create VM */ vm = vm_create_default(VCPU_ID, 0, guest_code); @@ -96,13 +91,7 @@ int main(int argc, char *argv[]) exit(KSFT_SKIP); }
- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); - - /* KVM should return supported EVMCS version range */ - TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && - (evmcs_ver & 0xff) > 0, - "Incorrect EVMCS version range: %x:%x\n", - evmcs_ver & 0xff, evmcs_ver >> 8); + vcpu_enable_evmcs(vm, VCPU_ID);
run = vcpu_state(vm, VCPU_ID);
@@ -146,7 +135,7 @@ int main(int argc, char *argv[]) kvm_vm_restart(vm, O_RDWR); vm_vcpu_add(vm, VCPU_ID, 0, 0); vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); - vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); + vcpu_enable_evmcs(vm, VCPU_ID); vcpu_load_state(vm, VCPU_ID, state); run = vcpu_state(vm, VCPU_ID); free(state); diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c index f72b3043db0eb..ee59831fbc984 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c @@ -18,6 +18,7 @@ #include "test_util.h" #include "kvm_util.h" #include "processor.h" +#include "vmx.h"
#define VCPU_ID 0
@@ -106,12 +107,7 @@ int main(int argc, char *argv[]) { struct kvm_vm *vm; int rv; - uint16_t evmcs_ver; struct kvm_cpuid2 *hv_cpuid_entries; - struct kvm_enable_cap enable_evmcs_cap = { - .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, - .args[0] = (unsigned long)&evmcs_ver - };
/* Tell stdout not to buffer its content */ setbuf(stdout, NULL); @@ -136,14 +132,14 @@ int main(int argc, char *argv[])
free(hv_cpuid_entries);
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); - - if (rv) { + if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) { fprintf(stderr, "Enlightened VMCS is unsupported, skip related test\n"); goto vm_free; }
+ vcpu_enable_evmcs(vm, VCPU_ID); + hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); if (!hv_cpuid_entries) return 1;
From: Paolo Bonzini pbonzini@redhat.com
[ Upstream commit c930e19790bbbff31c018009907c813fa0925f63 ]
vmx_set_nested_state_test tries to use the KVM_STATE_NESTED_EVMCS flag without enabling enlightened VMCS first. Correct the expected outcome of the test, and actually test that it succeeds after the capability is enabled.
Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../kvm/x86_64/vmx_set_nested_state_test.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c index a99fc66dafeb6..853e370e8a394 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c @@ -25,6 +25,8 @@ #define VMCS12_REVISION 0x11e57ed0 #define VCPU_ID 5
+bool have_evmcs; + void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) { vcpu_nested_state_set(vm, VCPU_ID, state, false); @@ -75,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size) { memset(state, 0, size); state->flags = KVM_STATE_NESTED_GUEST_MODE | - KVM_STATE_NESTED_RUN_PENDING | - KVM_STATE_NESTED_EVMCS; + KVM_STATE_NESTED_RUN_PENDING; + if (have_evmcs) + state->flags |= KVM_STATE_NESTED_EVMCS; state->format = 0; state->size = size; state->hdr.vmx.vmxon_pa = 0x1000; @@ -126,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm) /* * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without * setting the nested state but flags other than eVMCS must be clear. + * The eVMCS flag can be set if the enlightened VMCS capability has + * been enabled. */ set_default_vmx_state(state, state_sz); state->hdr.vmx.vmxon_pa = -1ull; state->hdr.vmx.vmcs12_pa = -1ull; test_nested_state_expect_einval(vm, state);
- state->flags = KVM_STATE_NESTED_EVMCS; + state->flags &= KVM_STATE_NESTED_EVMCS; + if (have_evmcs) { + test_nested_state_expect_einval(vm, state); + vcpu_enable_evmcs(vm, VCPU_ID); + } test_nested_state(vm, state);
/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */ @@ -217,6 +226,8 @@ int main(int argc, char *argv[]) struct kvm_nested_state state; struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+ have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS); + if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) { printf("KVM_CAP_NESTED_STATE not available, skipping test\n"); exit(KSFT_SKIP);
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit 6f967f8b1be7001b31c46429f2ee7d275af2190f ]
If oct->fn_list.enable_io_queues() fails, no cleanup is executed, leading to memory/resource leaks. To fix this issue, invoke octeon_delete_instr_queue() before returning from the function.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/cavium/liquidio/request_manager.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index fcf20a8f92d94..6a823710987da 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -239,8 +239,10 @@ int octeon_setup_iq(struct octeon_device *oct, }
oct->num_iqs++; - if (oct->fn_list.enable_io_queues(oct)) + if (oct->fn_list.enable_io_queues(oct)) { + octeon_delete_instr_queue(oct, iq_no); return 1; + }
return 0; }
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit 20fb7c7a39b5c719e2e619673b5f5729ee7d2306 ]
In myri10ge_probe(), myri10ge_alloc_slices() is invoked to allocate slice-related structures. Later on, myri10ge_request_irq() is used to get an irq. However, if that step fails, the allocated slice-related structures are not deallocated, leading to memory leaks. To fix this issue, retarget the goto statement to the 'abort_with_slices' label.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index d8b7fba96d58e..337b0cbfd153e 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * setup (if available). */ status = myri10ge_request_irq(mgp); if (status != 0) - goto abort_with_firmware; + goto abort_with_slices; myri10ge_free_irq(mgp);
/* Save configuration space to be restored if the
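The canonical kernel error-unwind style is a ladder of labels that undo acquisitions in reverse order; jumping to the wrong rung skips a cleanup step, which is exactly the leak fixed here. A generic sketch of the shape (helpers hypothetical):

static int load_firmware(void);
static void unload_firmware(void);
static int alloc_slices(void);
static void free_slices(void);
static int request_irq_once(void);

static int demo_probe(void)
{
	int err;

	err = load_firmware();
	if (err)
		goto abort;
	err = alloc_slices();
	if (err)
		goto abort_with_firmware;
	err = request_irq_once();
	if (err)
		goto abort_with_slices;	/* must undo slices, not skip them */
	return 0;

abort_with_slices:
	free_slices();		/* falls through to the earlier cleanups */
abort_with_firmware:
	unload_firmware();
abort:
	return err;
}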
From: Stephen Boyd sboyd@kernel.org
[ Upstream commit 4f8c6aba37da199155a121c6cdc38505a9eb0259 ]
Calls to clk_core_get() will return ERR_PTR(-EINVAL) if we've started migrating a clk driver to use the DT based style of specifying parents but we haven't made any DT updates yet. This happens when we pass a non-NULL value as the 'name' argument of of_parse_clkspec(). That function returns -EINVAL in such a situation, instead of -ENOENT like we expected. The return value comes back up to clk_core_fill_parent_index() which proceeds to skip calling clk_core_lookup() because the error pointer isn't equal to -ENOENT, it's -EINVAL.
Furthermore, we blindly overwrite the error pointer returned by clk_core_get() with NULL when there isn't a legacy .name member specified in the parent map. This isn't too bad right now because we don't really care to differentiate NULL from an error, but in the future we should only try to do a legacy lookup if we know we might find something. This way DT lookups that fail don't try to lookup based on strings when there isn't any string to match, hiding the error from DT parsing.
Fix both these problems so that clk provider drivers can use the new style of parent mapping without having to also update their DT at the same time. This patch is based on an earlier patch from Taniya Das which checked for -EINVAL in addition to -ENOENT return values from clk_core_get().
Fixes: 601b6e93304a ("clk: Allow parents to be specified via clkspec index") Cc: Taniya Das tdas@codeaurora.org Cc: Jerome Brunet jbrunet@baylibre.com Cc: Chen-Yu Tsai wens@csie.org Reported-by: Taniya Das tdas@codeaurora.org Signed-off-by: Stephen Boyd sboyd@kernel.org Link: https://lkml.kernel.org/r/20190813214147.34394-1-sboyd@kernel.org Tested-by: Taniya Das tdas@codeaurora.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/clk.c | 46 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 12 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 87b410d6e51de..498cd7bbe8984 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name) return NULL; }
+#ifdef CONFIG_OF +static int of_parse_clkspec(const struct device_node *np, int index, + const char *name, struct of_phandle_args *out_args); +static struct clk_hw * +of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec); +#else +static inline int of_parse_clkspec(const struct device_node *np, int index, + const char *name, + struct of_phandle_args *out_args) +{ + return -ENOENT; +} +static inline struct clk_hw * +of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) +{ + return ERR_PTR(-ENOENT); +} +#endif + /** * clk_core_get - Find the clk_core parent of a clk * @core: clk to find parent of @@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name) * }; * * Returns: -ENOENT when the provider can't be found or the clk doesn't - * exist in the provider. -EINVAL when the name can't be found. NULL when the - * provider knows about the clk but it isn't provided on this system. + * exist in the provider or the name can't be found in the DT node or + * in a clkdev lookup. NULL when the provider knows about the clk but it + * isn't provided on this system. * A valid clk_core pointer when the clk can be found in the provider. */ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) @@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) struct device *dev = core->dev; const char *dev_id = dev ? dev_name(dev) : NULL; struct device_node *np = core->of_node; + struct of_phandle_args clkspec;
- if (np && (name || index >= 0)) - hw = of_clk_get_hw(np, index, name); - - /* - * If the DT search above couldn't find the provider or the provider - * didn't know about this clk, fallback to looking up via clkdev based - * clk_lookups - */ - if (PTR_ERR(hw) == -ENOENT && name) + if (np && (name || index >= 0) && + !of_parse_clkspec(np, index, name, &clkspec)) { + hw = of_clk_get_hw_from_clkspec(&clkspec); + of_node_put(clkspec.np); + } else if (name) { + /* + * If the DT search above couldn't find the provider fallback to + * looking up via clkdev based clk_lookups. + */ hw = clk_find_hw(dev_id, name); + }
if (IS_ERR(hw)) return ERR_CAST(hw); @@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index) parent = ERR_PTR(-EPROBE_DEFER); } else { parent = clk_core_get(core, index); - if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT) + if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name) parent = clk_core_lookup(entry->name); }
From: Martin Blumenstingl martin.blumenstingl@googlemail.com
[ Upstream commit 24876f09a7dfe36a82f53d304d8c1bceb3257a0f ]
Don't compare the parent clock name with a NULL name in the clk_parent_map. This prevents a kernel crash when passing NULL core->parents[i].name to strcmp().
An example which triggered this is a mux clock with four parents, where each of them is referenced in the clock driver using clk_parent_data.fw_name, followed by a call to clk_set_parent(clk, 3rd_parent) on this mux. In this case the first parent is also the HW default, so core->parents[i].hw is populated when the clock is registered. Calling clk_set_parent(clk, 3rd_parent) will then go through all parents and skip the first parent because its hw pointer doesn't match. For the second parent no hw pointer is cached yet, and clk_core_get(core, 1) returns a non-matching pointer (which is correct because we are comparing the second with the third parent). Comparing the result of clk_core_get(core, 2) with the requested parent gives a match. However, we don't reach this point because right after the clk_core_get(core, 1) mismatch the old code tried to !strcmp(parent->name, NULL) (where the second argument is actually core->parents[i].name, but that was never populated by the clock driver).
Signed-off-by: Martin Blumenstingl martin.blumenstingl@googlemail.com Link: https://lkml.kernel.org/r/20190815223155.21384-1-martin.blumenstingl@googlem... Fixes: fc0c209c147f ("clk: Allow parents to be specified without string names") Signed-off-by: Stephen Boyd sboyd@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/clk.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 498cd7bbe8984..3a4961dc58313 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1657,7 +1657,8 @@ static int clk_fetch_parent_index(struct clk_core *core, break;
/* Fallback to comparing globally unique names */ - if (!strcmp(parent->name, core->parents[i].name)) + if (core->parents[i].name && + !strcmp(parent->name, core->parents[i].name)) break; }
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit b9cbf8a64865b50fd0f4a3915fa00ac7365cdf8f ]
In lan78xx_probe(), a new urb is allocated through usb_alloc_urb() and saved to 'dev->urb_intr'. However, if an error occurs later in the probe, 'dev->urb_intr' is not deallocated, leading to a memory leak. To fix this issue, invoke usb_free_urb() to free the allocated urb before returning from the function.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/usb/lan78xx.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 3d92ea6fcc02b..f033fee225a11 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf, ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); - goto out3; + goto out4; }
usb_set_intfdata(intf, dev); @@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = lan78xx_phy_init(dev); if (ret < 0) - goto out4; + goto out5;
return 0;
-out4: +out5: unregister_netdev(netdev); +out4: + usb_free_urb(dev->urb_intr); out3: lan78xx_unbind(dev, intf); out2:
From: "Darrick J. Wong" darrick.wong@oracle.com
[ Upstream commit edc58dd0123b552453a74369bd0c8d890b497b4b ]
When dedupe wants to use the page cache to compare parts of two files for dedupe, we must be very careful to handle locking correctly. The current code doesn't do this. It must lock and unlock the page only once if the two pages are the same, since the overlapping range check doesn't catch this when blocksize < pagesize. If the pages are distinct but from the same file, we must observe page locking order and lock them in order of increasing offset to avoid clashing with writeback locking.
Fixes: 876bec6f9bbfcb3 ("vfs: refactor clone/dedupe_file_range common functions") Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Reviewed-by: Bill O'Donnell billodo@redhat.com Reviewed-by: Matthew Wilcox (Oracle) willy@infradead.org Signed-off-by: Sasha Levin sashal@kernel.org --- fs/read_write.c | 49 +++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 8 deletions(-)
diff --git a/fs/read_write.c b/fs/read_write.c index c543d965e2880..e8b0f1192a3a4 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1776,10 +1776,7 @@ static int generic_remap_check_len(struct inode *inode_in, return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL; }
-/* - * Read a page's worth of file data into the page cache. Return the page - * locked. - */ +/* Read a page's worth of file data into the page cache. */ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) { struct page *page; @@ -1791,10 +1788,32 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) put_page(page); return ERR_PTR(-EIO); } - lock_page(page); return page; }
+/* + * Lock two pages, ensuring that we lock in offset order if the pages are from + * the same file. + */ +static void vfs_lock_two_pages(struct page *page1, struct page *page2) +{ + /* Always lock in order of increasing index. */ + if (page1->index > page2->index) + swap(page1, page2); + + lock_page(page1); + if (page1 != page2) + lock_page(page2); +} + +/* Unlock two pages, being careful not to unlock the same page twice. */ +static void vfs_unlock_two_pages(struct page *page1, struct page *page2) +{ + unlock_page(page1); + if (page1 != page2) + unlock_page(page2); +} + /* * Compare extents of two files to see if they are the same. * Caller must have locked both inodes to prevent write races. @@ -1832,10 +1851,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, dest_page = vfs_dedupe_get_page(dest, destoff); if (IS_ERR(dest_page)) { error = PTR_ERR(dest_page); - unlock_page(src_page); put_page(src_page); goto out_error; } + + vfs_lock_two_pages(src_page, dest_page); + + /* + * Now that we've locked both pages, make sure they're still + * mapped to the file data we're interested in. If not, + * someone is invalidating pages on us and we lose. + */ + if (!PageUptodate(src_page) || !PageUptodate(dest_page) || + src_page->mapping != src->i_mapping || + dest_page->mapping != dest->i_mapping) { + same = false; + goto unlock; + } + src_addr = kmap_atomic(src_page); dest_addr = kmap_atomic(dest_page);
@@ -1847,8 +1880,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
kunmap_atomic(dest_addr); kunmap_atomic(src_addr); - unlock_page(dest_page); - unlock_page(src_page); +unlock: + vfs_unlock_two_pages(src_page, dest_page); put_page(dest_page); put_page(src_page);
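The same ordered-locking idiom applies to any pair of locks that may alias or be taken by concurrent threads in opposite order. A standalone pthread sketch of the pattern (not the kernel code):

#include <pthread.h>

static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/*
	 * Always acquire in a global order (here: by address) so two
	 * threads locking the same pair cannot deadlock; lock only
	 * once if both arguments name the same lock.
	 */
	if (a > b) {
		pthread_mutex_t *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	if (a != b)
		pthread_mutex_lock(b);
}

static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}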
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit 1eca92eef18719027d394bf1a2d276f43e7cf886 ]
In cx82310_bind(), 'dev->partial_data' is allocated through kmalloc(). The code then waits for the firmware to become ready and bails out if it is not ready in time. However, the allocated 'dev->partial_data' is not deallocated on this path, leading to a memory leak bug. To fix this issue, free 'dev->partial_data' before returning the error.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/usb/cx82310_eth.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 5519248a791eb..32b08b18e1208 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) } if (!timeout) { dev_err(&udev->dev, "firmware not ready in time\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto err; }
/* enable ethernet mode (?) */
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit f1472cb09f11ddb41d4be84f0650835cb65a9073 ]
In kalmia_init_and_get_ethernet_addr(), 'usb_buf' is allocated through kmalloc(). If the 'status' returned by kalmia_send_init_packet() in the calls that follow is non-zero, 'usb_buf' is not deallocated, leading to memory leaks. To fix this issue, add an 'out' label that frees 'usb_buf'.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/usb/kalmia.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index d62b6706a5376..fc5895f85cee2 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr) status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1), usb_buf, 24); if (status != 0) - return status; + goto out;
memcpy(usb_buf, init_msg_2, 12); status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2), usb_buf, 28); if (status != 0) - return status; + goto out;
memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN); - +out: kfree(usb_buf); return status; }
From: Thomas Falcon tlfalcon@linux.ibm.com
[ Upstream commit 80f0fe0934cd3daa13a5e4d48a103f469115b160 ]
There's no need to wait until a completion is received to unmap TX descriptor buffers that have been passed to the hypervisor. Instead, unmap them as soon as the hypervisor call has completed. This patch avoids the possibility that a buffer is never unmapped because a TX completion is lost or mishandled.
Reported-by: Abdul Haleem abdhalee@linux.vnet.ibm.com Tested-by: Devesh K. Singh devesh_singh@in.ibm.com Signed-off-by: Thomas Falcon tlfalcon@linux.ibm.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/ibm/ibmvnic.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3da6800732656..cebd20f3128d4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], (u64)tx_buff->indir_dma, (u64)num_entries); + dma_unmap_single(dev, tx_buff->indir_dma, + sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); } else { tx_buff->num_entries = num_entries; lpar_rc = send_subcrq(adapter, handle_array[queue_num], @@ -2788,7 +2790,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, union sub_crq *next; int index; int i, j; - u8 *first;
restart_loop: while (pending_scrq(adapter, scrq)) { @@ -2818,14 +2819,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
txbuff->data_dma[j] = 0; } - /* if sub_crq was sent indirectly */ - first = &txbuff->indir_arr[0].generic.first; - if (*first == IBMVNIC_CRQ_CMD) { - dma_unmap_single(dev, txbuff->indir_dma, - sizeof(txbuff->indir_arr), - DMA_TO_DEVICE); - *first = 0; - }
if (txbuff->last_frag) { dev_kfree_skb_any(txbuff->skb);
From: Stephen Hemminger stephen@networkplumber.org
[ Upstream commit 3434341004a380f4e47c3a03d4320d43982162a0 ]
The driver name gets exposed in sysfs under /sys/bus/pci/drivers, so it should look like other devices. Change it to the common format ("cavium_ptp" instead of "Cavium PTP").
This is a trivial fix that was observed by accident because Debian kernels were building this driver into the kernel (a bug).
Signed-off-by: Stephen Hemminger stephen@networkplumber.org Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/cavium/common/cavium_ptp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index 73632b8437498..b821c9e1604cf 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -10,7 +10,7 @@
#include "cavium_ptp.h"
-#define DRV_NAME "Cavium PTP Driver" +#define DRV_NAME "cavium_ptp"
#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit 44ef3a03252844a8753479b0cea7f29e4a804bdc ]
In i2400m_barker_db_init(), 'options_orig' is allocated through kstrdup() to hold the original command line options. Then, the options are parsed. However, if an error occurs during the parsing process, 'options_orig' is not deallocated, leading to a memory leak bug. To fix this issue, free 'options_orig' before returning the error.
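The shape of the bug is common to any parse-a-copy helper: the duplicated string must be freed on the error path as well as on success. A standalone illustration of the pattern (hypothetical parser, not the i2400m code):

	#include <stdlib.h>
	#include <string.h>

	static int parse_options(const char *options)
	{
		int result = 0;
		char *copy = strdup(options);	/* userspace stand-in for kstrdup() */

		if (!copy)
			return -1;

		for (char *tok = strtok(copy, ","); tok; tok = strtok(NULL, ",")) {
			if (strlen(tok) > 8) {	/* simulated parse error */
				result = -1;
				goto out;	/* without this, 'copy' leaks */
			}
		}
	out:
		free(copy);
		return result;
	}

	int main(void) { return parse_options("a,b,c") ? 1 : 0; }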
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wimax/i2400m/fw.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index e9fc168bb7345..489cba9b284d1 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c @@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options) } result = i2400m_barker_db_add(barker); if (result < 0) - goto error_add; + goto error_parse_add; } kfree(options_orig); } return 0;
+error_parse_add: error_parse: + kfree(options_orig); error_add: kfree(i2400m_barker_db); return result;
From: Tho Vu tho.vu.wh@rvc.renesas.com
[ Upstream commit cfef46d692efd852a0da6803f920cc756eea2855 ]
When a Tx timestamp is requested, a pointer to the skb is stored in the ravb_tstamp_skb struct without taking a reference via skb_get. This means the skb could be freed by ravb_tx_free (when ravb_tx_free is called from ravb_start_xmit) before the timestamp was processed, leading to a use-after-free bug.
Use skb_get when filling a ravb_tstamp_skb struct, and add appropriate frees/consumes when a ravb_tstamp_skb struct is freed.
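The rule being enforced is that every long-lived pointer to a refcounted object must hold its own reference. A toy refcount showing the get/put pairing the patch adds (assumed names, not the skb API):

	#include <stdlib.h>

	struct buf { int refs; };

	static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

	static void buf_put(struct buf *b)
	{
		if (--b->refs == 0)
			free(b);
	}

	int main(void)
	{
		struct buf *b = calloc(1, sizeof(*b));
		struct buf *stashed;

		b->refs = 1;		/* caller's reference */
		stashed = buf_get(b);	/* second owner: the timestamp list */

		buf_put(b);		/* caller done: object survives */
		buf_put(stashed);	/* list done: object freed exactly once */
		return 0;
	}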
Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper") Signed-off-by: Tho Vu tho.vu.wh@rvc.renesas.com Signed-off-by: Kazuya Mizuguchi kazuya.mizuguchi.ks@renesas.com Signed-off-by: Simon Horman horms+renesas@verge.net.au Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/renesas/ravb_main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ef8f08931fe8b..6cacd5e893aca 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Renesas Ethernet AVB device driver * - * Copyright (C) 2014-2015 Renesas Electronics Corporation + * Copyright (C) 2014-2019 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. * Copyright (C) 2015-2016 Cogent Embedded, Inc. source@cogentembedded.com * @@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) kfree(ts_skb); if (tag == tfa_tag) { skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); break; + } else { + dev_kfree_skb_any(skb); } } ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); @@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) } goto unmap; } - ts_skb->skb = skb; + ts_skb->skb = skb_get(skb); ts_skb->tag = priv->ts_skb_tag++; priv->ts_skb_tag &= 0x3ff; list_add_tail(&ts_skb->list, &priv->ts_skb_list); @@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev) /* Clear the timestamp list */ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { list_del(&ts_skb->list); + kfree_skb(ts_skb->skb); kfree(ts_skb); }
From: Sebastian Andrzej Siewior bigeasy@linutronix.de
[ Upstream commit b0fdc01354f45d43f082025636ef808968a27b36 ]
If a task is PI-blocked (blocking on a sleeping spinlock) then we don't want to schedule a new kworker if we schedule out due to lock contention, because !RT does not do that either. A spinning spinlock disables preemption, and a worker does not schedule out on lock contention (it spins).
On RT the RW-semaphore implementation uses an rtmutex, so tsk_is_pi_blocked() will return true if a task blocks on it. In that case we will now start a new worker, which may deadlock if one worker is waiting on progress from another worker. Since an RW-semaphore starts a new worker on !RT, we should do the same on RT.
XFS is able to trigger this deadlock.
Allow scheduling a new worker if the current worker is PI-blocked.
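Piecing the hunks together, the resulting order in sched_submit_work() is: run the workqueue hook even for PI-blocked tasks, then bail out before the block-plug flush (a reconstruction from the diff below, not verbatim source):

	static inline void sched_submit_work(struct task_struct *tsk)
	{
		if (!tsk->state)
			return;

		if (tsk->flags & PF_WQ_WORKER) {
			/* the worker hook now runs even when PI-blocked */
			preempt_disable();
			wq_worker_sleeping(tsk);
			preempt_enable_no_resched();
		}

		if (tsk_is_pi_blocked(tsk))
			return;

		/* flush plugged IO only for tasks that really sleep */
		if (blk_needs_flush_plug(tsk))
			blk_schedule_flush_plug(tsk);
	}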
Signed-off-by: Sebastian Andrzej Siewior bigeasy@linutronix.de Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Peter Zijlstra peterz@infradead.org Cc: Thomas Gleixner tglx@linutronix.de Link: http://lkml.kernel.org/r/20190816160626.12742-1-bigeasy@linutronix.de Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/sched/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4d5962232a553..42bc2986520d7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3469,7 +3469,7 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk) { - if (!tsk->state || tsk_is_pi_blocked(tsk)) + if (!tsk->state) return;
/* @@ -3485,6 +3485,9 @@ static inline void sched_submit_work(struct task_struct *tsk) preempt_enable_no_resched(); }
+ if (tsk_is_pi_blocked(tsk)) + return; + /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks.
From: Andrea Righi andrea.righi@canonical.com
[ Upstream commit f1c6ece23729257fb46562ff9224cf5f61b818da ]
lockdep reports the following deadlock scenario:
WARNING: possible circular locking dependency detected
kworker/1:1/48 is trying to acquire lock: 000000008d7a62b2 (text_mutex){+.+.}, at: kprobe_optimizer+0x163/0x290
but task is already holding lock: 00000000850b5e2d (module_mutex){+.+.}, at: kprobe_optimizer+0x31/0x290
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #1 (module_mutex){+.+.}: __mutex_lock+0xac/0x9f0 mutex_lock_nested+0x1b/0x20 set_all_modules_text_rw+0x22/0x90 ftrace_arch_code_modify_prepare+0x1c/0x20 ftrace_run_update_code+0xe/0x30 ftrace_startup_enable+0x2e/0x50 ftrace_startup+0xa7/0x100 register_ftrace_function+0x27/0x70 arm_kprobe+0xb3/0x130 enable_kprobe+0x83/0xa0 enable_trace_kprobe.part.0+0x2e/0x80 kprobe_register+0x6f/0xc0 perf_trace_event_init+0x16b/0x270 perf_kprobe_init+0xa7/0xe0 perf_kprobe_event_init+0x3e/0x70 perf_try_init_event+0x4a/0x140 perf_event_alloc+0x93a/0xde0 __do_sys_perf_event_open+0x19f/0xf30 __x64_sys_perf_event_open+0x20/0x30 do_syscall_64+0x65/0x1d0 entry_SYSCALL_64_after_hwframe+0x49/0xbe
-> #0 (text_mutex){+.+.}: __lock_acquire+0xfcb/0x1b60 lock_acquire+0xca/0x1d0 __mutex_lock+0xac/0x9f0 mutex_lock_nested+0x1b/0x20 kprobe_optimizer+0x163/0x290 process_one_work+0x22b/0x560 worker_thread+0x50/0x3c0 kthread+0x112/0x150 ret_from_fork+0x3a/0x50
other info that might help us debug this:
Possible unsafe locking scenario:
       CPU0                    CPU1
       ----                    ----
  lock(module_mutex);
                               lock(text_mutex);
                               lock(module_mutex);
  lock(text_mutex);
*** DEADLOCK ***
As a reproducer I've been using bcc's funccount.py (https://github.com/iovisor/bcc/blob/master/tools/funccount.py), for example:
# ./funccount.py '*interrupt*'
That immediately triggers the lockdep splat.
Fix by acquiring text_mutex before module_mutex in kprobe_optimizer().
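The invariant the patch restores is a single global acquisition order: text_mutex always before module_mutex, on every path. A standalone pthread sketch of ordered acquisition (illustrative names, not the kprobes code):

	#include <pthread.h>

	static pthread_mutex_t text_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t module_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Every caller takes text_mutex before module_mutex; mixing the
	 * order on different paths is what produces the lockdep splat. */
	static void optimizer_work(void)
	{
		pthread_mutex_lock(&text_mutex);
		pthread_mutex_lock(&module_mutex);

		/* ... patch code, walk the module list ... */

		pthread_mutex_unlock(&module_mutex);
		pthread_mutex_unlock(&text_mutex);
	}

	int main(void) { optimizer_work(); return 0; }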
Signed-off-by: Andrea Righi andrea.righi@canonical.com Acked-by: Masami Hiramatsu mhiramat@kernel.org Cc: Anil S Keshavamurthy anil.s.keshavamurthy@intel.com Cc: David S. Miller davem@davemloft.net Cc: Linus Torvalds torvalds@linux-foundation.org Cc: Naveen N. Rao naveen.n.rao@linux.ibm.com Cc: Peter Zijlstra peterz@infradead.org Cc: Thomas Gleixner tglx@linutronix.de Fixes: d5b844a2cf50 ("ftrace/x86: Remove possible deadlock between register_kprobe() and ftrace_run_update_code()") Link: http://lkml.kernel.org/r/20190812184302.GA7010@xps-13 Signed-off-by: Ingo Molnar mingo@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/kprobes.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 445337c107e0f..2504c269e6583 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); */ static void do_optimize_kprobes(void) { + lockdep_assert_held(&text_mutex); /* * The optimization/unoptimization refers online_cpus via * stop_machine() and cpu-hotplug modifies online_cpus. @@ -487,9 +488,7 @@ static void do_optimize_kprobes(void) list_empty(&optimizing_list)) return;
- mutex_lock(&text_mutex); arch_optimize_kprobes(&optimizing_list); - mutex_unlock(&text_mutex); }
/* @@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void) { struct optimized_kprobe *op, *tmp;
+ lockdep_assert_held(&text_mutex); /* See comment in do_optimize_kprobes() */ lockdep_assert_cpus_held();
@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void) if (list_empty(&unoptimizing_list)) return;
- mutex_lock(&text_mutex); arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); /* Loop free_list for disarming */ list_for_each_entry_safe(op, tmp, &freeing_list, list) { @@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void) } else list_del_init(&op->list); } - mutex_unlock(&text_mutex); }
/* Reclaim all kprobes on the free_list */ @@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work) { mutex_lock(&kprobe_mutex); cpus_read_lock(); + mutex_lock(&text_mutex); /* Lock modules while optimizing kprobes */ mutex_lock(&module_mutex);
@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work) do_free_cleaned_kprobes();
mutex_unlock(&module_mutex); + mutex_unlock(&text_mutex); cpus_read_unlock(); mutex_unlock(&kprobe_mutex);
From: Even Xu even.xu@intel.com
[ Upstream commit b640be5bc8e4673dc8049cf74176ddedecea5597 ]
EHL is a new platform using ishtp solution, add its device id to support list.
Signed-off-by: Even Xu even.xu@intel.com Acked-by: Srinivas Pandruvada srinivas.pandruvada@linux.intel.com Signed-off-by: Jiri Kosina jkosina@suse.cz Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1 + drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1 + 2 files changed, 2 insertions(+)
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 1065692f90e20..5792a104000a9 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -24,6 +24,7 @@ #define ICL_MOBILE_DEVICE_ID 0x34FC #define SPT_H_DEVICE_ID 0xA135 #define CML_LP_DEVICE_ID 0x02FC +#define EHL_Ax_DEVICE_ID 0x4BB3
#define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 17ae49fba920f..8cce3cfe28e08 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
From: Benjamin Tissoires benjamin.tissoires@redhat.com
[ Upstream commit 2d05dba2b25ecb0f8fc3a0b4eb2232da6454a47b ]
When calling request_threaded_irq() with a CP2112, the function cp2112_gpio_irq_startup() is called in an IRQ context.
Therefore we cannot sleep, and we cannot call cp2112_gpio_direction_input() there.
Move the call to cp2112_gpio_direction_input() earlier to have a working driver.
Signed-off-by: Benjamin Tissoires benjamin.tissoires@redhat.com Signed-off-by: Jiri Kosina jkosina@suse.cz Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/hid/hid-cp2112.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 8bbe3d0cbe5d9..8fd44407a0df7 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -1152,8 +1152,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
- cp2112_gpio_direction_input(gc, d->hwirq); - if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); @@ -1201,6 +1199,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev, return PTR_ERR(dev->desc[pin]); }
+ ret = cp2112_gpio_direction_input(&dev->gc, pin); + if (ret < 0) { + dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n"); + goto err_desc; + } + ret = gpiochip_lock_as_irq(&dev->gc, pin); if (ret) { dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
From: "Kirill A. Shutemov" kirill@shutemov.name
[ Upstream commit 0a46fff2f9108c2c44218380a43a736cf4612541 ]
The BIOS on the Samsung 500C Chromebook reports a very rudimentary E820 table that consists of two entries:
BIOS-e820: [mem 0x0000000000000000-0x0000000000000fff] usable
BIOS-e820: [mem 0x00000000fffff000-0x00000000ffffffff] reserved
This breaks the logic in find_trampoline_placement(): bios_start lands at the end of the first 4k page, and the trampoline start gets placed below 0.
Detect the underflow and don't touch bios_start in such cases. This makes the kernel ignore the E820 table on machines that don't have two usable pages below BIOS_START_MAX.
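The failure is plain unsigned wraparound: subtracting the trampoline size from an address near the bottom of memory produces a huge value. A tiny demonstration of the detection trick the patch uses (the size constant is a stand-in for TRAMPOLINE_32BIT_SIZE, and 'new' mirrors the patch's variable):

	#include <stdio.h>

	#define TRAMPOLINE_SIZE (2UL * 4096)

	int main(void)
	{
		unsigned long bios_start = 0x1000;	/* end of the first 4k page */
		unsigned long new = 0x1000;

		/* Unsigned underflow: the result is larger than the value we
		 * subtracted from, which is exactly what the patch checks. */
		if (new - TRAMPOLINE_SIZE > bios_start)
			printf("underflow detected, keeping bios_start\n");
		return 0;
	}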
Fixes: 1b3a62643660 ("x86/boot/compressed/64: Validate trampoline placement against E820") Signed-off-by: Kirill A. Shutemov kirill.shutemov@linux.intel.com Signed-off-by: Borislav Petkov bp@suse.de Cc: "H. Peter Anvin" hpa@zytor.com Cc: Ingo Molnar mingo@redhat.com Cc: Thomas Gleixner tglx@linutronix.de Cc: x86-ml x86@kernel.org Link: https://bugzilla.kernel.org/show_bug.cgi?id=203463 Link: https://lkml.kernel.org/r/20190813131654.24378-1-kirill.shutemov@linux.intel... Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/boot/compressed/pgtable_64.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index f8debf7aeb4c1..f0537a1f7fc25 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -73,6 +73,8 @@ static unsigned long find_trampoline_placement(void)
/* Find the first usable memory region under bios_start. */ for (i = boot_params->e820_entries - 1; i >= 0; i--) { + unsigned long new; + entry = &boot_params->e820_table[i];
/* Skip all entries above bios_start. */ @@ -85,15 +87,20 @@ static unsigned long find_trampoline_placement(void)
/* Adjust bios_start to the end of the entry if needed. */ if (bios_start > entry->addr + entry->size) - bios_start = entry->addr + entry->size; + new = entry->addr + entry->size;
/* Keep bios_start page-aligned. */ - bios_start = round_down(bios_start, PAGE_SIZE); + new = round_down(new, PAGE_SIZE);
/* Skip the entry if it's too small. */ - if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr) + if (new - TRAMPOLINE_32BIT_SIZE < entry->addr) continue;
+ /* Protect against underflow. */ + if (new - TRAMPOLINE_32BIT_SIZE > bios_start) + break; + + bios_start = new; break; }
On Thu, Aug 29, 2019 at 02:12:46PM -0400, Sasha Levin wrote:
From: "Kirill A. Shutemov" kirill@shutemov.name
[ Upstream commit 0a46fff2f9108c2c44218380a43a736cf4612541 ]
BIOS on Samsung 500C Chromebook reports very rudimentary E820 table that consists of 2 entries:
BIOS-e820: [mem 0x0000000000000000-0x0000000000000fff] usable BIOS-e820: [mem 0x00000000fffff000-0x00000000ffffffff] reserved
It breaks logic in find_trampoline_placement(): bios_start lands on the end of the first 4k page and trampoline start gets placed below 0.
Detect underflow and don't touch bios_start for such cases. It makes kernel ignore E820 table on machines that doesn't have two usable pages below BIOS_START_MAX.
Fixes: 1b3a62643660 ("x86/boot/compressed/64: Validate trampoline placement against E820") Signed-off-by: Kirill A. Shutemov kirill.shutemov@linux.intel.com Signed-off-by: Borislav Petkov bp@suse.de Cc: "H. Peter Anvin" hpa@zytor.com Cc: Ingo Molnar mingo@redhat.com Cc: Thomas Gleixner tglx@linutronix.de Cc: x86-ml x86@kernel.org Link: https://bugzilla.kernel.org/show_bug.cgi?id=203463 Link: https://lkml.kernel.org/r/20190813131654.24378-1-kirill.shutemov@linux.intel... Signed-off-by: Sasha Levin sashal@kernel.org
Please postpone backporting the patch (and into other trees). There's a fixup for it:
http://lore.kernel.org/r/20190826133326.7cxb4vbmiawffv2r@box
On Fri, Aug 30, 2019 at 01:17:23AM +0300, Kirill A. Shutemov wrote:
Please postpone backporting the patch (and into other trees). There's a fixup for it:
http://lore.kernel.org/r/20190826133326.7cxb4vbmiawffv2r@box
Sure. Should I just queue it up for a week or two later (along with the fixes), or do you want to let me know when?
-- Thanks, Sasha
On Fri, Aug 30, 2019 at 08:06:38AM -0400, Sasha Levin wrote:
Sure. Should I just queue it up for a week or two later (along with the fixes), or do you want to let me know when?
You can queue it up later (two weeks is fine) once the fixup hits Linus' tree.
From: James Smart jsmart2021@gmail.com
[ Upstream commit 77ffd3465ba837e9dc714e17b014e77b2eae765a ]
When SCSI-MQ is enabled, the SCSI-MQ layers will do pre-allocation of MQ resources based on shost values set by the driver. In newer versions of the driver, which attempt to set nr_hw_queues to the CPU count, the multipliers become excessive, with a single shost's SCSI-MQ pre-allocation reaching into the multiple-GByte range. NPIV, which creates additional shosts, only multiplies this overhead. On lower-memory systems, this can exhaust system memory very quickly, resulting in a system crash or failures in the driver or elsewhere due to low memory conditions.
After testing several scenarios, the situation can be mitigated by limiting the value set in shost->nr_hw_queues to 4. Although the shost values were changed, the driver still had per-cpu hardware queues of its own that allowed per-cpu parallelization. Testing revealed that even with the smallish number for nr_hw_queues for SCSI-MQ, performance levels remained near maximum with the within-driver affinitization.
A module parameter was created to make the value set for nr_hw_queues tunable.
Signed-off-by: Dick Kennedy dick.kennedy@broadcom.com Signed-off-by: James Smart jsmart2021@gmail.com Reviewed-by: Ming Lei ming.lei@redhat.com Reviewed-by: Ewan D. Milne emilne@redhat.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/lpfc/lpfc.h | 1 + drivers/scsi/lpfc/lpfc_attr.c | 15 +++++++++++++++ drivers/scsi/lpfc/lpfc_init.c | 10 ++++++---- drivers/scsi/lpfc/lpfc_sli4.h | 5 +++++ 4 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index aafcffaa25f71..4604e1bc334c0 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -822,6 +822,7 @@ struct lpfc_hba { uint32_t cfg_cq_poll_threshold; uint32_t cfg_cq_max_proc_limit; uint32_t cfg_fcp_cpu_map; + uint32_t cfg_fcp_mq_threshold; uint32_t cfg_hdw_queue; uint32_t cfg_irq_chann; uint32_t cfg_suppress_rsp; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index d4c65e2109e2f..353da12d797ba 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5640,6 +5640,19 @@ LPFC_ATTR_RW(nvme_oas, 0, 0, 1, LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, "Embed NVME Command in WQE");
+/* + * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues + * the driver will advertise it supports to the SCSI layer. + * + * 0 = Set nr_hw_queues by the number of CPUs or HW queues. + * 1,128 = Manually specify the maximum nr_hw_queue value to be set, + * + * Value range is [0,128]. Default value is 8. + */ +LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, + LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, + "Set the number of SCSI Queues advertised"); + /* * lpfc_hdw_queue: Set the number of Hardware Queues the driver * will advertise it supports to the NVME and SCSI layers. This also @@ -5961,6 +5974,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_cq_poll_threshold, &dev_attr_lpfc_cq_max_proc_limit, &dev_attr_lpfc_fcp_cpu_map, + &dev_attr_lpfc_fcp_mq_threshold, &dev_attr_lpfc_hdw_queue, &dev_attr_lpfc_irq_chann, &dev_attr_lpfc_suppress_rsp, @@ -7042,6 +7056,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) /* Initialize first burst. Target vs Initiator are different. */ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); + lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold); lpfc_hdw_queue_init(phba, lpfc_hdw_queue); lpfc_irq_chann_init(phba, lpfc_irq_chann); lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index eaaef682de251..2fd8f15f99975 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4308,10 +4308,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) { - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) - shost->nr_hw_queues = phba->cfg_hdw_queue; - else - shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; + if (!phba->cfg_fcp_mq_threshold || + phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) + phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; + + shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), + phba->cfg_fcp_mq_threshold);
shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 8e4fd1a98023c..986594ec40e2a 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -44,6 +44,11 @@ #define LPFC_HBA_HDWQ_MAX 128 #define LPFC_HBA_HDWQ_DEF 0
+/* FCP MQ queue count limiting */ +#define LPFC_FCP_MQ_THRESHOLD_MIN 0 +#define LPFC_FCP_MQ_THRESHOLD_MAX 128 +#define LPFC_FCP_MQ_THRESHOLD_DEF 8 + /* Common buffer size to accomidate SCSI and NVME IO buffers */ #define LPFC_COMMON_IO_BUF_SZ 768
From: Dexuan Cui decui@microsoft.com
[ Upstream commit d09bc83640d524b8467a660db7b1d15e6562a1de ]
Simplify the ring buffer handling with the in-place API.
Also avoid the dynamic allocation and the memory leak in the channel callback function.
Signed-off-by: Dexuan Cui decui@microsoft.com Acked-by: Dmitry Torokhov dmitry.torokhov@gmail.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/input/serio/hyperv-keyboard.c | 35 +++++---------------------- 1 file changed, 6 insertions(+), 29 deletions(-)
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index 8e457e50f837e..770e36d0c66fb 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
static void hv_kbd_on_channel_callback(void *context) { + struct vmpacket_descriptor *desc; struct hv_device *hv_dev = context; - void *buffer; - int bufferlen = 0x100; /* Start with sensible size */ u32 bytes_recvd; u64 req_id; - int error;
- buffer = kmalloc(bufferlen, GFP_ATOMIC); - if (!buffer) - return; - - while (1) { - error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen, - &bytes_recvd, &req_id); - switch (error) { - case 0: - if (bytes_recvd == 0) { - kfree(buffer); - return; - } - - hv_kbd_handle_received_packet(hv_dev, buffer, - bytes_recvd, req_id); - break; + foreach_vmbus_pkt(desc, hv_dev->channel) { + bytes_recvd = desc->len8 * 8; + req_id = desc->trans_id;
- case -ENOBUFS: - kfree(buffer); - /* Handle large packet */ - bufferlen = bytes_recvd; - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - if (!buffer) - return; - break; - } + hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd, + req_id); } }
From: Vitaly Kuznetsov vkuznets@redhat.com
[ Upstream commit 89eb4d8d25722a0a0194cf7fa47ba602e32a6da7 ]
When building hv_kvp_daemon GCC-8.3 complains:
hv_kvp_daemon.c: In function ‘kvp_get_ip_info.constprop’:
hv_kvp_daemon.c:812:30: warning: ‘ip_buffer’ may be used uninitialized in this function [-Wmaybe-uninitialized]
  struct hv_kvp_ipaddr_value *ip_buffer;
this seems to be a false positive: we only use ip_buffer when op == KVP_OP_GET_IP_INFO and it is only unset when op == KVP_OP_ENUMERATE.
Silence the warning by initializing ip_buffer to NULL.
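The silencing pattern is the standard one for a false-positive -Wmaybe-uninitialized: give the pointer a provable initial value even though every real path assigns it first. A generic example (not the daemon code):

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char *name = NULL;	/* NULL init: the compiler can no longer
					 * claim a read of an indeterminate value */

		if (argc > 1)
			name = argv[1];

		if (argc > 1)		/* same condition, but GCC-8 can't prove it */
			printf("%s\n", name);
		return 0;
	}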
Signed-off-by: Vitaly Kuznetsov vkuznets@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/hv/hv_kvp_daemon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index d7e06fe0270ee..6d809abaf338a 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op, int sn_offset = 0; int error = 0; char *buffer; - struct hv_kvp_ipaddr_value *ip_buffer; + struct hv_kvp_ipaddr_value *ip_buffer = NULL; char cidr_mask[5]; /* /xyz */ int weight; int i;
From: Jens Axboe axboe@kernel.dk
[ Upstream commit 500f9fbadef86466a435726192f4ca4df7d94236 ]
If a request issue ends up being punted to async context to avoid blocking, we can get into a situation where the original application enters the poll loop for that very request before it has been issued. This should not be an issue, except that the polling will hold the io_uring uring_ctx mutex for the duration of the poll. When the async worker has actually issued the request, it needs to acquire this mutex to add the request to the poll issued list. Since the application polling is already holding this mutex, the workqueue sleeps on the mutex forever, and the application thus never gets a chance to poll for the very request it was interested in.
Fix this by ensuring that the polling drops the uring_ctx mutex occasionally if it's not making any progress.
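The fix is the classic "breather" idiom: a loop that holds a mutex for a long time periodically drops and immediately retakes it, so that a blocked thread gets a window to run. A standalone pthread sketch (hypothetical loop body, not the io_uring code):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void poll_loop(bool (*done)(void))
	{
		int iters = 0;

		pthread_mutex_lock(&lock);
		while (!done()) {
			/* every 8 iterations, give waiters a window */
			if (!(++iters & 7)) {
				pthread_mutex_unlock(&lock);
				pthread_mutex_lock(&lock);
			}
			/* ... poll for events while holding the lock ... */
		}
		pthread_mutex_unlock(&lock);
	}

	static bool finished(void) { return true; }

	int main(void) { poll_loop(finished); return 0; }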
Reported-by: Jeffrey M. Birnbaum jmbnyc@gmail.com Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- fs/io_uring.c | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c index 61018559e8fe6..5bb01d84f38d3 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -743,11 +743,34 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx) static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, long min) { - int ret = 0; + int iters, ret = 0; + + /* + * We disallow the app entering submit/complete with polling, but we + * still need to lock the ring to prevent racing with polled issue + * that got punted to a workqueue. + */ + mutex_lock(&ctx->uring_lock);
+ iters = 0; do { int tmin = 0;
+ /* + * If a submit got punted to a workqueue, we can have the + * application entering polling for a command before it gets + * issued. That app will hold the uring_lock for the duration + * of the poll right here, so we need to take a breather every + * now and then to ensure that the issue has a chance to add + * the poll to the issued list. Otherwise we can spin here + * forever, while the workqueue is stuck trying to acquire the + * very same mutex. + */ + if (!(++iters & 7)) { + mutex_unlock(&ctx->uring_lock); + mutex_lock(&ctx->uring_lock); + } + if (*nr_events < min) tmin = min - *nr_events;
@@ -757,6 +780,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, ret = 0; } while (min && !*nr_events && !need_resched());
+ mutex_unlock(&ctx->uring_lock); return ret; }
@@ -2073,15 +2097,7 @@ static int io_sq_thread(void *data) unsigned nr_events = 0;
if (ctx->flags & IORING_SETUP_IOPOLL) { - /* - * We disallow the app entering submit/complete - * with polling, but we still need to lock the - * ring to prevent racing with polled issue - * that got punted to a workqueue. - */ - mutex_lock(&ctx->uring_lock); io_iopoll_check(ctx, &nr_events, 0); - mutex_unlock(&ctx->uring_lock); } else { /* * Normal IO, just pretend everything completed. @@ -2978,9 +2994,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, min_complete = min(min_complete, ctx->cq_entries);
if (ctx->flags & IORING_SETUP_IOPOLL) { - mutex_lock(&ctx->uring_lock); ret = io_iopoll_check(ctx, &nr_events, min_complete); - mutex_unlock(&ctx->uring_lock); } else { ret = io_cqring_wait(ctx, min_complete, sig, sigsz); }
From: Anton Eidelman anton@lightbitslabs.com
[ Upstream commit 504db087aaccdb32af61539916409f7dca31ceb5 ]
nvme_state_set_live() making a path available triggers requeue_work in order to resubmit requests that ended up on requeue_list when no paths were available.
This requeue_work may race with a concurrent nvme_ns_head_make_request() that does not observe the live path yet. Such concurrent requests may be made by either:
- New IO submission.
- Requeue_work triggered by nvme_failover_req() or another ana_work.
A race may cause requeue_work to capture the state of requeue_list before more requests get onto the list. These requests will stay on the list forever unless requeue_work is triggered again.
In order to prevent such a race, nvme_state_set_live() should synchronize_srcu(&head->srcu) before triggering the requeue_work, preventing nvme_ns_head_make_request from referencing an old snapshot of the path list.
Reviewed-by: Christoph Hellwig hch@lst.de Signed-off-by: Anton Eidelman anton@lightbitslabs.com Signed-off-by: Sagi Grimberg sagi@grimberg.me Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/multipath.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index e942b3e840687..4f599366bc5ae 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -356,6 +356,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) srcu_read_unlock(&head->srcu, srcu_idx); }
+ synchronize_srcu(&ns->head->srcu); kblockd_schedule_work(&ns->head->requeue_work); }
From: "Guilherme G. Piccoli" gpiccoli@canonical.com
[ Upstream commit a89fcca8185633993018dc081d6b021d005e6d0b ]
Commit 1b1031ca63b2 ("nvme: validate cntlid during controller initialisation") introduced a validation for controllers with duplicate cntlid that runs in nvme_init_subsystem(). The problem is that the validation relies on ctrl->cntlid, and this value is assigned (from the id_ctrl value) after the call to nvme_init_subsystem() in nvme_init_identify() for the non-fabrics scenario. That leads to ctrl->cntlid always being 0 when we have a physical set of controllers in the same subsystem.
This patch fixes that by loading the discovered cntlid id_ctrl value into ctrl->cntlid before the subsystem initialization, only for the non-fabrics case. The patch was tested with emulated nvme devices (qemu) having two controllers in a single subsystem. Without the patch it fails the duplicate check; with the patch, the subsystem holds both controllers.
For the fabrics case we see ctrl->cntlid has a more intricate relation with the admin connect, so we didn't change that.
Fixes: 1b1031ca63b2 ("nvme: validate cntlid during controller initialisation") Signed-off-by: Guilherme G. Piccoli gpiccoli@canonical.com Reviewed-by: Sagi Grimberg sagi@grimberg.me Signed-off-by: Sagi Grimberg sagi@grimberg.me Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 5deb4deb38209..c40b807c667ed 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2543,6 +2543,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) goto out_free; }
+ if (!(ctrl->ops->flags & NVME_F_FABRICS)) + ctrl->cntlid = le16_to_cpu(id->cntlid); + if (!ctrl->identified) { int i;
@@ -2643,7 +2646,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) goto out_free; } } else { - ctrl->cntlid = le16_to_cpu(id->cntlid); ctrl->hmpre = le32_to_cpu(id->hmpre); ctrl->hmmin = le32_to_cpu(id->hmmin); ctrl->hmminds = le32_to_cpu(id->hmminds);
From: Jens Axboe axboe@kernel.dk
[ Upstream commit a3a0e43fd77013819e4b6f55e37e0efe8e35d805 ]
We need to check if we have CQEs pending before starting a poll loop, as those could be the events we will be spinning for (and hence we'll find none). This can happen if a CQE triggers an error, or if it is found by eg an IRQ before we get a chance to find it through polling.
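The io_cqring_events() helper being moved is simple ring arithmetic: with free-running unsigned head and tail counters, tail - head is the number of unconsumed entries even across wraparound. A minimal demonstration (toy counters, not the io_uring structures):

	#include <stdio.h>

	int main(void)
	{
		unsigned int head = 0xfffffffe;	/* about to wrap */
		unsigned int tail = 0x00000001;	/* already wrapped */

		/* 3 entries pending; modular arithmetic handles the wrap */
		printf("pending = %u\n", tail - head);
		return 0;
	}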
Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- fs/io_uring.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c index 5bb01d84f38d3..83e3cede11220 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -618,6 +618,13 @@ static void io_put_req(struct io_kiocb *req) io_free_req(req); }
+static unsigned io_cqring_events(struct io_cq_ring *ring) +{ + /* See comment at the top of this file */ + smp_rmb(); + return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head); +} + /* * Find and free completed poll iocbs */ @@ -756,6 +763,14 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, do { int tmin = 0;
+ /* + * Don't enter poll loop if we already have events pending. + * If we do, we can potentially be spinning for commands that + * already triggered a CQE (eg in error). + */ + if (io_cqring_events(ctx->cq_ring)) + break; + /* * If a submit got punted to a workqueue, we can have the * application entering polling for a command before it gets @@ -2232,13 +2247,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit) return submit; }
-static unsigned io_cqring_events(struct io_cq_ring *ring) -{ - /* See comment at the top of this file */ - smp_rmb(); - return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head); -} - /* * Wait until events become available, if we don't already have some. The * application must reap them itself, as they reside on the shared cq ring.
From: zhengbin zhengbin13@huawei.com
[ Upstream commit a7bfb93f0211b4a2f1ffeeb259ed6206bac30460 ]
In cma_init(), if cma_configfs_init() fails, we need to free the previously allocated memory and return the error; otherwise a null-ptr-deref read will be triggered in cma_cleanup():
cma_cleanup
  cma_configfs_exit
    configfs_unregister_subsystem
Fixes: 045959db65c6 ("IB/cma: Add configfs for rdma_cm") Reported-by: Hulk Robot hulkci@huawei.com Signed-off-by: zhengbin zhengbin13@huawei.com Reviewed-by: Parav Pandit parav@mellanox.com Link: https://lore.kernel.org/r/1566188859-103051-1-git-send-email-zhengbin13@huaw... Signed-off-by: Doug Ledford dledford@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/core/cma.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 19f1730a4f244..a68d0ccf67a43 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4724,10 +4724,14 @@ static int __init cma_init(void) if (ret) goto err;
- cma_configfs_init(); + ret = cma_configfs_init(); + if (ret) + goto err_ib;
return 0;
+err_ib: + ib_unregister_client(&cma_client); err: unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client);
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit 5c1baaa82cea2c815a5180ded402a7cd455d1810 ]
In mlx4_ib_alloc_pv_bufs(), 'tun_qp->tx_ring' is allocated through kcalloc(). However, it is not deallocated on all subsequent error paths, leading to memory leaks. To fix this issue, free 'tun_qp->tx_ring' whenever an error occurs.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Acked-by: Leon Romanovsky leonro@mellanox.com Link: https://lore.kernel.org/r/1566159781-4642-1-git-send-email-wenwen@cs.uga.edu Signed-off-by: Doug Ledford dledford@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/hw/mlx4/mad.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 68c951491a08a..57079110af9b5 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1677,8 +1677,6 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, tx_buf_size, DMA_TO_DEVICE); kfree(tun_qp->tx_ring[i].buf.addr); } - kfree(tun_qp->tx_ring); - tun_qp->tx_ring = NULL; i = MLX4_NUM_TUNNEL_BUFS; err: while (i > 0) { @@ -1687,6 +1685,8 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, rx_buf_size, DMA_FROM_DEVICE); kfree(tun_qp->ring[i].addr); } + kfree(tun_qp->tx_ring); + tun_qp->tx_ring = NULL; kfree(tun_qp->ring); tun_qp->ring = NULL; return -ENOMEM;
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit b08afa064c320e5d85cdc27228426b696c4c8dae ]
In fault_opcodes_read(), 'data' is not deallocated if debugfs_file_get() fails, leading to a memory leak. To fix this bug, introduce the 'free_data' label to free 'data' before returning the error.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Reviewed-by: Leon Romanovsky leonro@mellanox.com Acked-by: Dennis Dalessandro dennis.dalessandro@intel.com Link: https://lore.kernel.org/r/1566156571-4335-1-git-send-email-wenwen@cs.uga.edu Signed-off-by: Doug Ledford dledford@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/hw/hfi1/fault.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index 93613e5def9b7..814324d172950 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -214,7 +214,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, return -ENOMEM; ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) - return ret; + goto free_data; bit = find_first_bit(fault->opcodes, bitsize); while (bit < bitsize) { zero = find_next_zero_bit(fault->opcodes, bitsize, bit); @@ -232,6 +232,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, data[size - 1] = '\n'; data[size] = '\0'; ret = simple_read_from_buffer(buf, len, pos, data, size); +free_data: kfree(data); return ret; }
From: Wenwen Wang wenwen@cs.uga.edu
[ Upstream commit 2323d7baab2b18d87d9bc267452e387aa9f0060a ]
In fault_opcodes_write(), 'data' is allocated through kcalloc(). However, it is not deallocated on all subsequent error paths, leading to memory leaks. To fix this issue, introduce the 'free_data' label to free 'data' before returning the error.
Signed-off-by: Wenwen Wang wenwen@cs.uga.edu Reviewed-by: Leon Romanovsky leonro@mellanox.com Acked-by: Dennis Dalessandro dennis.dalessandro@intel.com Link: https://lore.kernel.org/r/1566154486-3713-1-git-send-email-wenwen@cs.uga.edu Signed-off-by: Doug Ledford dledford@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/hw/hfi1/fault.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index 814324d172950..986c12153e62e 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, if (!data) return -ENOMEM; copy = min(len, datalen - 1); - if (copy_from_user(data, buf, copy)) - return -EFAULT; + if (copy_from_user(data, buf, copy)) { + ret = -EFAULT; + goto free_data; + }
ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) - return ret; + goto free_data; ptr = data; token = ptr; for (ptr = data; *ptr; ptr = end + 1, token = ptr) { @@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, ret = len;
debugfs_file_put(file->f_path.dentry); +free_data: kfree(data); return ret; }
From: Paolo Bonzini pbonzini@redhat.com
[ Upstream commit 54577e5018a8c0cb79c9a0fa118a55c68715d398 ]
state_test and smm_test are failing on older processors that do not have xcr0. This is because on those processors KVM does provide support for KVM_GET/SET_XSAVE (to avoid having to rely on the older KVM_GET/SET_FPU) but not for KVM_GET/SET_XCRS.
Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../testing/selftests/kvm/lib/x86_64/processor.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index d2ad85fb01ac0..5f1ba3da2dbd3 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1059,9 +1059,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i", r);
- r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); - TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", - r); + if (kvm_check_cap(KVM_CAP_XCRS)) { + r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); + TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", + r); + }
r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs); TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i", @@ -1102,9 +1104,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", r);
- r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); - TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", - r); + if (kvm_check_cap(KVM_CAP_XCRS)) { + r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); + TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", + r); + }
r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
From: Vitaly Kuznetsov vkuznets@redhat.com
[ Upstream commit e4427372398c31f57450565de277f861a4db5b3b ]
test_msr_platform_info_disabled() generates EXIT_SHUTDOWN, but VMCB state is undefined after that, so an attempt to launch this guest again from test_msr_platform_info_enabled() fails. Reorder the tests to make the test pass.
Signed-off-by: Vitaly Kuznetsov vkuznets@redhat.com Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/testing/selftests/kvm/x86_64/platform_info_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c index 40050e44ec0ac..f9334bd3cce9f 100644 --- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c +++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c @@ -99,8 +99,8 @@ int main(int argc, char *argv[]) msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); - test_msr_platform_info_disabled(vm); test_msr_platform_info_enabled(vm); + test_msr_platform_info_disabled(vm); vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
kvm_vm_free(vm);
From: Nicolai Hähnle nicolai.haehnle@amd.com
[ Upstream commit 1a701ea924815b0518733aa8d5d05c1f6fa87062 ]
Error out if the AMDGPU_CS ioctl is called with multiple SYNCOBJ_OUT and/or TIMELINE_SIGNAL chunks, since otherwise the last chunk wins while the allocated array as well as the reference counts of sync objects are leaked.
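The guard itself is a one-line idempotence check before the allocation (an outline drawn from the diff below, not verbatim source):

	/* A previous SYNCOBJ_OUT or TIMELINE_SIGNAL chunk already
	 * allocated the array; a second one is a malformed submission. */
	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);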
Signed-off-by: Nicolai Hähnle nicolai.haehnle@amd.com Reviewed-by: Christian König christian.koenig@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index fe028561dc0e6..bc40d6eabce7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1192,6 +1192,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_sem);
+ if (p->post_deps) + return -EINVAL; + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), GFP_KERNEL); p->num_post_deps = 0; @@ -1215,8 +1218,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, - struct amdgpu_cs_chunk - *chunk) + struct amdgpu_cs_chunk *chunk) { struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; unsigned num_deps; @@ -1226,6 +1228,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+ if (p->post_deps) + return -EINVAL; + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), GFP_KERNEL); p->num_post_deps = 0;
From: Luis Henriques lhenriques@suse.com
[ Upstream commit 86968ef21596515958d5f0a40233d02be78ecec0 ]
Calling ceph_buffer_put() in __ceph_setxattr() may end up freeing the i_xattrs.prealloc_blob buffer while holding the i_ceph_lock. This can be fixed by postponing the call until later, when the lock is released.
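The fix pattern, stashing the pointer while the lock is held and freeing it only after unlocking, can be shown standalone (hypothetical names; a pthread mutex standing in for i_ceph_lock):

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static char *prealloc;

	static void replace_prealloc(char *fresh)
	{
		char *old;

		pthread_mutex_lock(&lock);
		old = prealloc;		/* can't free while holding the lock */
		prealloc = fresh;
		pthread_mutex_unlock(&lock);

		free(old);		/* deferred until the lock is dropped */
	}

	int main(void)
	{
		replace_prealloc(malloc(64));
		replace_prealloc(malloc(128));
		free(prealloc);
		return 0;
	}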
The following backtrace was triggered by fstests generic/117.
BUG: sleeping function called from invalid context at mm/vmalloc.c:2283 in_atomic(): 1, irqs_disabled(): 0, pid: 650, name: fsstress 3 locks held by fsstress/650: #0: 00000000870a0fe8 (sb_writers#8){.+.+}, at: mnt_want_write+0x20/0x50 #1: 00000000ba0c4c74 (&type->i_mutex_dir_key#6){++++}, at: vfs_setxattr+0x55/0xa0 #2: 000000008dfbb3f2 (&(&ci->i_ceph_lock)->rlock){+.+.}, at: __ceph_setxattr+0x297/0x810 CPU: 1 PID: 650 Comm: fsstress Not tainted 5.2.0+ #437 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58-prebuilt.qemu.org 04/01/2014 Call Trace: dump_stack+0x67/0x90 ___might_sleep.cold+0x9f/0xb1 vfree+0x4b/0x60 ceph_buffer_release+0x1b/0x60 __ceph_setxattr+0x2b4/0x810 __vfs_setxattr+0x66/0x80 __vfs_setxattr_noperm+0x59/0xf0 vfs_setxattr+0x81/0xa0 setxattr+0x115/0x230 ? filename_lookup+0xc9/0x140 ? rcu_read_lock_sched_held+0x74/0x80 ? rcu_sync_lockdep_assert+0x2e/0x60 ? __sb_start_write+0x142/0x1a0 ? mnt_want_write+0x20/0x50 path_setxattr+0xba/0xd0 __x64_sys_lsetxattr+0x24/0x30 do_syscall_64+0x50/0x1c0 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x7ff23514359a
Signed-off-by: Luis Henriques lhenriques@suse.com Reviewed-by: Jeff Layton jlayton@kernel.org Signed-off-by: Ilya Dryomov idryomov@gmail.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ceph/xattr.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 0619adbcbe14c..8382299fc2d84 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -1028,6 +1028,7 @@ int __ceph_setxattr(struct inode *inode, const char *name, struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_cap_flush *prealloc_cf = NULL; + struct ceph_buffer *old_blob = NULL; int issued; int err; int dirty = 0; @@ -1101,13 +1102,15 @@ int __ceph_setxattr(struct inode *inode, const char *name, struct ceph_buffer *blob;
spin_unlock(&ci->i_ceph_lock); - dout(" preaallocating new blob size=%d\n", required_blob_size); + ceph_buffer_put(old_blob); /* Shouldn't be required */ + dout(" pre-allocating new blob size=%d\n", required_blob_size); blob = ceph_buffer_new(required_blob_size, GFP_NOFS); if (!blob) goto do_sync_unlocked; spin_lock(&ci->i_ceph_lock); + /* prealloc_blob can't be released while holding i_ceph_lock */ if (ci->i_xattrs.prealloc_blob) - ceph_buffer_put(ci->i_xattrs.prealloc_blob); + old_blob = ci->i_xattrs.prealloc_blob; ci->i_xattrs.prealloc_blob = blob; goto retry; } @@ -1123,6 +1126,7 @@ int __ceph_setxattr(struct inode *inode, const char *name, }
spin_unlock(&ci->i_ceph_lock); + ceph_buffer_put(old_blob); if (lock_snap_rwsem) up_read(&mdsc->snap_rwsem); if (dirty)
On Thu, Aug 29, 2019 at 8:15 PM Sasha Levin sashal@kernel.org wrote:
Hi Sasha,
I didn't tag i_ceph_lock series for stable because this is a very old bug which no one ever hit in real life, at least to my knowledge.
Please note that each of these patches requires 5c498950f730 ("libceph: allow ceph_buffer_put() to receive a NULL ceph_buffer").
Thanks,
Ilya
On Thu, Aug 29, 2019 at 10:51:04PM +0200, Ilya Dryomov wrote:
> Hi Sasha,
>
> I didn't tag the i_ceph_lock series for stable because this is a very
> old bug which, to my knowledge, no one has ever hit in real life.
I can drop it if you prefer.
--
Thanks,
Sasha
On Thu, Aug 29, 2019 at 11:16 PM Sasha Levin sashal@kernel.org wrote:
> On Thu, Aug 29, 2019 at 10:51:04PM +0200, Ilya Dryomov wrote:
> > Hi Sasha,
> >
> > I didn't tag the i_ceph_lock series for stable because this is a very
> > old bug which, to my knowledge, no one has ever hit in real life.
>
> I can drop it if you prefer.
Either is fine with me. I just wanted to explain my rationale for not tagging them for stable in the first place and point out that there is a prerequisite.
Thanks,
Ilya
From: Luis Henriques lhenriques@suse.com
[ Upstream commit 12fe3dda7ed89c95cc0ef7abc001ad1ad3e092f8 ]
Calling ceph_buffer_put() in __ceph_build_xattrs_blob() may result in freeing the i_xattrs.blob buffer while holding the i_ceph_lock. This can be fixed by having the function return the old blob buffer and having its callers free it once the lock has been released.
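The ownership hand-off is the interesting part: the helper swaps the new blob into place under the lock but hands the stale reference back instead of dropping it, and the caller drops it only after unlocking. Below is a minimal userspace C sketch of that shape, with a pthread mutex standing in for i_ceph_lock and plain malloc/free for ceph_buffer; all names are illustrative only, not the kernel's.

#include <pthread.h>
#include <stdlib.h>

struct blob { char *data; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct blob *cur_blob;      /* stands in for ci->i_xattrs.blob */
static struct blob *prealloc_blob; /* stands in for ci->i_xattrs.prealloc_blob */

/* Swap the preallocated blob into place; call with 'lock' held.
 * The stale blob is handed back to the caller instead of being freed
 * here, because freeing may sleep while the (spin)lock is held. */
static struct blob *build_xattrs_blob(void)
{
	struct blob *old = cur_blob;

	cur_blob = prealloc_blob;
	prealloc_blob = NULL;
	return old;
}

static void flush_xattrs(void)
{
	struct blob *old;

	pthread_mutex_lock(&lock);
	old = build_xattrs_blob();
	pthread_mutex_unlock(&lock);

	if (old) {		/* only now is it safe to release */
		free(old->data);
		free(old);
	}
}

int main(void)
{
	flush_xattrs();
	return 0;
}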
The following backtrace was triggered by fstests generic/117.
BUG: sleeping function called from invalid context at mm/vmalloc.c:2283
in_atomic(): 1, irqs_disabled(): 0, pid: 649, name: fsstress
4 locks held by fsstress/649:
 #0: 00000000a7478e7e (&type->s_umount_key#19){++++}, at: iterate_supers+0x77/0xf0
 #1: 00000000f8de1423 (&(&ci->i_ceph_lock)->rlock){+.+.}, at: ceph_check_caps+0x7b/0xc60
 #2: 00000000562f2b27 (&s->s_mutex){+.+.}, at: ceph_check_caps+0x3bd/0xc60
 #3: 00000000f83ce16a (&mdsc->snap_rwsem){++++}, at: ceph_check_caps+0x3ed/0xc60
CPU: 1 PID: 649 Comm: fsstress Not tainted 5.2.0+ #439
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58-prebuilt.qemu.org 04/01/2014
Call Trace:
 dump_stack+0x67/0x90
 ___might_sleep.cold+0x9f/0xb1
 vfree+0x4b/0x60
 ceph_buffer_release+0x1b/0x60
 __ceph_build_xattrs_blob+0x12b/0x170
 __send_cap+0x302/0x540
 ? __lock_acquire+0x23c/0x1e40
 ? __mark_caps_flushing+0x15c/0x280
 ? _raw_spin_unlock+0x24/0x30
 ceph_check_caps+0x5f0/0xc60
 ceph_flush_dirty_caps+0x7c/0x150
 ? __ia32_sys_fdatasync+0x20/0x20
 ceph_sync_fs+0x5a/0x130
 iterate_supers+0x8f/0xf0
 ksys_sync+0x4f/0xb0
 __ia32_sys_sync+0xa/0x10
 do_syscall_64+0x50/0x1c0
 entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x7fc6409ab617
Signed-off-by: Luis Henriques lhenriques@suse.com
Reviewed-by: Jeff Layton jlayton@kernel.org
Signed-off-by: Ilya Dryomov idryomov@gmail.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/ceph/caps.c  |  5 ++++-
 fs/ceph/snap.c  |  4 +++-
 fs/ceph/super.h |  2 +-
 fs/ceph/xattr.c | 11 ++++++++---
 4 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 7754d76791228..622467e47cde8 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1305,6 +1305,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 {
 	struct ceph_inode_info *ci = cap->ci;
 	struct inode *inode = &ci->vfs_inode;
+	struct ceph_buffer *old_blob = NULL;
 	struct cap_msg_args arg;
 	int held, revoking;
 	int wake = 0;
@@ -1369,7 +1370,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	ci->i_requested_max_size = arg.max_size;

 	if (flushing & CEPH_CAP_XATTR_EXCL) {
-		__ceph_build_xattrs_blob(ci);
+		old_blob = __ceph_build_xattrs_blob(ci);
 		arg.xattr_version = ci->i_xattrs.version;
 		arg.xattr_buf = ci->i_xattrs.blob;
 	} else {
@@ -1404,6 +1405,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,

 	spin_unlock(&ci->i_ceph_lock);

+	ceph_buffer_put(old_blob);
+
 	ret = send_cap_msg(&arg);
 	if (ret < 0) {
 		dout("error sending cap msg, must requeue %p\n", inode);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 72c6c022f02bd..213bc1475e91f 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -464,6 +464,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	struct inode *inode = &ci->vfs_inode;
 	struct ceph_cap_snap *capsnap;
 	struct ceph_snap_context *old_snapc, *new_snapc;
+	struct ceph_buffer *old_blob = NULL;
 	int used, dirty;

 	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -540,7 +541,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	capsnap->gid = inode->i_gid;

 	if (dirty & CEPH_CAP_XATTR_EXCL) {
-		__ceph_build_xattrs_blob(ci);
+		old_blob = __ceph_build_xattrs_blob(ci);
 		capsnap->xattr_blob = ceph_buffer_get(ci->i_xattrs.blob);
 		capsnap->xattr_version = ci->i_xattrs.version;
@@ -583,6 +584,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	}
 	spin_unlock(&ci->i_ceph_lock);

+	ceph_buffer_put(old_blob);
 	kfree(capsnap);
 	ceph_put_snap_context(old_snapc);
 }
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 1d313d0536f9d..38b42d7594b67 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -924,7 +924,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
 int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
 ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
 extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
 extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
 extern void __init ceph_xattr_init(void);
 extern void ceph_xattr_exit(void);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 8382299fc2d84..9772db01720b9 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -752,12 +752,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,

 /*
  * If there are dirty xattrs, reencode xattrs into the prealloc_blob
- * and swap into place.
+ * and swap into place.  It returns the old i_xattrs.blob (or NULL) so
+ * that it can be freed by the caller as the i_ceph_lock is likely to be
+ * held.
  */
-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 {
 	struct rb_node *p;
 	struct ceph_inode_xattr *xattr = NULL;
+	struct ceph_buffer *old_blob = NULL;
 	void *dest;

 	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -788,12 +791,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

 		if (ci->i_xattrs.blob)
-			ceph_buffer_put(ci->i_xattrs.blob);
+			old_blob = ci->i_xattrs.blob;
 		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
 		ci->i_xattrs.prealloc_blob = NULL;
 		ci->i_xattrs.dirty = false;
 		ci->i_xattrs.version++;
 	}
+
+	return old_blob;
 }
static inline int __get_request_mask(struct inode *in) {
From: Luis Henriques lhenriques@suse.com
[ Upstream commit af8a85a41734f37b67ba8ce69d56b685bee4ac48 ]
Calling ceph_buffer_put() in fill_inode() may result in freeing the i_xattrs.blob buffer while holding the i_ceph_lock. This can be fixed by postponing the call until later, when the lock is released.
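Here the same deferral is expressed as a common-exit-label pattern: the stale pointer is parked in old_blob under the lock, and both buffers are dropped on the way out, after the unlock. A rough userspace sketch under those assumptions (illustrative names, pthread/malloc stand-ins for the kernel primitives):

#include <pthread.h>
#include <stdlib.h>

struct buffer { void *data; };

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buffer *inode_blob;	/* stands in for ci->i_xattrs.blob */

/* NULL-tolerant put, in the spirit of the 5c498950f730 prerequisite. */
static void buffer_put(struct buffer *b)
{
	if (!b)
		return;
	free(b->data);
	free(b);
}

static int fill_inode(struct buffer *xattr_blob)
{
	struct buffer *old_blob = NULL;
	int err = 0;

	pthread_mutex_lock(&i_lock);
	old_blob = inode_blob;		/* only unhook under the lock */
	inode_blob = xattr_blob;
	xattr_blob = NULL;
	pthread_mutex_unlock(&i_lock);

	/* the 'out:' label equivalent: possibly-sleeping frees happen
	 * only once the lock has been released */
	buffer_put(old_blob);
	buffer_put(xattr_blob);
	return err;
}

int main(void)
{
	return fill_inode(NULL);
}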
The following backtrace was triggered by fstests generic/070.
BUG: sleeping function called from invalid context at mm/vmalloc.c:2283
in_atomic(): 1, irqs_disabled(): 0, pid: 3852, name: kworker/0:4
6 locks held by kworker/0:4/3852:
 #0: 000000004270f6bb ((wq_completion)ceph-msgr){+.+.}, at: process_one_work+0x1b8/0x5f0
 #1: 00000000eb420803 ((work_completion)(&(&con->work)->work)){+.+.}, at: process_one_work+0x1b8/0x5f0
 #2: 00000000be1c53a4 (&s->s_mutex){+.+.}, at: dispatch+0x288/0x1476
 #3: 00000000559cb958 (&mdsc->snap_rwsem){++++}, at: dispatch+0x2eb/0x1476
 #4: 000000000d5ebbae (&req->r_fill_mutex){+.+.}, at: dispatch+0x2fc/0x1476
 #5: 00000000a83d0514 (&(&ci->i_ceph_lock)->rlock){+.+.}, at: fill_inode.isra.0+0xf8/0xf70
CPU: 0 PID: 3852 Comm: kworker/0:4 Not tainted 5.2.0+ #441
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58-prebuilt.qemu.org 04/01/2014
Workqueue: ceph-msgr ceph_con_workfn
Call Trace:
 dump_stack+0x67/0x90
 ___might_sleep.cold+0x9f/0xb1
 vfree+0x4b/0x60
 ceph_buffer_release+0x1b/0x60
 fill_inode.isra.0+0xa9b/0xf70
 ceph_fill_trace+0x13b/0xc70
 ? dispatch+0x2eb/0x1476
 dispatch+0x320/0x1476
 ? __mutex_unlock_slowpath+0x4d/0x2a0
 ceph_con_workfn+0xc97/0x2ec0
 ? process_one_work+0x1b8/0x5f0
 process_one_work+0x244/0x5f0
 worker_thread+0x4d/0x3e0
 kthread+0x105/0x140
 ? process_one_work+0x5f0/0x5f0
 ? kthread_park+0x90/0x90
 ret_from_fork+0x3a/0x50
Signed-off-by: Luis Henriques lhenriques@suse.com
Reviewed-by: Jeff Layton jlayton@kernel.org
Signed-off-by: Ilya Dryomov idryomov@gmail.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/ceph/inode.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 3c7a32779574b..ca3821b0309f7 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -743,6 +743,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	int issued, new_issued, info_caps;
 	struct timespec64 mtime, atime, ctime;
 	struct ceph_buffer *xattr_blob = NULL;
+	struct ceph_buffer *old_blob = NULL;
 	struct ceph_string *pool_ns = NULL;
 	struct ceph_cap *new_cap = NULL;
 	int err = 0;
@@ -883,7 +884,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
 		if (ci->i_xattrs.blob)
-			ceph_buffer_put(ci->i_xattrs.blob);
+			old_blob = ci->i_xattrs.blob;
 		ci->i_xattrs.blob = xattr_blob;
 		if (xattr_blob)
 			memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1023,8 +1024,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 out:
 	if (new_cap)
 		ceph_put_cap(mdsc, new_cap);
-	if (xattr_blob)
-		ceph_buffer_put(xattr_blob);
+	ceph_buffer_put(old_blob);
+	ceph_buffer_put(xattr_blob);
 	ceph_put_string(pool_ns);
 	return err;
 }
From: Andrew Jones drjones@redhat.com
[ Upstream commit 2113c5f62b7423e4a72b890bd479704aa85c81ba ]
If, after an MMIO exit to userspace, a VCPU is immediately run with an immediate_exit request, such as when a signal is delivered or an MMIO emulation completion is needed, then the VCPU completes the MMIO emulation and immediately returns to userspace. As the exit_reason does not get changed from KVM_EXIT_MMIO in these cases, we have to be careful not to complete the MMIO emulation again when the VCPU is eventually run again, because the emulation does an instruction skip (and doing too many skips would be a waste of guest code :-). We need to use additional VCPU state to track whether the emulation is complete. As luck would have it, we already have 'mmio_needed', which even appears to be used in this way by other architectures already.
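In other words, this is a one-shot completion flag: set when the MMIO exit is handed to userspace, cleared by the first completion, with a second call becoming a no-op rather than skipping another instruction. A hedged sketch of that state machine (simplified, hypothetical structures, not the kernel's actual ones):

#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
	bool mmio_needed;	/* set while an MMIO exit awaits completion */
	unsigned int pc;	/* simplified program counter */
};

/* Complete a pending MMIO emulation exactly once.  A second invocation
 * (e.g. after an immediate_exit round trip) must not skip another
 * instruction, so it just returns. */
static int complete_mmio(struct vcpu_state *vcpu)
{
	if (!vcpu->mmio_needed)
		return 0;	/* already handled: do nothing */

	vcpu->mmio_needed = false;
	vcpu->pc += 4;		/* skip the emulated MMIO instruction once */
	return 0;
}

int main(void)
{
	struct vcpu_state vcpu = { .mmio_needed = true, .pc = 0x1000 };

	complete_mmio(&vcpu);	/* skips the instruction */
	complete_mmio(&vcpu);	/* no-op: pc stays at 0x1004 */
	printf("pc = 0x%x\n", vcpu.pc);
	return 0;
}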
Fixes: 0d640732dbeb ("arm64: KVM: Skip MMIO insn after emulation")
Acked-by: Mark Rutland mark.rutland@arm.com
Signed-off-by: Andrew Jones drjones@redhat.com
Signed-off-by: Marc Zyngier maz@kernel.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 virt/kvm/arm/mmio.c | 7 +++++++
 1 file changed, 7 insertions(+)
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index a8a6a0c883f1b..6af5c91337f25 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	unsigned int len;
 	int mask;

+	/* Detect an already handled MMIO return */
+	if (unlikely(!vcpu->mmio_needed))
+		return 0;
+
+	vcpu->mmio_needed = 0;
+
 	if (!run->mmio.is_write) {
 		len = run->mmio.len;
 		if (len > sizeof(unsigned long))
@@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	run->mmio.is_write	= is_write;
 	run->mmio.phys_addr	= fault_ipa;
 	run->mmio.len		= len;
+	vcpu->mmio_needed	= 1;

 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
From: David Howells dhowells@redhat.com
[ Upstream commit a5fb8e6c02d6a518fb2b1a2b8c2471fa77b69436 ]
Fix a leak of the cell refcount in afs_lookup_cell_rcu() caused by the default error not being cleared when a NULL cell name is passed and the workstation default cell is used.
Also add a check at the end to make sure we don't leak a cell ref if we're going to be returning an error.
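Both halves of the fix follow from the same invariant: never return an error while still holding a reference. A small illustrative C sketch of the bug pattern and the two fixes (all names are made up for the example):

#include <errno.h>
#include <stddef.h>

struct cell { int refcount; };

static void get_cell(struct cell *c) { c->refcount++; }
static void put_cell(struct cell *c) { c->refcount--; }

/* Look up a cell, returning it with a reference held, or NULL with
 * *ret set.  The bug pattern: taking the reference without clearing a
 * previously set default error leaks the reference once the error is
 * returned to the caller. */
static struct cell *lookup_cell(struct cell *ws_cell, int *ret)
{
	struct cell *cell = NULL;

	*ret = -EDESTADDRREQ;		/* pessimistic default */
	if (ws_cell) {
		get_cell(ws_cell);
		cell = ws_cell;
		*ret = 0;		/* the missing line: clear the error */
	}

	/* Backstop: never return an error while still holding a ref. */
	if (*ret != 0 && cell) {
		put_cell(cell);
		cell = NULL;
	}
	return cell;
}

int main(void)
{
	struct cell ws = { .refcount = 1 };
	int ret;

	return lookup_cell(&ws, &ret) ? 0 : 1;
}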
This leak results in an assertion like the following when the kafs module is unloaded:
AFS: Assertion failed
2 == 1 is false
0x2 == 0x1 is false
------------[ cut here ]------------
kernel BUG at fs/afs/cell.c:770!
...
RIP: 0010:afs_manage_cells+0x220/0x42f [kafs]
...
 process_one_work+0x4c2/0x82c
 ? pool_mayday_timeout+0x1e1/0x1e1
 ? do_raw_spin_lock+0x134/0x175
 worker_thread+0x336/0x4a6
 ? rescuer_thread+0x4af/0x4af
 kthread+0x1de/0x1ee
 ? kthread_park+0xd4/0xd4
 ret_from_fork+0x24/0x30
Fixes: 989782dcdc91 ("afs: Overhaul cell database management")
Signed-off-by: David Howells dhowells@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/afs/cell.c | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index a2a87117d2626..fd5133e26a38b 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
 		cell = rcu_dereference_raw(net->ws_cell);
 		if (cell) {
 			afs_get_cell(cell);
+			ret = 0;
 			break;
 		}
 		ret = -EDESTADDRREQ;
@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,

 	done_seqretry(&net->cells_lock, seq);

+	if (ret != 0 && cell)
+		afs_put_cell(net, cell);
+
 	return ret == 0 ? cell : ERR_PTR(ret);
 }
From: Marc Dionne marc.dionne@auristor.com
[ Upstream commit c4c613ff08d92e72bf64a65ec35a2c3aa1cfcd06 ]
The afs_lookup trace event can cause the following:
[  216.576777] BUG: kernel NULL pointer dereference, address: 000000000000023b
[  216.576803] #PF: supervisor read access in kernel mode
[  216.576813] #PF: error_code(0x0000) - not-present page
...
[  216.576913] RIP: 0010:trace_event_raw_event_afs_lookup+0x9e/0x1c0 [kafs]
If the inode from afs_do_lookup() is an error other than ENOENT, or if it is ENOENT and afs_try_auto_mntpt() returns an error, the trace event will try to dereference the error pointer as a valid pointer.
Use IS_ERR_OR_NULL so that only a valid pointer, or NULL, is passed to the trace event.
Ideally the trace would include the error value, but for now just avoid the oops.
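For context, the kernel encodes small negative errno values in the pointer itself, so dereferencing an unchecked return oopses exactly like this. A simplified userspace rendition of the ERR_PTR convention and the IS_ERR_OR_NULL guard (the macros below are cut-down approximations, not the kernel's exact definitions):

#include <stdio.h>

/* Cut-down approximations of the kernel's ERR_PTR helpers: small
 * negative errno values are smuggled through the pointer itself. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR_OR_NULL(p) \
	((p) == NULL || (unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

struct inode { int ino; };

static void trace_lookup(struct inode *inode)
{
	/* Dereferencing an ERR_PTR here would be the reported oops. */
	printf("ino=%d\n", inode ? inode->ino : -1);
}

int main(void)
{
	struct inode *inode = ERR_PTR(-111);	/* an error, not an inode */

	/* The fix: hand the tracepoint a real pointer or NULL, never an
	 * error value. */
	trace_lookup(IS_ERR_OR_NULL(inode) ? NULL : inode);
	return 0;
}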
Fixes: 80548b03991f ("afs: Add more tracepoints")
Signed-off-by: Marc Dionne marc.dionne@auristor.com
Signed-off-by: David Howells dhowells@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/afs/dir.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index da9563d62b327..c50cc3f6f4553 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -952,7 +952,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 				      inode ? AFS_FS_I(inode) : NULL);
 	} else {
 		trace_afs_lookup(dvnode, &dentry->d_name,
-				 inode ? AFS_FS_I(inode) : NULL);
+				 IS_ERR_OR_NULL(inode) ? NULL
+				 : AFS_FS_I(inode));
 	}
 	return d;
 }
From: YueHaibing yuehaibing@huawei.com
[ Upstream commit 7533be858f5b9a036b9f91556a3ed70786abca8e ]
It seems that 'yfs_RXYFSStoreOpaqueACL2' should be used in yfs_fs_store_opaque_acl2().
Fixes: f5e4546347bc ("afs: Implement YFS ACL setting")
Signed-off-by: YueHaibing yuehaibing@huawei.com
Signed-off-by: David Howells dhowells@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/afs/yfsclient.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 18722aaeda33a..a1baf3f1f14d1 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -2155,7 +2155,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
 	       key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);

 	size = round_up(acl->size, 4);
-	call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+	call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
 				   sizeof(__be32) * 2 +
 				   sizeof(struct yfs_xdr_YFSFid) +
 				   sizeof(__be32) + size,
From: Selvin Xavier selvin.xavier@broadcom.com
[ Upstream commit d37b1e534071ab1983e7c85273234b132c77591a ]
The driver copies FW commands to the HW queue in units of 16 bytes. Some of the command structures are not an exact multiple of 16, so while copying data from those structures KASAN reports stack-out-of-bounds reads. The following error is reported.
[ 1337.530155] ==================================================================
[ 1337.530277] BUG: KASAN: stack-out-of-bounds in bnxt_qplib_rcfw_send_message+0x40a/0x850 [bnxt_re]
[ 1337.530413] Read of size 16 at addr ffff888725477a48 by task rmmod/2785

[ 1337.530540] CPU: 5 PID: 2785 Comm: rmmod Tainted: G OE 5.2.0-rc6+ #75
[ 1337.530541] Hardware name: Dell Inc. PowerEdge R730/0599V5, BIOS 1.0.4 08/28/2014
[ 1337.530542] Call Trace:
[ 1337.530548]  dump_stack+0x5b/0x90
[ 1337.530556]  ? bnxt_qplib_rcfw_send_message+0x40a/0x850 [bnxt_re]
[ 1337.530560]  print_address_description+0x65/0x22e
[ 1337.530568]  ? bnxt_qplib_rcfw_send_message+0x40a/0x850 [bnxt_re]
[ 1337.530575]  ? bnxt_qplib_rcfw_send_message+0x40a/0x850 [bnxt_re]
[ 1337.530577]  __kasan_report.cold.3+0x37/0x77
[ 1337.530581]  ? _raw_write_trylock+0x10/0xe0
[ 1337.530588]  ? bnxt_qplib_rcfw_send_message+0x40a/0x850 [bnxt_re]
[ 1337.530590]  kasan_report+0xe/0x20
[ 1337.530592]  memcpy+0x1f/0x50
[ 1337.530600]  bnxt_qplib_rcfw_send_message+0x40a/0x850 [bnxt_re]
[ 1337.530608]  ? bnxt_qplib_creq_irq+0xa0/0xa0 [bnxt_re]
[ 1337.530611]  ? xas_create+0x3aa/0x5f0
[ 1337.530613]  ? xas_start+0x77/0x110
[ 1337.530615]  ? xas_clear_mark+0x34/0xd0
[ 1337.530623]  bnxt_qplib_free_mrw+0x104/0x1a0 [bnxt_re]
[ 1337.530631]  ? bnxt_qplib_destroy_ah+0x110/0x110 [bnxt_re]
[ 1337.530633]  ? bit_wait_io_timeout+0xc0/0xc0
[ 1337.530641]  bnxt_re_dealloc_mw+0x2c/0x60 [bnxt_re]
[ 1337.530648]  bnxt_re_destroy_fence_mr+0x77/0x1d0 [bnxt_re]
[ 1337.530655]  bnxt_re_dealloc_pd+0x25/0x60 [bnxt_re]
[ 1337.530677]  ib_dealloc_pd_user+0xbe/0xe0 [ib_core]
[ 1337.530683]  srpt_remove_one+0x5de/0x690 [ib_srpt]
[ 1337.530689]  ? __srpt_close_all_ch+0xc0/0xc0 [ib_srpt]
[ 1337.530692]  ? xa_load+0x87/0xe0
...
[ 1337.530840]  do_syscall_64+0x6d/0x1f0
[ 1337.530843]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[ 1337.530845] RIP: 0033:0x7ff5b389035b
[ 1337.530848] Code: 73 01 c3 48 8b 0d 2d 0b 2c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa b8 b0 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d fd 0a 2c 00 f7 d8 64 89 01 48
[ 1337.530849] RSP: 002b:00007fff83425c28 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0
[ 1337.530852] RAX: ffffffffffffffda RBX: 00005596443e6750 RCX: 00007ff5b389035b
[ 1337.530853] RDX: 000000000000000a RSI: 0000000000000800 RDI: 00005596443e67b8
[ 1337.530854] RBP: 0000000000000000 R08: 00007fff83424ba1 R09: 0000000000000000
[ 1337.530856] R10: 00007ff5b3902960 R11: 0000000000000206 R12: 00007fff83425e50
[ 1337.530857] R13: 00007fff8342673c R14: 00005596443e6260 R15: 00005596443e6750

[ 1337.530885] The buggy address belongs to the page:
[ 1337.530962] page:ffffea001c951dc0 refcount:0 mapcount:0 mapping:0000000000000000 index:0x0
[ 1337.530964] flags: 0x57ffffc0000000()
[ 1337.530967] raw: 0057ffffc0000000 0000000000000000 ffffffff1c950101 0000000000000000
[ 1337.530970] raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000
[ 1337.530970] page dumped because: kasan: bad access detected

[ 1337.530996] Memory state around the buggy address:
[ 1337.531072]  ffff888725477900: 00 00 00 00 f1 f1 f1 f1 00 00 00 00 00 f2 f2 f2
[ 1337.531180]  ffff888725477980: 00 00 00 00 00 00 00 00 00 00 00 f1 f1 f1 f1 00
[ 1337.531288] >ffff888725477a00: 00 f2 f2 f2 f2 f2 f2 00 00 00 f2 00 00 00 00 00
[ 1337.531393]                                            ^
[ 1337.531478]  ffff888725477a80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 1337.531585]  ffff888725477b00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 1337.531691] ==================================================================
Fix this by passing the exact size of each FW command to bnxt_qplib_rcfw_send_message() in req->cmd_size. Before sending the command to the HW, convert req->cmd_size to the number of 16-byte units.
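The unit conversion itself is just a round-up division; the bug was reading slots-times-16 bytes out of a structure smaller than that. A tiny standalone sketch of the corrected arithmetic (the same round-up as the new bnxt_qplib_set_cmd_slots() helper in the diff below; the buffer names here are invented):

#include <stdio.h>
#include <string.h>

#define CMDQE_UNITS 16	/* one command-queue slot is 16 bytes */

/* Round a byte count up to whole 16-byte slots. */
static unsigned int cmd_slots(unsigned int bytes)
{
	return (bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
}

int main(void)
{
	struct { char payload[24]; } req = { { 0 } }; /* not a multiple of 16 */
	char queue[2 * CMDQE_UNITS] = { 0 };

	/* Copy only sizeof(req) bytes into the slots; reading
	 * cmd_slots() * 16 bytes straight off the stack structure is
	 * exactly the out-of-bounds access KASAN flagged. */
	memcpy(queue, &req, sizeof(req));

	printf("%zu bytes -> %u slot(s)\n", sizeof(req), cmd_slots(sizeof(req)));
	return 0;
}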
Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver")
Signed-off-by: Selvin Xavier selvin.xavier@broadcom.com
Link: https://lore.kernel.org/r/1566468170-489-1-git-send-email-selvin.xavier@broa...
Signed-off-by: Doug Ledford dledford@redhat.com
Signed-off-by: Sasha Levin sashal@kernel.org
---
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c |  8 +++++++-
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 11 ++++++++---
 2 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 48b04d2f175f9..60c8f76aab33d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 		spin_unlock_irqrestore(&cmdq->lock, flags);
 		return -EBUSY;
 	}
+
+	size = req->cmd_size;
+	/* change the cmd_size to the number of 16byte cmdq unit.
+	 * req->cmd_size is modified here
+	 */
+	bnxt_qplib_set_cmd_slots(req);
+
 	memset(resp, 0, sizeof(*resp));
 	crsqe->resp = (struct creq_qp_event *)resp;
 	crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,

 	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
 	preq = (u8 *)req;
-	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
 	do {
 		/* Locate the next cmdq slot */
 		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2138533bb6426..dfeadc192e174 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -55,9 +55,7 @@
 do {								\
 	memset(&(req), 0, sizeof((req)));			\
 	(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
-	(req).cmd_size = (sizeof((req)) +			\
-			  BNXT_QPLIB_CMDQE_UNITS - 1) /		\
-			  BNXT_QPLIB_CMDQE_UNITS;		\
+	(req).cmd_size = sizeof((req));				\
 	(req).flags = cpu_to_le16(cmd_flags);			\
 } while (0)

@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
 			 BNXT_QPLIB_CMDQE_UNITS);
 }

+/* Set the cmd_size to a factor of CMDQE unit */
+static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+{
+	req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+			BNXT_QPLIB_CMDQE_UNITS;
+}
+
 #define MAX_CMDQ_IDX(depth)		((depth) - 1)

 static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
From: Jens Axboe axboe@kernel.dk
[ Upstream commit 08f5439f1df25a6cf6cf4c72cf6c13025599ce67 ]
The outer poll loop checks for whether we need to reschedule, and returns to userspace if we do. However, it's possible to get stuck in the inner loop as well, if the CPU we are running on needs to reschedule to finish the IO work.
Add the need_resched() check in the inner loop as well. This fixes a potential hang if the kernel is configured with CONFIG_PREEMPT_VOLUNTARY=y.
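The shape of the fix is a busy-poll loop that also bails out when the scheduler wants the CPU back. A hedged userspace sketch of that loop (need_resched() is stubbed out here; the real one queries the scheduler, and the ctx type is a stand-in):

#include <stdbool.h>
#include <stddef.h>

struct ctx { size_t pending; };

/* Stub stand-in: the kernel's need_resched() reports that the
 * scheduler wants this CPU back. */
static bool need_resched(void) { return false; }

static int do_iopoll(struct ctx *ctx, unsigned int *nr_events)
{
	if (ctx->pending) {
		ctx->pending--;
		(*nr_events)++;
	}
	return 0;
}

/* Inner polling loop: without the need_resched() check, a kernel built
 * with CONFIG_PREEMPT_VOLUNTARY can spin here forever if completing
 * the I/O needs this very CPU. */
static int getevents(struct ctx *ctx, unsigned int *nr_events)
{
	while (ctx->pending && !need_resched()) {
		int ret = do_iopoll(ctx, nr_events);

		if (ret < 0)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct ctx ctx = { .pending = 3 };
	unsigned int nr_events = 0;

	return getevents(&ctx, &nr_events);
}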
Reported-by: Sagi Grimberg sagi@grimberg.me
Reviewed-by: Sagi Grimberg sagi@grimberg.me
Tested-by: Sagi Grimberg sagi@grimberg.me
Signed-off-by: Jens Axboe axboe@kernel.dk
Signed-off-by: Sasha Levin sashal@kernel.org
---
 fs/io_uring.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 83e3cede11220..03cd8f5bba850 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -716,7 +716,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
 			       long min)
 {
-	while (!list_empty(&ctx->poll_list)) {
+	while (!list_empty(&ctx->poll_list) && !need_resched()) {
 		int ret;

 		ret = io_do_iopoll(ctx, nr_events, min);
@@ -743,6 +743,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
 		unsigned int nr_events = 0;

 		io_iopoll_getevents(ctx, &nr_events, 1);
+
+		/*
+		 * Ensure we allow local-to-the-cpu processing to take place,
+		 * in this case we need to ensure that we reap all events.
+		 */
+		cond_resched();
 	}
 	mutex_unlock(&ctx->uring_lock);
 }
From: Linus Walleij linus.walleij@linaro.org
[ Upstream commit 48057ed1840fde9239b1e000bea1a0a1f07c5e99 ]
The new API for registering a gpio_irq_chip along with a gpio_chip has a different semantic ordering than the old API which added the irqchip explicitly after registering the gpio_chip.
Move the calls to add the gpio_irq_chip *last* in the function, so that the different hooks setting up OF and ACPI and machine gpio_chips are called *before* we try to register the interrupts, preserving the older semantic order.
This cropped up in the PL061 driver which used to work fine with no special ACPI quirks, but started to misbehave using the new API.
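The reordering works because kernel-style error unwinding is LIFO: whatever is set up last is torn down first in the goto ladder, so moving gpiochip_add_irqchip() to the end also moves its teardown to the top, as the diff below shows. A generic sketch of that idiom (the step names are invented for illustration):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("setup %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

/* A later setup step unwinds earlier: registering the irqchip last
 * means nothing depends on it yet while the OF/ACPI hooks run. */
static int register_chip(void)
{
	int ret;

	ret = step("of/acpi hooks", 0);
	if (ret)
		goto err_out;

	ret = step("irqchip", 1);	/* registered last ... */
	if (ret)
		goto err_remove_hooks;	/* ... so earlier steps unwind */

	return 0;

err_remove_hooks:
	undo("of/acpi hooks");
err_out:
	return ret;
}

int main(void)
{
	return register_chip() ? 1 : 0;
}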
Fixes: e0d897289813 ("gpio: Implement tighter IRQ chip integration")
Cc: Thierry Reding treding@nvidia.com
Cc: Grygorii Strashko grygorii.strashko@ti.com
Cc: Andy Shevchenko andy.shevchenko@gmail.com
Reported-by: Wei Xu xuwei5@hisilicon.com
Tested-by: Wei Xu xuwei5@hisilicon.com
Reported-by: Andy Shevchenko andy.shevchenko@gmail.com
Signed-off-by: Linus Walleij linus.walleij@linaro.org
Link: https://lore.kernel.org/r/20190820080527.11796-1-linus.walleij@linaro.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 drivers/gpio/gpiolib.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 4f333d6f2e230..42f9e00ff4d1b 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1371,21 +1371,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	if (status)
 		goto err_remove_from_list;

-	status = gpiochip_irqchip_init_valid_mask(chip);
-	if (status)
-		goto err_remove_from_list;
-
 	status = gpiochip_alloc_valid_mask(chip);
 	if (status)
-		goto err_remove_irqchip_mask;
-
-	status = gpiochip_add_irqchip(chip, lock_key, request_key);
-	if (status)
-		goto err_free_gpiochip_mask;
+		goto err_remove_from_list;

 	status = of_gpiochip_add(chip);
 	if (status)
-		goto err_remove_chip;
+		goto err_free_gpiochip_mask;

 	status = gpiochip_init_valid_mask(chip);
 	if (status)
@@ -1411,6 +1403,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,

 	machine_gpiochip_add(chip);

+	status = gpiochip_irqchip_init_valid_mask(chip);
+	if (status)
+		goto err_remove_acpi_chip;
+
+	status = gpiochip_add_irqchip(chip, lock_key, request_key);
+	if (status)
+		goto err_remove_irqchip_mask;
+
 	/*
 	 * By first adding the chardev, and then adding the device,
 	 * we get a device node entry in sysfs under
@@ -1422,21 +1422,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	if (gpiolib_initialized) {
 		status = gpiochip_setup_dev(gdev);
 		if (status)
-			goto err_remove_acpi_chip;
+			goto err_remove_irqchip;
 	}
 	return 0;

+err_remove_irqchip:
+	gpiochip_irqchip_remove(chip);
+err_remove_irqchip_mask:
+	gpiochip_irqchip_free_valid_mask(chip);
 err_remove_acpi_chip:
 	acpi_gpiochip_remove(chip);
 err_remove_of_chip:
 	gpiochip_free_hogs(chip);
 	of_gpiochip_remove(chip);
-err_remove_chip:
-	gpiochip_irqchip_remove(chip);
 err_free_gpiochip_mask:
 	gpiochip_free_valid_mask(chip);
-err_remove_irqchip_mask:
-	gpiochip_irqchip_free_valid_mask(chip);
 err_remove_from_list:
 	spin_lock_irqsave(&gpio_lock, flags);
 	list_del(&gdev->list);
From: Andre Przywara andre.przywara@arm.com
[ Upstream commit 2e16f3e926ed48373c98edea85c6ad0ef69425d1 ]
At the moment we initialise the target *mask* of a virtual IRQ to the VCPU it belongs to, even though this mask is only defined for GICv2 and quickly runs out of bits for many GICv3 guests. This behaviour triggers a UBSAN complaint for more than 32 VCPUs:

------
[ 5659.462377] UBSAN: Undefined behaviour in virt/kvm/arm/vgic/vgic-init.c:223:21
[ 5659.471689] shift exponent 32 is too large for 32-bit type 'unsigned int'
------

Also for GICv3 guests the reporting of TARGET in the "vgic-state" debugfs dump is wrong, due to this very same problem.
Because there is no requirement to create the VGIC device before the VCPUs (and QEMU actually does it the other way round), we can't safely initialise mpidr or targets in kvm_vgic_vcpu_init(). But since we touch every private IRQ for each VCPU anyway later (in vgic_init()), we can just move the initialisation of those fields into there, where we definitely know the VGIC type.
On the way make sure we really have either a VGICv2 or a VGICv3 device, since the existing code is just checking for "VGICv3 or not", silently ignoring the uninitialised case.
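The UBSAN complaint comes from computing 1U << vcpu_id in a 32-bit field: for vcpu_id >= 32 the shift is undefined behaviour, and GICv2's small target bitmask could never represent such a VCPU anyway. A self-contained sketch of a guarded version (illustrative only; the actual fix sidesteps the problem by setting targets for GICv2 guests alone, where the IDs are small by construction):

#include <stdint.h>
#include <stdio.h>

/* GICv2 encodes an interrupt's target as a small CPU bitmask, so
 * 1U << vcpu_id only makes sense for low IDs; shifting a 32-bit type
 * by 32 or more is undefined behaviour, which is what UBSAN flagged. */
static uint32_t gicv2_target_mask(unsigned int vcpu_id)
{
	if (vcpu_id >= 32)	/* would be UB, and GICv2 can't encode it */
		return 0;
	return 1U << vcpu_id;
}

int main(void)
{
	printf("vcpu 3  -> 0x%x\n", gicv2_target_mask(3));
	printf("vcpu 40 -> 0x%x\n", gicv2_target_mask(40));	/* no UB */
	return 0;
}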
Signed-off-by: Andre Przywara andre.przywara@arm.com
Reported-by: Dave Martin dave.martin@arm.com
Tested-by: Julien Grall julien.grall@arm.com
Signed-off-by: Marc Zyngier maz@kernel.org
Signed-off-by: Sasha Levin sashal@kernel.org
---
 virt/kvm/arm/vgic/vgic-init.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index bdbc297d06fb4..e621b5d45b278 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -8,6 +8,7 @@
 #include <linux/cpu.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include "vgic.h"

@@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
 		kref_init(&irq->refcount);
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		switch (dist->vgic_model) {
+		case KVM_DEV_TYPE_ARM_VGIC_V2:
 			irq->targets = 0;
 			irq->group = 0;
-		} else {
+			break;
+		case KVM_DEV_TYPE_ARM_VGIC_V3:
 			irq->mpidr = 0;
 			irq->group = 1;
+			break;
+		default:
+			kfree(dist->spis);
+			return -EINVAL;
 		}
 	}
 	return 0;
@@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;
-		irq->targets = 1U << vcpu->vcpu_id;
 		kref_init(&irq->refcount);
 		if (vgic_irq_is_sgi(i)) {
 			/* SGIs */
@@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 			/* PPIs */
 			irq->config = VGIC_CONFIG_LEVEL;
 		}
-
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
-			irq->group = 1;
-		else
-			irq->group = 0;
 	}

 	if (!irqchip_in_kernel(vcpu->kvm))
@@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm)

 		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
 			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
-			if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+			switch (dist->vgic_model) {
+			case KVM_DEV_TYPE_ARM_VGIC_V3:
 				irq->group = 1;
-			else
+				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+				break;
+			case KVM_DEV_TYPE_ARM_VGIC_V2:
 				irq->group = 0;
+				irq->targets = 1U << idx;
+				break;
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
 		}
 	}