From: Iago Toral Quiroga <itoral@igalia.com>
[ Upstream commit 0d352a3a8a1f26168d09f7073e61bb4b328e3bb9 ]
If the initialization of the job fails, we need to kfree() it before returning.
Signed-off-by: Iago Toral Quiroga <itoral@igalia.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20190916071125.5255-1-itoral@i...
Fixes: a783a09ee76d ("drm/v3d: Refactor job management.")
Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/v3d/v3d_gem.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 19c092d75266b..6316bf3646af5 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -565,6 +565,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 		ret = v3d_job_init(v3d, file_priv, &bin->base,
 				   v3d_job_free, args->in_sync_bcl);
 		if (ret) {
 			v3d_job_put(&render->base);
+			kfree(bin);
 			return ret;
From: Iurii Zaikin <yzaikin@google.com>
[ Upstream commit 2cb80dbbbaba4f2f86f686c34cb79ea5cbfb0edb ]
KUnit tests for the initialized-data behavior of proc_dointvec that is explicitly checked in the code. Includes basic parsing tests, including int min/max overflow.
Signed-off-by: Iurii Zaikin <yzaikin@google.com>
Signed-off-by: Brendan Higgins <brendanhiggins@google.com>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Acked-by: Luis Chamberlain <mcgrof@kernel.org>
Reviewed-by: Stephen Boyd <sboyd@kernel.org>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 kernel/Makefile      |   2 +
 kernel/sysctl-test.c | 392 +++++++++++++++++++++++++++++++++++++++++++
 lib/Kconfig.debug    |  11 ++
 3 files changed, 405 insertions(+)
 create mode 100644 kernel/sysctl-test.c
diff --git a/kernel/Makefile b/kernel/Makefile index 42557f251fea6..f2cc0d118a0bc 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -115,6 +115,8 @@ obj-$(CONFIG_TORTURE_TEST) += torture.o obj-$(CONFIG_HAS_IOMEM) += iomem.o obj-$(CONFIG_RSEQ) += rseq.o
+obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o + obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o KASAN_SANITIZE_stackleak.o := n KCOV_INSTRUMENT_stackleak.o := n diff --git a/kernel/sysctl-test.c b/kernel/sysctl-test.c new file mode 100644 index 0000000000000..2a63241a8453b --- /dev/null +++ b/kernel/sysctl-test.c @@ -0,0 +1,392 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit test of proc sysctl. + */ + +#include <kunit/test.h> +#include <linux/sysctl.h> + +#define KUNIT_PROC_READ 0 +#define KUNIT_PROC_WRITE 1 + +static int i_zero; +static int i_one_hundred = 100; + +/* + * Test that proc_dointvec will not try to use a NULL .data field even when the + * length is non-zero. + */ +static void sysctl_test_api_dointvec_null_tbl_data(struct kunit *test) +{ + struct ctl_table null_data_table = { + .procname = "foo", + /* + * Here we are testing that proc_dointvec behaves correctly when + * we give it a NULL .data field. Normally this would point to a + * piece of memory where the value would be stored. + */ + .data = NULL, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + /* + * proc_dointvec expects a buffer in user space, so we allocate one. We + * also need to cast it to __user so sparse doesn't get mad. + */ + void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), + GFP_USER); + size_t len; + loff_t pos; + + /* + * We don't care what the starting length is since proc_dointvec should + * not try to read because .data is NULL. + */ + len = 1234; + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table, + KUNIT_PROC_READ, buffer, &len, + &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); + + /* + * See above. + */ + len = 1234; + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&null_data_table, + KUNIT_PROC_WRITE, buffer, &len, + &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); +} + +/* + * Similar to the previous test, we create a struct ctrl_table that has a .data + * field that proc_dointvec cannot do anything with; however, this time it is + * because we tell proc_dointvec that the size is 0. + */ +static void sysctl_test_api_dointvec_table_maxlen_unset(struct kunit *test) +{ + int data = 0; + struct ctl_table data_maxlen_unset_table = { + .procname = "foo", + .data = &data, + /* + * So .data is no longer NULL, but we tell proc_dointvec its + * length is 0, so it still shouldn't try to use it. + */ + .maxlen = 0, + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), + GFP_USER); + size_t len; + loff_t pos; + + /* + * As before, we don't care what buffer length is because proc_dointvec + * cannot do anything because its internal .data buffer has zero length. + */ + len = 1234; + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table, + KUNIT_PROC_READ, buffer, &len, + &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); + + /* + * See previous comment. + */ + len = 1234; + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&data_maxlen_unset_table, + KUNIT_PROC_WRITE, buffer, &len, + &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); +} + +/* + * Here we provide a valid struct ctl_table, but we try to read and write from + * it using a buffer of zero length, so it should still fail in a similar way as + * before. + */ +static void sysctl_test_api_dointvec_table_len_is_zero(struct kunit *test) +{ + int data = 0; + /* Good table. 
*/ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), + GFP_USER); + /* + * However, now our read/write buffer has zero length. + */ + size_t len = 0; + loff_t pos; + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer, + &len, &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, buffer, + &len, &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); +} + +/* + * Test that proc_dointvec refuses to read when the file position is non-zero. + */ +static void sysctl_test_api_dointvec_table_read_but_position_set( + struct kunit *test) +{ + int data = 0; + /* Good table. */ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + void __user *buffer = (void __user *)kunit_kzalloc(test, sizeof(int), + GFP_USER); + /* + * We don't care about our buffer length because we start off with a + * non-zero file position. + */ + size_t len = 1234; + /* + * proc_dointvec should refuse to read into the buffer since the file + * pos is non-zero. + */ + loff_t pos = 1; + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, buffer, + &len, &pos)); + KUNIT_EXPECT_EQ(test, (size_t)0, len); +} + +/* + * Test that we can read a two digit number in a sufficiently size buffer. + * Nothing fancy. + */ +static void sysctl_test_dointvec_read_happy_single_positive(struct kunit *test) +{ + int data = 0; + /* Good table. */ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + size_t len = 4; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + /* Store 13 in the data field. */ + *((int *)table.data) = 13; + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, + user_buffer, &len, &pos)); + KUNIT_ASSERT_EQ(test, (size_t)3, len); + buffer[len] = '\0'; + /* And we read 13 back out. */ + KUNIT_EXPECT_STREQ(test, "13\n", buffer); +} + +/* + * Same as previous test, just now with negative numbers. + */ +static void sysctl_test_dointvec_read_happy_single_negative(struct kunit *test) +{ + int data = 0; + /* Good table. */ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + size_t len = 5; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + *((int *)table.data) = -16; + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_READ, + user_buffer, &len, &pos)); + KUNIT_ASSERT_EQ(test, (size_t)4, len); + buffer[len] = '\0'; + KUNIT_EXPECT_STREQ(test, "-16\n", (char *)buffer); +} + +/* + * Test that a simple positive write works. + */ +static void sysctl_test_dointvec_write_happy_single_positive(struct kunit *test) +{ + int data = 0; + /* Good table. 
*/ + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + char input[] = "9"; + size_t len = sizeof(input) - 1; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + + memcpy(buffer, input, len); + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, + user_buffer, &len, &pos)); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos); + KUNIT_EXPECT_EQ(test, 9, *((int *)table.data)); +} + +/* + * Same as previous test, but now with negative numbers. + */ +static void sysctl_test_dointvec_write_happy_single_negative(struct kunit *test) +{ + int data = 0; + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + char input[] = "-9"; + size_t len = sizeof(input) - 1; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + + memcpy(buffer, input, len); + + KUNIT_EXPECT_EQ(test, 0, proc_dointvec(&table, KUNIT_PROC_WRITE, + user_buffer, &len, &pos)); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, len); + KUNIT_EXPECT_EQ(test, sizeof(input) - 1, (size_t)pos); + KUNIT_EXPECT_EQ(test, -9, *((int *)table.data)); +} + +/* + * Test that writing a value smaller than the minimum possible value is not + * allowed. + */ +static void sysctl_test_api_dointvec_write_single_less_int_min( + struct kunit *test) +{ + int data = 0; + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + size_t max_len = 32, len = max_len; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, max_len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + unsigned long abs_of_less_than_min = (unsigned long)INT_MAX + - (INT_MAX + INT_MIN) + 1; + + /* + * We use this rigmarole to create a string that contains a value one + * less than the minimum accepted value. + */ + KUNIT_ASSERT_LT(test, + (size_t)snprintf(buffer, max_len, "-%lu", + abs_of_less_than_min), + max_len); + + KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE, + user_buffer, &len, &pos)); + KUNIT_EXPECT_EQ(test, max_len, len); + KUNIT_EXPECT_EQ(test, 0, *((int *)table.data)); +} + +/* + * Test that writing the maximum possible value works. 
+ */ +static void sysctl_test_api_dointvec_write_single_greater_int_max( + struct kunit *test) +{ + int data = 0; + struct ctl_table table = { + .procname = "foo", + .data = &data, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }; + size_t max_len = 32, len = max_len; + loff_t pos = 0; + char *buffer = kunit_kzalloc(test, max_len, GFP_USER); + char __user *user_buffer = (char __user *)buffer; + unsigned long greater_than_max = (unsigned long)INT_MAX + 1; + + KUNIT_ASSERT_GT(test, greater_than_max, (unsigned long)INT_MAX); + KUNIT_ASSERT_LT(test, (size_t)snprintf(buffer, max_len, "%lu", + greater_than_max), + max_len); + KUNIT_EXPECT_EQ(test, -EINVAL, proc_dointvec(&table, KUNIT_PROC_WRITE, + user_buffer, &len, &pos)); + KUNIT_ASSERT_EQ(test, max_len, len); + KUNIT_EXPECT_EQ(test, 0, *((int *)table.data)); +} + +static struct kunit_case sysctl_test_cases[] = { + KUNIT_CASE(sysctl_test_api_dointvec_null_tbl_data), + KUNIT_CASE(sysctl_test_api_dointvec_table_maxlen_unset), + KUNIT_CASE(sysctl_test_api_dointvec_table_len_is_zero), + KUNIT_CASE(sysctl_test_api_dointvec_table_read_but_position_set), + KUNIT_CASE(sysctl_test_dointvec_read_happy_single_positive), + KUNIT_CASE(sysctl_test_dointvec_read_happy_single_negative), + KUNIT_CASE(sysctl_test_dointvec_write_happy_single_positive), + KUNIT_CASE(sysctl_test_dointvec_write_happy_single_negative), + KUNIT_CASE(sysctl_test_api_dointvec_write_single_less_int_min), + KUNIT_CASE(sysctl_test_api_dointvec_write_single_greater_int_max), + {} +}; + +static struct kunit_suite sysctl_test_suite = { + .name = "sysctl_test", + .test_cases = sysctl_test_cases, +}; + +kunit_test_suite(sysctl_test_suite); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6118d99117daa..ee00c6c8a373e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1939,6 +1939,17 @@ config TEST_SYSCTL
If unsure, say N.
+config SYSCTL_KUNIT_TEST + bool "KUnit test for sysctl" + depends on KUNIT + help + This builds the proc sysctl unit test, which runs on boot. + Tests the API contract and implementation correctness of sysctl. + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + config TEST_UDELAY tristate "udelay test driver" help
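As a usage aside (not part of the patch): once this option exists, the suite runs at boot in any kernel with CONFIG_KUNIT, or under the in-tree KUnit UML wrapper. A minimal sketch, assuming the tools/testing/kunit/kunit.py workflow and a .kunitconfig of:

  CONFIG_KUNIT=y
  CONFIG_SYSCTL_KUNIT_TEST=y

then running:

  $ ./tools/testing/kunit/kunit.py run

should report the sysctl_test suite results on the console.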
From: James Smart <jsmart2021@gmail.com>
[ Upstream commit 359e10f087dbb7b9c9f3035a8cc4391af45bd651 ]
After exchanging PLOGI on an SLI-3 adapter, the PRLI exchange failed. A link trace showed that the port was assigned a non-zero n_port_id but didn't use that address in the PRLI. The assigned address is set on the port by the CONFIG_LINK mailbox command, but the driver responded to the PRLI before the mailbox command completed, so the PRLI response used the old n_port_id.
Defer the PRLI response until CONFIG_LINK completes.
Link: https://lore.kernel.org/r/20190922035906.10977-2-jsmart2021@gmail.com
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/scsi/lpfc/lpfc_nportdisc.c | 141 +++++++++++++++++++++++------
 1 file changed, 115 insertions(+), 26 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 6961713825585..2a340624bfc99 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -279,6 +279,55 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); }
+/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up + * @phba: pointer to lpfc hba data structure. + * @link_mbox: pointer to CONFIG_LINK mailbox object + * + * This routine is only called if we are SLI3, direct connect pt2pt + * mode and the remote NPort issues the PLOGI after link up. + */ +void +lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) +{ + LPFC_MBOXQ_t *login_mbox; + MAILBOX_t *mb = &link_mbox->u.mb; + struct lpfc_iocbq *save_iocb; + struct lpfc_nodelist *ndlp; + int rc; + + ndlp = link_mbox->ctx_ndlp; + login_mbox = link_mbox->context3; + save_iocb = login_mbox->context3; + link_mbox->context3 = NULL; + login_mbox->context3 = NULL; + + /* Check for CONFIG_LINK error */ + if (mb->mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "4575 CONFIG_LINK fails pt2pt discovery: %x\n", + mb->mbxStatus); + mempool_free(login_mbox, phba->mbox_mem_pool); + mempool_free(link_mbox, phba->mbox_mem_pool); + lpfc_sli_release_iocbq(phba, save_iocb); + return; + } + + /* Now that CONFIG_LINK completed, and our SID is configured, + * we can now proceed with sending the PLOGI ACC. + */ + rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI, + save_iocb, ndlp, login_mbox); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "4576 PLOGI ACC fails pt2pt discovery: %x\n", + rc); + mempool_free(login_mbox, phba->mbox_mem_pool); + } + + mempool_free(link_mbox, phba->mbox_mem_pool); + lpfc_sli_release_iocbq(phba, save_iocb); +} + static int lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) @@ -291,10 +340,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, IOCB_t *icmd; struct serv_parm *sp; uint32_t ed_tov; - LPFC_MBOXQ_t *mbox; + LPFC_MBOXQ_t *link_mbox; + LPFC_MBOXQ_t *login_mbox; + struct lpfc_iocbq *save_iocb; struct ls_rjt stat; uint32_t vid, flag; - int rc; + int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt)); pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; @@ -343,6 +394,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, else ndlp->nlp_fcp_info |= CLASS3;
+ defer_acc = 0; ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; @@ -354,7 +406,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; - /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: @@ -396,6 +447,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ login_mbox = NULL; + link_mbox = NULL; + save_iocb = NULL; + /* Check for Nport to NPort pt2pt protocol */ if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { @@ -423,17 +478,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (phba->sli_rev == LPFC_SLI_REV4) lpfc_issue_reg_vfi(vport); else { - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (mbox == NULL) + defer_acc = 1; + link_mbox = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!link_mbox) goto out; - lpfc_config_link(phba, mbox); - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mempool_free(mbox, phba->mbox_mem_pool); + lpfc_config_link(phba, link_mbox); + link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc; + link_mbox->vport = vport; + link_mbox->ctx_ndlp = ndlp; + + save_iocb = lpfc_sli_get_iocbq(phba); + if (!save_iocb) goto out; - } + /* Save info from cmd IOCB used in rsp */ + memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb, + sizeof(struct lpfc_iocbq)); }
lpfc_can_disctmo(vport); @@ -448,8 +508,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_flag |= NLP_SUPPRESS_RSP; }
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) + login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!login_mbox) goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ @@ -457,21 +517,19 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, - (uint8_t *) sp, mbox, ndlp->nlp_rpi); - if (rc) { - mempool_free(mbox, phba->mbox_mem_pool); + (uint8_t *)sp, login_mbox, ndlp->nlp_rpi); + if (rc) goto out; - }
/* ACC PLOGI rsp command needs to execute first, - * queue this mbox command to be processed later. + * queue this login_mbox command to be processed later. */ - mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; + login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; /* - * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox + * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox * command issued in lpfc_cmpl_els_acc(). */ - mbox->vport = vport; + login_mbox->vport = vport; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); spin_unlock_irq(shost->host_lock); @@ -506,16 +564,47 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, - ndlp, mbox); + ndlp, login_mbox); if (rc) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(login_mbox, phba->mbox_mem_pool); return 1; } - rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); + if (defer_acc) { + /* So the order here should be: + * Issue CONFIG_LINK mbox + * CONFIG_LINK cmpl + * Issue PLOGI ACC + * PLOGI ACC cmpl + * Issue REG_LOGIN mbox + */ + + /* Save the REG_LOGIN mbox for and rcv IOCB copy later */ + link_mbox->context3 = login_mbox; + login_mbox->context3 = save_iocb; + + /* Start the ball rolling by issuing CONFIG_LINK here */ + rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto out; + return 1; + } + + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox); if (rc) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(login_mbox, phba->mbox_mem_pool); return 1; out: + if (defer_acc) + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "4577 pt2pt discovery failure: %p %p %p\n", + save_iocb, link_mbox, login_mbox); + if (save_iocb) + lpfc_sli_release_iocbq(phba, save_iocb); + if (link_mbox) + mempool_free(link_mbox, phba->mbox_mem_pool); + if (login_mbox) + mempool_free(login_mbox, phba->mbox_mem_pool); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
From: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
[ Upstream commit 764f472ba4a7a0c18107ebfbe1a9f1f5f5a1e411 ]
A memory leak can happen when the diag buffer is released but not unregistered (which is where the buffer is deallocated) by the user. At module unload time the driver does not deallocate the buffer if the buffer is in the released state.

Deallocate the diag buffer at module unload time without any diag buffer status checks.
Link: https://lore.kernel.org/r/1568379890-18347-5-git-send-email-sreekanth.reddy@...
Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/scsi/mpt3sas/mpt3sas_ctl.c | 6 ------
 1 file changed, 6 deletions(-)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d5a62fea8fe3e..bae7cf70ee177 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3717,12 +3717,6 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
 	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
 		if (!ioc->diag_buffer[i])
 			continue;
-		if (!(ioc->diag_buffer_status[i] &
-		    MPT3_DIAG_BUFFER_IS_REGISTERED))
-			continue;
-		if ((ioc->diag_buffer_status[i] &
-		    MPT3_DIAG_BUFFER_IS_RELEASED))
-			continue;
 		dma_free_coherent(&ioc->pdev->dev,
 				  ioc->diag_buffer_sz[i],
 				  ioc->diag_buffer[i],
From: Jonathan Lebon <jlebon@redhat.com>
[ Upstream commit 3e3e24b42043eceb97ed834102c2d094dfd7aaa6 ]
Currently, the SELinux LSM prevents one from setting the `security.selinux` xattr on an inode without a policy first being loaded. However, this restriction is problematic: it makes it impossible to have newly created files with the correct label before actually loading the policy.
This is relevant in distributions like Fedora, where the policy is loaded by systemd shortly after pivoting out of the initrd. In such instances, all files created prior to pivoting will be unlabeled. One then has to relabel them after pivoting, an operation which inherently races with other processes trying to access those same files.
Going further, there are use cases for creating the entire root filesystem on first boot from the initrd (e.g. Container Linux supports this today[1], and we'd like to support it in Fedora CoreOS as well[2]). One can imagine doing this in two ways: at the block device level (e.g. laying down a disk image), or at the filesystem level. In the former, labeling can simply be part of the image. But even in the latter scenario, one still really wants to be able to set the right labels when populating the new filesystem.
This patch enables this by changing behaviour in the following two ways:

1. allow `setxattr` if we're not initialized (see the userspace sketch
   after this list)
2. don't try to set the in-core inode SID if we're not initialized;
   instead leave it as `LABEL_INVALID` so that revalidation may be
   attempted at a later time
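To make the use case concrete, here is a minimal userspace sketch (not part of the patch; the path and context string are invented for illustration) of what an initrd-stage labeler can now do before any policy is loaded, which change 1 above permits for a capable caller:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	/* Example context; a real labeler would look this up from
	 * file_contexts. The path is hypothetical. */
	const char *ctx = "system_u:object_r:etc_t:s0";

	if (setxattr("/sysroot/etc/hostname", "security.selinux",
		     ctx, strlen(ctx) + 1, 0) != 0) {
		perror("setxattr"); /* pre-patch: fails before policy load */
		return 1;
	}
	return 0;
}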
Note the first hunk of this patch is mostly the same as a previously discussed one[3], though it was part of a larger series which wasn't accepted.
[1] https://coreos.com/os/docs/latest/root-filesystem-placement.html
[2] https://github.com/coreos/fedora-coreos-tracker/issues/94
[3] https://www.spinics.net/lists/linux-initramfs/msg04593.html
Co-developed-by: Victor Kamensky <kamensky@cisco.com>
Signed-off-by: Victor Kamensky <kamensky@cisco.com>
Signed-off-by: Jonathan Lebon <jlebon@redhat.com>
Signed-off-by: Paul Moore <paul@paul-moore.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 security/selinux/hooks.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 552e73d90fd25..212f48025db81 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3156,6 +3156,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
 		return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
 	}
 
+	if (!selinux_state.initialized)
+		return (inode_owner_or_capable(inode) ? 0 : -EPERM);
+
 	sbsec = inode->i_sb->s_security;
 	if (!(sbsec->flags & SBLABEL_MNT))
 		return -EOPNOTSUPP;
@@ -3239,6 +3242,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
 		return;
 	}
 
+	if (!selinux_state.initialized) {
+		/* If we haven't even been initialized, then we can't validate
+		 * against a policy, so leave the label as invalid. It may
+		 * resolve to a valid label on the next revalidation try if
+		 * we've since initialized.
+		 */
+		return;
+	}
+
 	rc = security_context_to_sid_force(&selinux_state, value, size,
 					   &newsid);
 	if (rc) {
From: zhengbin <zhengbin13@huawei.com>
[ Upstream commit 713f871b30a66dc4daff4d17b760c9916aaaf2e1 ]
In media_device_register_entity(), if media_graph_walk_init() fails, we need to free the previously allocated memory.
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: zhengbin <zhengbin13@huawei.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/media/mc/mc-device.c | 65 ++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 32 deletions(-)
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c index e19df5165e78c..da80883511352 100644 --- a/drivers/media/mc/mc-device.c +++ b/drivers/media/mc/mc-device.c @@ -575,6 +575,38 @@ static void media_device_release(struct media_devnode *devnode) dev_dbg(devnode->parent, "Media device released\n"); }
+static void __media_device_unregister_entity(struct media_entity *entity) +{ + struct media_device *mdev = entity->graph_obj.mdev; + struct media_link *link, *tmp; + struct media_interface *intf; + unsigned int i; + + ida_free(&mdev->entity_internal_idx, entity->internal_idx); + + /* Remove all interface links pointing to this entity */ + list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { + list_for_each_entry_safe(link, tmp, &intf->links, list) { + if (link->entity == entity) + __media_remove_intf_link(link); + } + } + + /* Remove all data links that belong to this entity */ + __media_entity_remove_links(entity); + + /* Remove all pads that belong to this entity */ + for (i = 0; i < entity->num_pads; i++) + media_gobj_destroy(&entity->pads[i].graph_obj); + + /* Remove the entity */ + media_gobj_destroy(&entity->graph_obj); + + /* invoke entity_notify callbacks to handle entity removal?? */ + + entity->graph_obj.mdev = NULL; +} + /** * media_device_register_entity - Register an entity with a media device * @mdev: The media device @@ -632,6 +664,7 @@ int __must_check media_device_register_entity(struct media_device *mdev, */ ret = media_graph_walk_init(&new, mdev); if (ret) { + __media_device_unregister_entity(entity); mutex_unlock(&mdev->graph_mutex); return ret; } @@ -644,38 +677,6 @@ int __must_check media_device_register_entity(struct media_device *mdev, } EXPORT_SYMBOL_GPL(media_device_register_entity);
-static void __media_device_unregister_entity(struct media_entity *entity) -{ - struct media_device *mdev = entity->graph_obj.mdev; - struct media_link *link, *tmp; - struct media_interface *intf; - unsigned int i; - - ida_free(&mdev->entity_internal_idx, entity->internal_idx); - - /* Remove all interface links pointing to this entity */ - list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { - list_for_each_entry_safe(link, tmp, &intf->links, list) { - if (link->entity == entity) - __media_remove_intf_link(link); - } - } - - /* Remove all data links that belong to this entity */ - __media_entity_remove_links(entity); - - /* Remove all pads that belong to this entity */ - for (i = 0; i < entity->num_pads; i++) - media_gobj_destroy(&entity->pads[i].graph_obj); - - /* Remove the entity */ - media_gobj_destroy(&entity->graph_obj); - - /* invoke entity_notify callbacks to handle entity removal?? */ - - entity->graph_obj.mdev = NULL; -} - void media_device_unregister_entity(struct media_entity *entity) { struct media_device *mdev = entity->graph_obj.mdev;
From: Wesley Chalmers <Wesley.Chalmers@amd.com>
[ Upstream commit 6bd0a112ec129615d23aa5d8d3dd0be0243989aa ]
[WHY] When changing DPP global ref clock, DTO adjustments must take effect immediately, or else underflow may occur. It appears the original decision to double-buffer DTO adjustments was made to prevent underflows that occur when raising DPP ref clock (which is not double-buffered), but that same decision causes similar issues when lowering DPP global ref clock. The better solution is to order the adjustments according to whether clocks are being raised or lowered.
Signed-off-by: Wesley Chalmers <Wesley.Chalmers@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Anthony Koo <Anthony.Koo@amd.com>
Acked-by: Leo Li <sunpeng.li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 26 -------------------
 1 file changed, 26 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 16476ed255363..2064366322755 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -119,32 +119,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
 void dccg2_init(struct dccg *dccg)
 {
-	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
-	// Fallthrough intentional to program all available dpp_dto's
-	switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
-	case 6:
-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
-		/* Fall through */
-	case 5:
-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
-		/* Fall through */
-	case 4:
-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
-		/* Fall through */
-	case 3:
-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
-		/* Fall through */
-	case 2:
-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
-		/* Fall through */
-	case 1:
-		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
-		break;
-	default:
-		ASSERT(false);
-		break;
-	}
 }
static const struct dccg_funcs dccg2_funcs = {
From: Jay Cornwall <jay.cornwall@amd.com>
[ Upstream commit c18cc2bb9e064d3a613d8276f2cab3984926a779 ]
Missing synchronization with VGPR restore leads to intermittent VGPR trashing in the user shader.
Signed-off-by: Jay Cornwall <jay.cornwall@amd.com>
Reviewed-by: Yong Zhao <Yong.Zhao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 .../gpu/drm/amd/amdkfd/cwsr_trap_handler.h    | 139 +++++++++---------
 .../amd/amdkfd/cwsr_trap_handler_gfx10.asm    |   1 +
 2 files changed, 71 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 901fe35901656..d3400da6ab643 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x7a5d0000, 0x807c817c, 0x807aff7a, 0x00000080, 0xbf0a717c, 0xbf85fff8, - 0xbf820141, 0xbef4037e, + 0xbf820142, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, 0xbef703ff, @@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304080, 0x725d0100, 0xe0304100, 0x725d0200, 0xe0304180, - 0x725d0300, 0xbf820031, + 0x725d0300, 0xbf820032, 0xbef603ff, 0x01000000, 0xbef20378, 0x8078ff78, 0x00000400, 0xbefc0384, @@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304100, 0x725d0100, 0xe0304200, 0x725d0200, 0xe0304300, - 0x725d0300, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef603ff, - 0x01000000, 0xbefc03ff, - 0x0000006c, 0x80f89078, - 0xf429003a, 0xf0000000, - 0xbf8cc07f, 0x80fc847c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0x80f8a078, - 0xf42d003a, 0xf0000000, - 0xbf8cc07f, 0x80fc887c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0x80f8c078, - 0xf431003a, 0xf0000000, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0xbe883108, - 0xbe8a310a, 0xbe8c310c, - 0xbe8e310e, 0xbf06807c, - 0xbf84fff0, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0xbef603ff, - 0x01000000, 0xf4211bfa, + 0x725d0300, 0xbf8c3f70, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, + 0xbef603ff, 0x01000000, + 0xbefc03ff, 0x0000006c, + 0x80f89078, 0xf429003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc847c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0x80f8a078, 0xf42d003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc887c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0x80f8c078, 0xf431003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xbe883108, 0xbe8a310a, + 0xbe8c310c, 0xbe8e310e, + 0xbf06807c, 0xbf84fff0, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0xbef603ff, 0x01000000, + 0xf4211bfa, 0xf0000000, + 0x80788478, 0xf4211b3a, 0xf0000000, 0x80788478, - 0xf4211b3a, 0xf0000000, - 0x80788478, 0xf4211b7a, + 0xf4211b7a, 0xf0000000, + 0x80788478, 0xf4211eba, 0xf0000000, 0x80788478, - 0xf4211eba, 0xf0000000, - 0x80788478, 0xf4211efa, + 0xf4211efa, 0xf0000000, + 0x80788478, 0xf4211c3a, 0xf0000000, 0x80788478, - 0xf4211c3a, 0xf0000000, - 0x80788478, 0xf4211c7a, + 0xf4211c7a, 0xf0000000, + 0x80788478, 0xf4211e7a, 0xf0000000, 0x80788478, - 0xf4211e7a, 0xf0000000, - 0x80788478, 0xf4211cfa, + 0xf4211cfa, 0xf0000000, + 0x80788478, 0xf4211bba, 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef814, 0xf4211bba, - 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef815, - 0xbef2036d, 
0x876dff72, - 0x0000ffff, 0xbefc036f, - 0xbefe037a, 0xbeff037b, - 0x876f71ff, 0x000003ff, - 0xb9ef4803, 0xb9f9f816, - 0x876f71ff, 0xfffff800, - 0x906f8b6f, 0xb9efa2c3, - 0xb9f3f801, 0x876fff72, - 0xfc000000, 0x906f9a6f, - 0x8f6f906f, 0xbef30380, + 0xb9eef815, 0xbef2036d, + 0x876dff72, 0x0000ffff, + 0xbefc036f, 0xbefe037a, + 0xbeff037b, 0x876f71ff, + 0x000003ff, 0xb9ef4803, + 0xb9f9f816, 0x876f71ff, + 0xfffff800, 0x906f8b6f, + 0xb9efa2c3, 0xb9f3f801, + 0x876fff72, 0xfc000000, + 0x906f9a6f, 0x8f6f906f, + 0xbef30380, 0x88736f73, + 0x876fff72, 0x02000000, + 0x906f996f, 0x8f6f8f6f, 0x88736f73, 0x876fff72, - 0x02000000, 0x906f996f, - 0x8f6f8f6f, 0x88736f73, - 0x876fff72, 0x01000000, - 0x906f986f, 0x8f6f996f, - 0x88736f73, 0x876fff70, - 0x00800000, 0x906f976f, - 0xb9f3f807, 0x87fe7e7e, - 0x87ea6a6a, 0xb9f0f802, - 0xbf8a0000, 0xbe80226c, - 0xbf810000, 0xbf9f0000, + 0x01000000, 0x906f986f, + 0x8f6f996f, 0x88736f73, + 0x876fff70, 0x00800000, + 0x906f976f, 0xb9f3f807, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9f0f802, 0xbf8a0000, + 0xbe80226c, 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf820001, 0xbf8202c4, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index cdaa523ce6bee..4433bda2ce25e 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -758,6 +758,7 @@ L_RESTORE_V0: buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 + s_waitcnt vmcnt(0)
/* restore SGPRs */ //will be 2+8+16*6
From: Chris Wilson <chris@chris-wilson.co.uk>
[ Upstream commit 9c98f021e4e717ffd9948fa65340ea3ef12b7935 ]
Make dma_fence_enable_sw_signaling() behave like its dma_fence_add_callback() and dma_fence_default_wait() counterparts and perform the test to enable signaling under the fence->lock, along with the action to do so. This ensures that should an implementation be trying to flush the cb_list (by signaling) on retirement before freeing the fence, it can do so in a race-free manner.
See also 0fc89b6802ba ("dma-fence: Simply wrap dma_fence_signal_locked with dma_fence_signal").
v2: Refactor all 3 enable_signaling paths to use a common function.
v3: Don't argue, just keep the tracepoint in the existing spot.
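For orientation, a minimal sketch of the provider-side hook this common path drives (invented names, module boilerplate omitted; not from this patch):

#include <linux/dma-fence.h>

static const char *example_driver_name(struct dma_fence *fence)
{
	return "example";
}

static const char *example_timeline_name(struct dma_fence *fence)
{
	return "example-timeline";
}

static bool example_enable_signaling(struct dma_fence *fence)
{
	/* Called with fence->lock held: arm whatever interrupt or worker
	 * will eventually call dma_fence_signal(). Returning false means
	 * the fence is already done and the core signals it immediately. */
	return true;
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name   = example_driver_name,
	.get_timeline_name = example_timeline_name,
	.enable_signaling  = example_enable_signaling,
};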
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004101140.32713-1-chris@c...
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/dma-buf/dma-fence.c | 78 +++++++++++++++++--------------------
 1 file changed, 35 insertions(+), 43 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 2c136aee3e794..052a41e2451c1 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -273,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence) } EXPORT_SYMBOL(dma_fence_free);
+static bool __dma_fence_enable_signaling(struct dma_fence *fence) +{ + bool was_set; + + lockdep_assert_held(fence->lock); + + was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return false; + + if (!was_set && fence->ops->enable_signaling) { + trace_dma_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + dma_fence_signal_locked(fence); + return false; + } + } + + return true; +} + /** * dma_fence_enable_sw_signaling - enable signaling on fence * @fence: the fence to enable @@ -285,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags;
- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && - fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - spin_lock_irqsave(fence->lock, flags); - - if (!fence->ops->enable_signaling(fence)) - dma_fence_signal_locked(fence); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return;
- spin_unlock_irqrestore(fence->lock, flags); - } + spin_lock_irqsave(fence->lock, flags); + __dma_fence_enable_signaling(fence); + spin_unlock_irqrestore(fence->lock, flags); } EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -331,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, { unsigned long flags; int ret = 0; - bool was_set;
if (WARN_ON(!fence || !func)) return -EINVAL; @@ -343,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - ret = -ENOENT; - else if (!was_set && fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - dma_fence_signal_locked(fence); - ret = -ENOENT; - } - } - - if (!ret) { + if (__dma_fence_enable_signaling(fence)) { cb->func = func; list_add_tail(&cb->node, &fence->cb_list); - } else + } else { INIT_LIST_HEAD(&cb->node); + ret = -ENOENT; + } + spin_unlock_irqrestore(fence->lock, flags);
return ret; @@ -461,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) struct default_wait_cb cb; unsigned long flags; signed long ret = timeout ? timeout : 1; - bool was_set;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return ret; @@ -473,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) goto out; }
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!__dma_fence_enable_signaling(fence)) goto out;
- if (!was_set && fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - dma_fence_signal_locked(fence); - goto out; - } - } - if (!timeout) { ret = 0; goto out;
From: Quinn Tran <qutran@marvell.com>
[ Upstream commit c76ae845ea836d6128982dcbd41ac35c81e2de63 ]
Add error handling logic to ELS passthrough relating to NVME devices. The current code does not parse the error code to take the proper recovery action; instead, it re-logins with the same login parameters that encountered the error (e.g. an NPort handle collision).
Link: https://lore.kernel.org/r/20190912180918.6436-10-hmadhani@marvell.com
Signed-off-by: Quinn Tran <qutran@marvell.com>
Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/scsi/qla2xxx/qla_iocb.c | 95 +++++++++++++++++++++++++++++++--
 1 file changed, 92 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index bdf1994251b9b..2e272fc858ed1 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2749,6 +2749,10 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) struct scsi_qla_host *vha = sp->vha; struct event_arg ea; struct qla_work_evt *e; + struct fc_port *conflict_fcport; + port_id_t cid; /* conflict Nport id */ + u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; + u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072, "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", @@ -2760,14 +2764,99 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&lio->u.els_plogi.comp); else { - if (res) { - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } else { + switch (fw_status[0]) { + case CS_DATA_UNDERRUN: + case CS_COMPLETE: memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; ea.data[0] = MBS_COMMAND_COMPLETE; ea.sp = sp; qla24xx_handle_plogi_done_event(vha, &ea); + break; + case CS_IOCB_ERROR: + switch (fw_status[1]) { + case LSC_SCODE_PORTID_USED: + lid = fw_status[2] & 0xffff; + qlt_find_sess_invalidate_other(vha, + wwn_to_u64(fcport->port_name), + fcport->d_id, lid, &conflict_fcport); + if (conflict_fcport) { + /* + * Another fcport shares the same + * loop_id & nport id; conflict + * fcport needs to finish cleanup + * before this fcport can proceed + * to login. + */ + conflict_fcport->conflict = fcport; + fcport->login_pause = 1; + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", + __func__, __LINE__, + fcport->port_name, + fcport->d_id.b24, lid); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC pid %06x inuse with lid %#x sched del\n", + __func__, __LINE__, + fcport->port_name, + fcport->d_id.b24, lid); + qla2x00_clear_loop_id(fcport); + set_bit(lid, vha->hw->loop_id_map); + fcport->loop_id = lid; + fcport->keep_nport_handle = 0; + qlt_schedule_sess_for_deletion(fcport); + } + break; + + case LSC_SCODE_NPORT_USED: + cid.b.domain = (fw_status[2] >> 16) & 0xff; + cid.b.area = (fw_status[2] >> 8) & 0xff; + cid.b.al_pa = fw_status[2] & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0x20ec, + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", + __func__, __LINE__, fcport->port_name, + fcport->loop_id, cid.b24); + set_bit(fcport->loop_id, + vha->hw->loop_id_map); + fcport->loop_id = FC_NO_LOOP_ID; + qla24xx_post_gnl_work(vha, fcport); + break; + + case LSC_SCODE_NOXCB: + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xd046, + "Exchange starvation. Resetting RISC\n"); + vha->hw->exch_starvation = 0; + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + /* fall through */ + default: + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", + __func__, sp->fcport->port_name, + fw_status[0], fw_status[1], fw_status[2]); + + fcport->flags &= ~FCF_ASYNC_SENT; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + break; + + default: + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", + __func__, sp->fcport->port_name, + fw_status[0], fw_status[1], fw_status[2]); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; }
e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
From: Miaoqing Pan <miaoqing@codeaurora.org>
[ Upstream commit c5329b2d5b8b4e41be14d31ee8505b4f5607bf9b ]
If firmware reports rate_max > WMI_TPC_RATE_MAX (WMI_TPC_FINAL_RATE_MAX) or num_tx_chain > WMI_TPC_TX_N_CHAIN, it will cause an array out-of-bounds access, so print a warning and clamp or drop the values to avoid memory corruption.
Tested HW: QCA9984
Tested FW: 10.4-3.9.0.2-00035
Signed-off-by: Miaoqing Pan <miaoqing@codeaurora.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/wireless/ath/ath10k/debug.c |  2 +-
 drivers/net/wireless/ath/ath10k/wmi.c   | 49 ++++++++++++++++---------
 2 files changed, 32 insertions(+), 19 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index bd2b5628f850b..40baf25ac99f3 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1516,7 +1516,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, *len += scnprintf(buf + *len, buf_len - *len, "No. Preamble Rate_code ");
- for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++) + for (i = 0; i < tpc_stats->num_tx_chain; i++) *len += scnprintf(buf + *len, buf_len - *len, "tpc_value%d ", i);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 90f1197a6ad84..2675174cc4fec 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4668,16 +4668,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar, }
pream_idx = 0; - for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + for (i = 0; i < tpc_stats->rate_max; i++) { memset(tpc_value, 0, sizeof(tpc_value)); memset(buff, 0, sizeof(buff)); if (i == pream_table[pream_idx]) pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { - if (j >= __le32_to_cpu(ev->num_tx_chain)) - break; - + for (j = 0; j < tpc_stats->num_tx_chain; j++) { tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1, rate_code[i], type); @@ -4790,7 +4787,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) { - u32 num_tx_chain; + u32 num_tx_chain, rate_max; u8 rate_code[WMI_TPC_RATE_MAX]; u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; struct wmi_pdev_tpc_config_event *ev; @@ -4806,6 +4803,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) return; }
+ rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n", + rate_max, WMI_TPC_RATE_MAX); + rate_max = WMI_TPC_RATE_MAX; + } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); if (!tpc_stats) return; @@ -4822,8 +4826,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->twice_antenna_reduction); tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); - tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max;
ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, rate_code, pream_table, @@ -5018,16 +5022,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, }
pream_idx = 0; - for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + for (i = 0; i < tpc_stats->rate_max; i++) { memset(tpc_value, 0, sizeof(tpc_value)); memset(buff, 0, sizeof(buff)); if (i == pream_table[pream_idx]) pream_idx++;
- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { - if (j >= __le32_to_cpu(ev->num_tx_chain)) - break; - + for (j = 0; j < tpc_stats->num_tx_chain; j++) { tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1, rate_code[i], type, pream_idx); @@ -5043,7 +5044,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) { - u32 num_tx_chain; + u32 num_tx_chain, rate_max; u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; struct wmi_pdev_tpc_final_table_event *ev; @@ -5051,12 +5052,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { + ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n", + num_tx_chain, WMI_TPC_TX_N_CHAIN); + return; + } + + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_FINAL_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n", + rate_max, WMI_TPC_FINAL_RATE_MAX); + rate_max = WMI_TPC_FINAL_RATE_MAX; + } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); if (!tpc_stats) return;
- num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, num_tx_chain);
@@ -5069,8 +5082,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->twice_antenna_reduction); tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); - tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max;
ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, rate_code, pream_table,
From: Miaoqing Pan <miaoqing@codeaurora.org>
[ Upstream commit 486a8849843455298d49e694cca9968336ce2327 ]
The memory of ar->debug.tpc_stats_final is reallocated on every debugfs read; the last allocation should be freed in ath10k_debug_destroy().
Tested HW: QCA9984
Tested FW: 10.4-3.9.0.2-00035
Signed-off-by: Miaoqing Pan <miaoqing@codeaurora.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/net/wireless/ath/ath10k/debug.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 40baf25ac99f3..04c50a26a4f47 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -2532,6 +2532,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
 	ath10k_debug_fw_stats_reset(ar);
 	kfree(ar->debug.tpc_stats);
+	kfree(ar->debug.tpc_stats_final);
 }
int ath10k_debug_register(struct ath10k *ar)
From: Pierre Crégut <pierre.cregut@orange.com>
[ Upstream commit 35ff867b76576e32f34c698ccd11343f7d616204 ]
When sriov_numvfs is being updated, we call the driver->sriov_configure() function, which may enable VFs and call probe functions, which may make new devices visible. This all happens before sriov_numvfs_store() updates sriov->num_VFs, so previously, concurrent sysfs reads of sriov_numvfs returned stale values.
Serialize the sysfs read vs the write so the read returns the correct num_VFs value.
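For context, a hedged sketch of the writer this serializes against (simplified, with an invented name; the real sriov_numvfs_store() takes the same per-device lock around sriov_configure() and the num_VFs update, and struct pci_sriov is private to drivers/pci):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pci.h>

static ssize_t sriov_numvfs_store_sketch(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 num_vfs;

	if (kstrtou16(buf, 0, &num_vfs) < 0)
		return -EINVAL;

	device_lock(&pdev->dev);
	/* ... pdev->driver->sriov_configure(pdev, num_vfs) ... */
	pdev->sriov->num_VFs = num_vfs;	/* readers now see the final value */
	device_unlock(&pdev->dev);

	return count;
}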
[bhelgaas: hold device_lock instead of checking mutex_is_locked()]
Link: https://bugzilla.kernel.org/show_bug.cgi?id=202991
Link: https://lore.kernel.org/r/20190911072736.32091-1-pierre.cregut@orange.com
Signed-off-by: Pierre Crégut <pierre.cregut@orange.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/pci/iov.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index deec9f9e0b616..9c116cbaa95d8 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -253,8 +253,14 @@ static ssize_t sriov_numvfs_show(struct device *dev,
 				 struct device_attribute *attr,
 				 char *buf)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	u16 num_vfs;
+
+	/* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
+	device_lock(&pdev->dev);
+	num_vfs = pdev->sriov->num_VFs;
+	device_unlock(&pdev->dev);
-	return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+	return sprintf(buf, "%u\n", num_vfs);
 }
/*
From: Jia He <justin.he@arm.com>
[ Upstream commit 83d116c53058d505ddef051e90ab27f57015b025 ]
When we tested the pmdk unit test [1] vmmalloc_fork TEST3 on an arm64 guest, we hit a double page fault in __copy_from_user_inatomic() of cow_user_page().
To reproduce the bug, run the following command after everything is deployed:

  make -C src/test/vmmalloc_fork/ TEST_TIME=60m check
Below call trace is from arm64 do_page_fault for debugging purposes:

[ 110.016195] Call trace:
[ 110.016826]  do_page_fault+0x5a4/0x690
[ 110.017812]  do_mem_abort+0x50/0xb0
[ 110.018726]  el1_da+0x20/0xc4
[ 110.019492]  __arch_copy_from_user+0x180/0x280
[ 110.020646]  do_wp_page+0xb0/0x860
[ 110.021517]  __handle_mm_fault+0x994/0x1338
[ 110.022606]  handle_mm_fault+0xe8/0x180
[ 110.023584]  do_page_fault+0x240/0x690
[ 110.024535]  do_mem_abort+0x50/0xb0
[ 110.025423]  el0_da+0x20/0x24
The pte info before __copy_from_user_inatomic is (PTE_AF is cleared): [ffff9b007000] pgd=000000023d4f8003, pud=000000023da9b003, pmd=000000023d4b3003, pte=360000298607bd3
As told by Catalin: "On arm64 without hardware Access Flag, copying from user will fail because the pte is old and cannot be marked young. So we always end up with a zeroed page after fork() + CoW for pfn mappings. We don't always have a hardware-managed access flag on arm64."

This patch fixes it by calling pte_mkyoung. Also, the parameters are changed because vmf should be passed to cow_user_page().

Add a WARN_ON_ONCE when __copy_from_user_inatomic() returns an error, in case there is some obscure use-case (suggested by Kirill).
[1] https://github.com/pmem/pmdk/tree/master/src/test/vmmalloc_fork
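For reference, the arch-side override that the new arch_faults_on_old_pte() hook anticipates would look roughly like the following (a sketch modeled on the arm64 half of the upstream commit; cpu_has_hw_af() is the arm64 helper and is assumed here):

/* e.g. in arch/arm64/include/asm/pgtable.h */
#define arch_faults_on_old_pte arch_faults_on_old_pte
static inline bool arch_faults_on_old_pte(void)
{
	/* per-CPU feature: must not migrate between the check and the use */
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}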
Signed-off-by: Jia He <justin.he@arm.com>
Reported-by: Yibo Cai <Yibo.Cai@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 mm/memory.c | 104 ++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 89 insertions(+), 15 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c index cb7c940cf800c..9ea917e28ef4e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -118,6 +118,18 @@ int randomize_va_space __read_mostly = 2; #endif
+#ifndef arch_faults_on_old_pte +static inline bool arch_faults_on_old_pte(void) +{ + /* + * Those arches which don't have hw access flag feature need to + * implement their own helper. By default, "true" means pagefault + * will be hit on old pte. + */ + return true; +} +#endif + static int __init disable_randmaps(char *s) { randomize_va_space = 0; @@ -2145,32 +2157,82 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, return same; }
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) +static inline bool cow_user_page(struct page *dst, struct page *src, + struct vm_fault *vmf) { + bool ret; + void *kaddr; + void __user *uaddr; + bool force_mkyoung; + struct vm_area_struct *vma = vmf->vma; + struct mm_struct *mm = vma->vm_mm; + unsigned long addr = vmf->address; + debug_dma_assert_idle(src);
+ if (likely(src)) { + copy_user_highpage(dst, src, addr, vma); + return true; + } + /* * If the source page was a PFN mapping, we don't have * a "struct page" for it. We do a best-effort copy by * just copying from the original user address. If that * fails, we just zero-fill it. Live with it. */ - if (unlikely(!src)) { - void *kaddr = kmap_atomic(dst); - void __user *uaddr = (void __user *)(va & PAGE_MASK); + kaddr = kmap_atomic(dst); + uaddr = (void __user *)(addr & PAGE_MASK); + + /* + * On architectures with software "accessed" bits, we would + * take a double page fault, so mark it accessed here. + */ + force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte); + if (force_mkyoung) { + pte_t entry; + + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + /* + * Other thread has already handled the fault + * and we don't need to do anything. If it's + * not the case, the fault will be triggered + * again on the same address. + */ + ret = false; + goto pte_unlock; + }
+ entry = pte_mkyoung(vmf->orig_pte); + if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) + update_mmu_cache(vma, addr, vmf->pte); + } + + /* + * This really shouldn't fail, because the page is there + * in the page tables. But it might just be unreadable, + * in which case we just give up and fill the result with + * zeroes. + */ + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { /* - * This really shouldn't fail, because the page is there - * in the page tables. But it might just be unreadable, - * in which case we just give up and fill the result with - * zeroes. + * Give a warn in case there can be some obscure + * use-case */ - if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) - clear_page(kaddr); - kunmap_atomic(kaddr); - flush_dcache_page(dst); - } else - copy_user_highpage(dst, src, va, vma); + WARN_ON_ONCE(1); + clear_page(kaddr); + } + + ret = true; + +pte_unlock: + if (force_mkyoung) + pte_unmap_unlock(vmf->pte, vmf->ptl); + kunmap_atomic(kaddr); + flush_dcache_page(dst); + + return ret; }
static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) @@ -2342,7 +2404,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) vmf->address); if (!new_page) goto oom; - cow_user_page(new_page, old_page, vmf->address, vma); + + if (!cow_user_page(new_page, old_page, vmf)) { + /* + * COW failed, if the fault was solved by other, + * it's fine. If not, userspace would re-fault on + * the same address and we will handle the fault + * from the second attempt. + */ + put_page(new_page); + if (old_page) + put_page(old_page); + return 0; + } }
if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
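For context, an architecture that has an optional hardware Access Flag would override the default helper along these lines (a sketch modelled on the arm64 approach; the cpu_has_hw_af()-style capability check is an assumption here, not part of this patch):

	/* Only report "faults on old pte" when the CPU lacks a
	 * hardware-managed Access Flag. */
	#define arch_faults_on_old_pte arch_faults_on_old_pte
	static inline bool arch_faults_on_old_pte(void)
	{
		return !cpu_has_hw_af();
	}

With that in place, force_mkyoung (and the extra pte lock/unlock) is skipped entirely on hardware that updates the access flag itself.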
From: Balsundar P balsundar.p@microsemi.com
[ Upstream commit c86fbe484c10b2cd1e770770db2d6b2c88801c1d ]
The driver fails to handle reads or writes beyond the device-reported LBA range, which triggers a kernel panic.
Link: https://lore.kernel.org/r/1571120524-6037-2-git-send-email-balsundar.p@micro... Signed-off-by: Balsundar P balsundar.p@microsemi.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/aacraid/aachba.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 0ed3f806ace54..2388143d59f5d 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2467,13 +2467,13 @@ static int aac_read(struct scsi_cmnd * scsicmd) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); scsicmd->scsi_done(scsicmd); - return 1; + return 0; }
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", @@ -2559,13 +2559,13 @@ static int aac_write(struct scsi_cmnd * scsicmd) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); scsicmd->scsi_done(scsicmd); - return 1; + return 0; }
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
From: Fuqian Huang huangfq.daxian@gmail.com
[ Upstream commit 7cf78b6b12fd5550545e4b73b35dca18bd46b44c ]
When the option is RTC_PLL_GET, pll will be copied to userland via copy_to_user. pll is initialized using mach_get_rtc_pll indirect call and mach_get_rtc_pll is only assigned with function q40_get_rtc_pll in arch/m68k/q40/config.c. In function q40_get_rtc_pll, the field pll_ctrl is not initialized. This will leak uninitialized stack content to userland. Fix this by zeroing the uninitialized field.
Signed-off-by: Fuqian Huang huangfq.daxian@gmail.com Link: https://lore.kernel.org/r/20190927121544.7650-1-huangfq.daxian@gmail.com Signed-off-by: Geert Uytterhoeven geert@linux-m68k.org Signed-off-by: Sasha Levin sashal@kernel.org --- arch/m68k/q40/config.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index e63eb5f069995..f31890078197e 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -264,6 +264,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll) { int tmp = Q40_RTC_CTRL;
+ pll->pll_ctrl = 0; pll->pll_value = tmp & Q40_RTC_PLL_MASK; if (tmp & Q40_RTC_PLL_SIGN) pll->pll_value = -pll->pll_value;
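The general rule this fix follows: any struct that is later copied to user space must have every field (and any padding) initialized, and zeroing the whole object first is the safe default. A minimal sketch with illustrative values:

	static int get_pll_safe(struct rtc_pll_info *pll)
	{
		/* Zero everything first so no stack garbage (pll_ctrl,
		 * padding) can reach user space via copy_to_user(). */
		memset(pll, 0, sizeof(*pll));

		pll->pll_value = 10;	/* illustrative values only */
		pll->pll_max = 31;
		pll->pll_min = -31;
		return 0;
	}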
From: Dave Chinner dchinner@redhat.com
[ Upstream commit 3f8a4f1d876d3e3e49e50b0396eaffcc4ba71b08 ]
[commit message is verbose for discussion purposes - will trim it down later. Some questions about implementation details at the end.]
Zorro Lang recently ran a new test to stress single inode extent counts now that they are no longer limited by memory allocation. The test was simply:
# xfs_io -f -c "falloc 0 40t" /mnt/scratch/big-file # ~/src/xfstests-dev/punch-alternating /mnt/scratch/big-file
This test uncovered a problem where the hole punching operation appeared to finish with no error, but apparently only created 268M extents instead of the 10 billion it was supposed to.
Further, trying to punch out extents that should have been present resulted in success, but no change in the extent count. It looked like a silent failure.
While running the test and observing the behaviour in real time, I saw the extent count growing at ~2M extents/minute, and observed this after about an hour:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next ; \
  sleep 60 ; \
  xfs_io -f -c "stat" /mnt/scratch/big-file |grep next
fsxattr.nextents = 127657993
fsxattr.nextents = 129683339
#
And a few minutes later this:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next fsxattr.nextents = 4177861124 #
Ah, what? Where did those 4 billion extra extents suddenly come from?
Stop the workload, unmount, mount:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next fsxattr.nextents = 166044375 #
And it's back at the expected number. i.e. the extent count is correct on disk, but it's screwed up in memory. I loaded up the extent list, and immediately:
# xfs_io -f -c "stat" /mnt/scratch/big-file |grep next fsxattr.nextents = 4192576215 #
It's bad again. So, where does that number come from? xfs_fill_fsxattr():
	if (ip->i_df.if_flags & XFS_IFEXTENTS)
		fa->fsx_nextents = xfs_iext_count(&ip->i_df);
	else
		fa->fsx_nextents = ip->i_d.di_nextents;
And that's the behaviour I just saw in a nutshell. The on disk count is correct, but once the tree is loaded into memory, it goes whacky. Clearly there's something wrong with xfs_iext_count():
	inline xfs_extnum_t
	xfs_iext_count(struct xfs_ifork *ifp)
	{
		return ifp->if_bytes / sizeof(struct xfs_iext_rec);
	}
Simple enough, but 134M extents is 2**27, and that's right about where things went wrong. A struct xfs_iext_rec is 16 bytes in size, which means 2**27 * 2**4 = 2**31 and we're right on target for an integer overflow. And, sure enough:
	struct xfs_ifork {
		int	if_bytes;	/* bytes in if_u1 */
		....
Once we get 2**27 extents in a file, we overflow if_bytes and the in-core extent count goes wrong. And when we reach 2**28 extents, if_bytes wraps back to zero and things really start to go wrong there. This is where the silent failure comes from - only the first 2**28 extents can be looked up directly due to the overflow; all the extents above this index wrap back to somewhere in the first 2**28 extents. Hence with a regular punch pattern, trying to punch a hole in a range above that index mapped to an already-punched hole in the first 2**28 extents, and so "succeeded" without changing anything. Hence the "silent failure"...
Fix this by converting if_bytes to an int64_t and converting all the index variables and size calculations to use int64_t types to avoid overflows in the future. Signed integers are still used to enable easy detection of extent count underflows. This enables scalability of extent counts to the limits of the on-disk format - MAXEXTNUM (2**31) extents.
Current testing is at over 500M extents and still going:
fsxattr.nextents = 517310478
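The arithmetic above is easy to demonstrate in isolation; a standalone sketch (plain userspace C, not kernel code) of the 2**27 * 2**4 = 2**31 overflow:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t nextents = 1LL << 27;	/* ~134M extents */
		int64_t bytes = nextents * 16;	/* 16 == sizeof(xfs_iext_rec) */

		/* Narrowing 2**31 to a 32-bit signed int wraps to
		 * INT32_MIN on two's-complement targets. */
		int32_t if_bytes32 = (int32_t)bytes;

		printf("64-bit if_bytes: %lld\n", (long long)bytes);
		printf("32-bit if_bytes: %d\n", if_bytes32);
		printf("extent count from 32-bit: %d\n",
		       (int)(if_bytes32 / 16));	/* garbage */
		return 0;
	}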
Reported-by: Zorro Lang zlang@redhat.com Signed-off-by: Dave Chinner dchinner@redhat.com Reviewed-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/xfs/libxfs/xfs_attr_leaf.c | 18 ++++++++++-------- fs/xfs/libxfs/xfs_dir2_sf.c | 2 +- fs/xfs/libxfs/xfs_iext_tree.c | 2 +- fs/xfs/libxfs/xfs_inode_fork.c | 8 ++++---- fs/xfs/libxfs/xfs_inode_fork.h | 14 ++++++++------ 5 files changed, 24 insertions(+), 20 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index fe277ee5ec7c4..b133209f3aa6a 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -453,13 +453,15 @@ xfs_attr_copy_value( * special case for dev/uuid inodes, they have fixed size data forks. */ int -xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) +xfs_attr_shortform_bytesfit( + struct xfs_inode *dp, + int bytes) { - int offset; - int minforkoff; /* lower limit on valid forkoff locations */ - int maxforkoff; /* upper limit on valid forkoff locations */ - int dsize; - xfs_mount_t *mp = dp->i_mount; + struct xfs_mount *mp = dp->i_mount; + int64_t dsize; + int minforkoff; + int maxforkoff; + int offset;
/* rounded down */ offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3; @@ -525,7 +527,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) * A data fork btree root must have space for at least * MINDBTPTRS key/ptr pairs if the data fork is small or empty. */ - minforkoff = max(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); + minforkoff = max_t(int64_t, dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); minforkoff = roundup(minforkoff, 8) >> 3;
/* attr fork btree root can have at least this many key/ptr pairs */ @@ -924,7 +926,7 @@ xfs_attr_shortform_verify( char *endp; struct xfs_ifork *ifp; int i; - int size; + int64_t size;
ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL); ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK); diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 85f14fc2a8da9..ae16ca7c422a9 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -628,7 +628,7 @@ xfs_dir2_sf_verify( int i; int i8count; int offset; - int size; + int64_t size; int error; uint8_t filetype;
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c index 7bc87408f1a0a..52451809c4786 100644 --- a/fs/xfs/libxfs/xfs_iext_tree.c +++ b/fs/xfs/libxfs/xfs_iext_tree.c @@ -596,7 +596,7 @@ xfs_iext_realloc_root( struct xfs_ifork *ifp, struct xfs_iext_cursor *cur) { - size_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec); + int64_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec); void *new;
/* account for the prev/next pointers */ diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index c643beeb5a248..8fdd0424070e0 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -129,7 +129,7 @@ xfs_init_local_fork( struct xfs_inode *ip, int whichfork, const void *data, - int size) + int64_t size) { struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); int mem_size = size, real_size = 0; @@ -467,11 +467,11 @@ xfs_iroot_realloc( void xfs_idata_realloc( struct xfs_inode *ip, - int byte_diff, + int64_t byte_diff, int whichfork) { struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); - int new_size = (int)ifp->if_bytes + byte_diff; + int64_t new_size = ifp->if_bytes + byte_diff;
ASSERT(new_size >= 0); ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork)); @@ -552,7 +552,7 @@ xfs_iextents_copy( struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); struct xfs_iext_cursor icur; struct xfs_bmbt_irec rec; - int copied = 0; + int64_t copied = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)); ASSERT(ifp->if_bytes > 0); diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h index 00c62ce170d0e..7b845c052fb45 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.h +++ b/fs/xfs/libxfs/xfs_inode_fork.h @@ -13,16 +13,16 @@ struct xfs_dinode; * File incore extent information, present for each of data & attr forks. */ struct xfs_ifork { - int if_bytes; /* bytes in if_u1 */ - unsigned int if_seq; /* fork mod counter */ + int64_t if_bytes; /* bytes in if_u1 */ struct xfs_btree_block *if_broot; /* file's incore btree root */ - short if_broot_bytes; /* bytes allocated for root */ - unsigned char if_flags; /* per-fork flags */ + unsigned int if_seq; /* fork mod counter */ int if_height; /* height of the extent tree */ union { void *if_root; /* extent tree root */ char *if_data; /* inline file data */ } if_u1; + short if_broot_bytes; /* bytes allocated for root */ + unsigned char if_flags; /* per-fork flags */ };
/* @@ -93,12 +93,14 @@ int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, struct xfs_inode_log_item *, int); void xfs_idestroy_fork(struct xfs_inode *, int); -void xfs_idata_realloc(struct xfs_inode *, int, int); +void xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff, + int whichfork); void xfs_iroot_realloc(struct xfs_inode *, int, int); int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int); int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *, int); -void xfs_init_local_fork(struct xfs_inode *, int, const void *, int); +void xfs_init_local_fork(struct xfs_inode *ip, int whichfork, + const void *data, int64_t size);
xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp); void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur,
From: Kangjie Lu kjlu@umn.edu
[ Upstream commit 57a25a5f754ce27da2cfa6f413cfd366f878db76 ]
`best_clock` is an object that may be passed out of the function. The local object `clock` contains uninitialized bytes that are copied into `best_clock`, which leads to memory disclosure and an information leak.
Signed-off-by: Kangjie Lu kjlu@umn.edu Signed-off-by: Daniel Vetter daniel.vetter@ffwll.ch Link: https://patchwork.freedesktop.org/patch/msgid/20191018042953.31099-1-kjlu@um... Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/gma500/cdv_intel_display.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index f56852a503e8d..8b784947ed3b9 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct gma_clock_t clock;
+ memset(&clock, 0, sizeof(clock)); + switch (refclk) { case 27000: if (target < 200000) {
From: Russell King rmk+kernel@armlinux.org.uk
[ Upstream commit 175fc928198236037174e5c5c066fe3c4691903e ]
Propagate the error code from request_irq(), rather than returning -EBUSY.
Signed-off-by: Russell King rmk+kernel@armlinux.org.uk Link: https://lore.kernel.org/r/E1iNIqh-0000tW-EZ@rmk-PC.armlinux.org.uk Signed-off-by: Mark Brown broonie@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- sound/soc/kirkwood/kirkwood-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c index 6f69f314f2c2a..d2d5c25bf5502 100644 --- a/sound/soc/kirkwood/kirkwood-dma.c +++ b/sound/soc/kirkwood/kirkwood-dma.c @@ -132,7 +132,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream) err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED, "kirkwood-i2s", priv); if (err) - return -EBUSY; + return err;
/* * Enable Error interrupts. We're only ack'ing them but
From: Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com
[ Upstream commit 49ea07d33d9a32c17e18b322e789507280ceb2a3 ]
Multiple changes squashed into a single patch to avoid a tick-tock effect and to avoid breaking compilation/bisect:
1. Per the hardware documentation, all changes to MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL need to be validated with a self-clearing write to MCP_CONFIG_UPDATE. Add a helper and do the update when the CONFIG is changed.
2. Move interrupt enable after interrupt handler registration
3. Add a new helper to start the hardware bus reset with maximum duration to make sure the Slave(s) correctly detect the reset pattern and to ensure electrical conflicts can be resolved.
4. Flush the command FIFOs
Better error handling will be added once interrupt disabling is provided in follow-up patches.
Signed-off-by: Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com Link: https://lore.kernel.org/r/20191022235448.17586-2-pierre-louis.bossart@linux.... Signed-off-by: Vinod Koul vkoul@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/soundwire/cadence_master.c | 80 +++++++++++++++++++++--------- drivers/soundwire/cadence_master.h | 1 + drivers/soundwire/intel.c | 14 +++++- 3 files changed, 69 insertions(+), 26 deletions(-)
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 502ed4ec8f070..e3d06330d1258 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -231,6 +231,22 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value) return -EAGAIN; }
+/* + * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL + * need to be confirmed with a write to MCP_CONFIG_UPDATE + */ +static int cdns_update_config(struct sdw_cdns *cdns) +{ + int ret; + + ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, + CDNS_MCP_CONFIG_UPDATE_BIT); + if (ret < 0) + dev_err(cdns->dev, "Config update timedout\n"); + + return ret; +} + /* * debugfs */ @@ -752,7 +768,38 @@ EXPORT_SYMBOL(sdw_cdns_thread); /* * init routines */ -static int _cdns_enable_interrupt(struct sdw_cdns *cdns) + +/** + * sdw_cdns_exit_reset() - Program reset parameters and start bus operations + * @cdns: Cadence instance + */ +int sdw_cdns_exit_reset(struct sdw_cdns *cdns) +{ + /* program maximum length reset to be safe */ + cdns_updatel(cdns, CDNS_MCP_CONTROL, + CDNS_MCP_CONTROL_RST_DELAY, + CDNS_MCP_CONTROL_RST_DELAY); + + /* use hardware generated reset */ + cdns_updatel(cdns, CDNS_MCP_CONTROL, + CDNS_MCP_CONTROL_HW_RST, + CDNS_MCP_CONTROL_HW_RST); + + /* enable bus operations with clock and data */ + cdns_updatel(cdns, CDNS_MCP_CONFIG, + CDNS_MCP_CONFIG_OP, + CDNS_MCP_CONFIG_OP_NORMAL); + + /* commit changes */ + return cdns_update_config(cdns); +} +EXPORT_SYMBOL(sdw_cdns_exit_reset); + +/** + * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config + * @cdns: Cadence instance + */ +int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns) { u32 mask;
@@ -784,24 +831,8 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
- return 0; -} - -/** - * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config - * @cdns: Cadence instance - */ -int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns) -{ - int ret; - - _cdns_enable_interrupt(cdns); - ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, - CDNS_MCP_CONFIG_UPDATE_BIT); - if (ret < 0) - dev_err(cdns->dev, "Config update timedout\n"); - - return ret; + /* commit changes */ + return cdns_update_config(cdns); } EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
@@ -975,6 +1006,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns) cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL); cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+ /* flush command FIFOs */ + cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST, + CDNS_MCP_CONTROL_CMD_RST); + /* Set cmd accept mode */ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT, CDNS_MCP_CONTROL_CMD_ACCEPT); @@ -997,13 +1032,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns) /* Set cmd mode for Tx and Rx cmds */ val &= ~CDNS_MCP_CONFIG_CMD;
- /* Set operation to normal */ - val &= ~CDNS_MCP_CONFIG_OP; - val |= CDNS_MCP_CONFIG_OP_NORMAL; - cdns_writel(cdns, CDNS_MCP_CONFIG, val);
- return 0; + /* commit changes */ + return cdns_update_config(cdns); } EXPORT_SYMBOL(sdw_cdns_init);
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index 0b72b70947352..1a67728c5000f 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -161,6 +161,7 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id); int sdw_cdns_init(struct sdw_cdns *cdns); int sdw_cdns_pdi_init(struct sdw_cdns *cdns, struct sdw_cdns_stream_config config); +int sdw_cdns_exit_reset(struct sdw_cdns *cdns); int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
#ifdef CONFIG_DEBUG_FS diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 243af8198d1c6..a2da04946f0b4 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -1050,8 +1050,6 @@ static int intel_probe(struct platform_device *pdev) if (ret) goto err_init;
- ret = sdw_cdns_enable_interrupt(&sdw->cdns); - /* Read the PDI config and initialize cadence PDI */ intel_pdi_init(sdw, &config); ret = sdw_cdns_pdi_init(&sdw->cdns, config); @@ -1069,6 +1067,18 @@ static int intel_probe(struct platform_device *pdev) goto err_init; }
+ ret = sdw_cdns_enable_interrupt(&sdw->cdns); + if (ret < 0) { + dev_err(sdw->cdns.dev, "cannot enable interrupts\n"); + goto err_init; + } + + ret = sdw_cdns_exit_reset(&sdw->cdns); + if (ret < 0) { + dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n"); + goto err_init; + } + /* Register DAIs */ ret = intel_register_dai(sdw); if (ret) {
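The new helper centralizes a common hardware idiom: write a self-clearing "update" bit, then poll until the hardware consumes it. A generic sketch of that idiom (register offset, bit, delays, and retry count are assumptions here, not the Cadence values):

	#define CFG_UPDATE_REG	0x10		/* hypothetical offset */
	#define CFG_UPDATE_BIT	BIT(0)		/* hypothetical self-clearing bit */

	static int commit_config(void __iomem *base)
	{
		int retry = 10;
		u32 val;

		/* Ask the hardware to latch the newly written config. */
		val = readl(base + CFG_UPDATE_REG);
		writel(val | CFG_UPDATE_BIT, base + CFG_UPDATE_REG);

		/* The bit self-clears once the update has been applied. */
		while (retry--) {
			if (!(readl(base + CFG_UPDATE_REG) & CFG_UPDATE_BIT))
				return 0;
			usleep_range(10, 20);
		}

		return -EAGAIN;
	}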
From: Sakari Ailus sakari.ailus@linux.intel.com
[ Upstream commit a5b1d5413534607b05fb34470ff62bf395f5c8d0 ]
If NVM reading failed, the device was left powered on. Fix that.
Signed-off-by: Sakari Ailus sakari.ailus@linux.intel.com Signed-off-by: Mauro Carvalho Chehab mchehab+samsung@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/media/i2c/smiapp/smiapp-core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 42805dfbffeb9..06edbe8749c64 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -2327,11 +2327,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr, if (rval < 0) { if (rval != -EBUSY && rval != -EAGAIN) pm_runtime_set_active(&client->dev); - pm_runtime_put(&client->dev); + pm_runtime_put_noidle(&client->dev); return -ENODEV; }
if (smiapp_read_nvm(sensor, sensor->nvm)) { + pm_runtime_put(&client->dev); dev_err(&client->dev, "nvm read failed\n"); return -ENODEV; }
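The distinction being fixed: pm_runtime_put_noidle() only drops the usage count, which is appropriate when the resume itself failed, while pm_runtime_put() also allows the device to idle again, which is required once the device was successfully powered up. A simplified sketch of the corrected flow (read_nvm() is a hypothetical stand-in for the driver's NVM read):

	static int nvm_read_powered(struct i2c_client *client)
	{
		int rval = pm_runtime_get_sync(&client->dev);

		if (rval < 0) {
			/* Resume failed, but the get still took a
			 * reference: only repair the usage count. */
			pm_runtime_put_noidle(&client->dev);
			return -ENODEV;
		}

		if (read_nvm(client) < 0) {
			/* Device is powered: a full put lets it
			 * suspend again instead of staying on. */
			pm_runtime_put(&client->dev);
			return -ENODEV;
		}

		pm_runtime_put(&client->dev);
		return 0;
	}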
From: Nicholas Kazlauskas nicholas.kazlauskas@amd.com
[ Upstream commit 0e3a7c2ec93b15f43a2653e52e9608484391aeaf ]
[Why] We're leaking memory by not freeing the gamma used to calculate the transfer function for legacy gamma.
[How] Release the gamma after we're done with it.
Signed-off-by: Nicholas Kazlauskas nicholas.kazlauskas@amd.com Reviewed-by: Leo Li sunpeng.li@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index b43bb7f90e4e9..2233d293a707a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func, res = mod_color_calculate_regamma_params(func, gamma, true, has_rom, NULL);
+ dc_gamma_release(&gamma); + return res ? 0 : -ENOMEM; }
From: Dave Chinner dchinner@redhat.com
[ Upstream commit 249bd9087a5264d2b8a974081870e2e27671b4dc ]
AIO+DIO can extend the file size on IO completion, and it holds no inode locks while the IO is in flight. Therefore, a race condition exists in file size updates if we do something like this:
aio-thread                          fallocate-thread

lock inode
submit IO beyond inode->i_size
unlock inode
.....
                                    lock inode
                                    break layouts
                                    if (off + len > inode->i_size)
                                        new_size = off + len
                                    .....
                                    inode_dio_wait()
                                    <blocks>
.....
completes
inode->i_size updated
inode_dio_done()
....
                                    <wakes>
                                    <does stuff no longer beyond EOF>
                                    if (new_size)
                                        xfs_vn_setattr(inode, new_size)
Yup, that attempt to extend the file size in the fallocate code turns into a truncate - it removes whatever the AIO write allocated and put to disk, and reduces the inode size back down to where the fallocate operation ends.
Fundamentally, xfs_file_fallocate() is not compatible with racing AIO+DIO completions, so we need to move the inode_dio_wait() call up to where we lock the inode and break the layouts.
Secondly, storing the inode size and then using it unchecked without holding the ILOCK is not safe; we can only do such a thing if we've locked out and drained all IO and other modification operations, which we don't do initially in xfs_file_fallocate.
It should be noted that some of the fallocate operations are compound operations - they are made up of multiple manipulations that may zero data, and so we may need to flush and invalidate the file multiple times during an operation. However, we only need to lock out IO and other space manipulation operations once, as that lockout is maintained until the entire fallocate operation has been completed.
Signed-off-by: Dave Chinner dchinner@redhat.com Reviewed-by: Christoph Hellwig hch@lst.de Reviewed-by: Brian Foster bfoster@redhat.com Reviewed-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/xfs/xfs_bmap_util.c | 8 +------- fs/xfs/xfs_file.c | 30 ++++++++++++++++++++++++++++++ fs/xfs/xfs_ioctl.c | 1 + 3 files changed, 32 insertions(+), 7 deletions(-)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 0c71acc1b8317..d6d78e1276254 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1039,6 +1039,7 @@ out_trans_cancel: goto out_unlock; }
+/* Caller must first wait for the completion of any pending DIOs if required. */ int xfs_flush_unmap_range( struct xfs_inode *ip, @@ -1050,9 +1051,6 @@ xfs_flush_unmap_range( xfs_off_t rounding, start, end; int error;
- /* wait for the completion of any pending DIOs */ - inode_dio_wait(inode); - rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE); start = round_down(offset, rounding); end = round_up(offset + len, rounding) - 1; @@ -1084,10 +1082,6 @@ xfs_free_file_space( if (len <= 0) /* if nothing being freed */ return 0;
- error = xfs_flush_unmap_range(ip, offset, len); - if (error) - return error; - startoffset_fsb = XFS_B_TO_FSB(mp, offset); endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 1e2176190c86f..203065a647652 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -818,6 +818,36 @@ xfs_file_fallocate( if (error) goto out_unlock;
+ /* + * Must wait for all AIO to complete before we continue as AIO can + * change the file size on completion without holding any locks we + * currently hold. We must do this first because AIO can update both + * the on disk and in memory inode sizes, and the operations that follow + * require the in-memory size to be fully up-to-date. + */ + inode_dio_wait(inode); + + /* + * Now AIO and DIO has drained we flush and (if necessary) invalidate + * the cached range over the first operation we are about to run. + * + * We care about zero and collapse here because they both run a hole + * punch over the range first. Because that can zero data, and the range + * of invalidation for the shift operations is much larger, we still do + * the required flush for collapse in xfs_prepare_shift(). + * + * Insert has the same range requirements as collapse, and we extend the + * file first which can zero data. Hence insert has the same + * flush/invalidate requirements as collapse and so they are both + * handled at the right time by xfs_prepare_shift(). + */ + if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE | + FALLOC_FL_COLLAPSE_RANGE)) { + error = xfs_flush_unmap_range(ip, offset, len); + if (error) + goto out_unlock; + } + if (mode & FALLOC_FL_PUNCH_HOLE) { error = xfs_free_file_space(ip, offset, len); if (error) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index c93c4b7328ef7..60c4526312771 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -622,6 +622,7 @@ xfs_ioc_space( error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP); if (error) goto out_unlock; + inode_dio_wait(inode);
switch (bf->l_whence) { case 0: /*SEEK_SET*/
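Condensed, the corrected ordering on the fallocate path is (a simplified sketch of the code above; lock-flag bookkeeping and the per-mode operations are omitted):

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/* From here on, no in-flight AIO/DIO can move i_size. */
	inode_dio_wait(inode);

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}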
From: Jiri Slaby jslaby@suse.cz
[ Upstream commit e9f691d899188679746eeb96e6cb520459eda9b4 ]
There are several reports that the BUG_ON on unsupported command in mv_qc_prep can be triggered under some circumstances: https://bugzilla.suse.com/show_bug.cgi?id=1110252 https://serverfault.com/questions/888897/raid-problems-after-power-outage https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1652185 https://bugs.centos.org/view.php?id=14998
Let sata_mv handle the failure gracefully: warn about it, including the failed command number, and return an AC_ERR_INVALID error. We can do that now thanks to the previous patch.
Remove also the long-standing FIXME.
[v2] use %.2x as commands are defined in hex.
Signed-off-by: Jiri Slaby jslaby@suse.cz Cc: Jens Axboe axboe@kernel.dk Cc: linux-ide@vger.kernel.org Cc: Sergei Shtylyov sergei.shtylyov@cogentembedded.com Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/ata/sata_mv.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index bde695a320973..0229b618d0eee 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none * of which are defined/used by Linux. If we get here, this * driver needs work. - * - * FIXME: modify libata to give qc_prep a return value and - * return error here. */ - BUG_ON(tf->command); - break; + ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__, + tf->command); + return AC_ERR_INVALID; } mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
From: Oleh Kravchenko oleg@kaa.org.ua
[ Upstream commit 7c6082b903ac28dc3f383fba57c6f9e7e2594178 ]
The error was detected by PVS-Studio: V512 A call of the 'sprintf' function will lead to overflow of the buffer 'led_data->led_cdev_name'.
Acked-by: Jacek Anaszewski jacek.anaszewski@gmail.com Acked-by: Pavel Machek pavel@ucw.cz Signed-off-by: Oleh Kravchenko oleg@kaa.org.ua Signed-off-by: Pavel Machek pavel@ucw.cz Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/leds/leds-mlxreg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c index cabe379071a7c..82aea1cd0c125 100644 --- a/drivers/leds/leds-mlxreg.c +++ b/drivers/leds/leds-mlxreg.c @@ -228,8 +228,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv) brightness = LED_OFF; led_data->base_color = MLXREG_LED_GREEN_SOLID; } - sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg", - data->label); + snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name), + "mlxreg:%s", data->label); led_cdev->name = led_data->led_cdev_name; led_cdev->brightness = brightness; led_cdev->max_brightness = LED_ON;
From: Mike Snitzer snitzer@redhat.com
[ Upstream commit 6ba01df72b4b63a26b4977790f58d8f775d2992c ]
Partitioned request-based devices cannot be used as underlying devices for request-based DM because no partition offsets are added to each incoming request. As such, until now, stacking on partitioned devices would _always_ result in data corruption (e.g. wiping the partition table, writing to other partitions, etc). Fix this by disallowing request-based stacking on partitions.
While at it, since all .request_fn support has been removed from block core, remove legacy dm-table code that differentiated between blk-mq and .request_fn request-based.
Signed-off-by: Mike Snitzer snitzer@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/md/dm-table.c | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 52e049554f5cd..2ae0c19137667 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -918,21 +918,15 @@ bool dm_table_supports_dax(struct dm_table *t,
static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-struct verify_rq_based_data { - unsigned sq_count; - unsigned mq_count; -}; - -static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - struct verify_rq_based_data *v = data; + struct block_device *bdev = dev->bdev; + struct request_queue *q = bdev_get_queue(bdev);
- if (queue_is_mq(q)) - v->mq_count++; - else - v->sq_count++; + /* request-based cannot stack on partitions! */ + if (bdev != bdev->bd_contains) + return false;
return queue_is_mq(q); } @@ -941,7 +935,6 @@ static int dm_table_determine_type(struct dm_table *t) { unsigned i; unsigned bio_based = 0, request_based = 0, hybrid = 0; - struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0}; struct dm_target *tgt; struct list_head *devices = dm_table_get_devices(t); enum dm_queue_mode live_md_type = dm_get_md_type(t->md); @@ -1045,14 +1038,10 @@ verify_rq_based:
/* Non-request-stackable devices can't be used for request-based dm */ if (!tgt->type->iterate_devices || - !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) { + !tgt->type->iterate_devices(tgt, device_is_rq_stackable, NULL)) { DMERR("table load rejected: including non-request-stackable devices"); return -EINVAL; } - if (v.sq_count > 0) { - DMERR("table load rejected: not all devices are blk-mq request-stackable"); - return -EINVAL; - }
return 0; }
From: Dmitry Osipenko digetx@gmail.com
[ Upstream commit 53b4b2aeee26f42cde5ff2a16dd0d8590c51a55a ]
There is another kHz-conversion bug in the code, resulting in integer overflow. This time, though, the resulting value is 4294966296, which is close to ULONG_MAX and therefore acceptable in this case.
Reviewed-by: Chanwoo Choi cw00.choi@samsung.com Tested-by: Peter Geis pgwipeout@gmail.com Signed-off-by: Dmitry Osipenko digetx@gmail.com Signed-off-by: Chanwoo Choi cw00.choi@samsung.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/devfreq/tegra30-devfreq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c index a6ba75f4106d8..e273011c83fbd 100644 --- a/drivers/devfreq/tegra30-devfreq.c +++ b/drivers/devfreq/tegra30-devfreq.c @@ -68,6 +68,8 @@
#define KHZ 1000
+#define KHZ_MAX (ULONG_MAX / KHZ) + /* Assume that the bus is saturated if the utilization is 25% */ #define BUS_SATURATION_RATIO 25
@@ -169,7 +171,7 @@ struct tegra_actmon_emc_ratio { };
static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = { - { 1400000, ULONG_MAX }, + { 1400000, KHZ_MAX }, { 1200000, 750000 }, { 1100000, 600000 }, { 1000000, 500000 },
From: Pan Bian bianpan2016@163.com
[ Upstream commit ec990306f77fd4c58c3b27cc3b3c53032d6e6670 ]
The memory chunk io_req is released by mempool_free. Accessing io_req->start_time afterwards results in a use-after-free bug. The local variable start_time is a backup of that timestamp, so use start_time instead to avoid the use-after-free.
Link: https://lore.kernel.org/r/1572881182-37664-1-git-send-email-bianpan2016@163.... Signed-off-by: Pan Bian bianpan2016@163.com Reviewed-by: Satish Kharat satishkh@cisco.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/fnic/fnic_scsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index e3f5c91d5e4fe..b60795893994c 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1027,7 +1027,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, atomic64_inc(&fnic_stats->io_stats.io_completions);
- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); + io_duration_time = jiffies_to_msecs(jiffies) - + jiffies_to_msecs(start_time);
if(io_duration_time <= 10) atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
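The pattern the fix applies: snapshot whatever you still need from an object before returning it to its pool, then touch only the snapshot. A simplified sketch of the completion-handler flow (in the real driver the backup is taken earlier in the function):

	unsigned long start_time = io_req->start_time;	/* snapshot first */

	mempool_free(io_req, fnic->io_req_pool);
	io_req = NULL;		/* object is gone; use only the snapshot */

	io_duration_time = jiffies_to_msecs(jiffies) -
			   jiffies_to_msecs(start_time);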
From: James Smart jsmart2021@gmail.com
[ Upstream commit 6c1e803eac846f886cd35131e6516fc51a8414b9 ]
When reading the sysfs nvme_info file while a remote port leaves and comes back, a NULL pointer is encountered. The issue is due to ndlp list corruption, as nvme_info_show does not use the same lock as the rest of the code.
Correct by removing the rcu_xxx_lock calls and replacing them with the host_lock and phba->hbalock spinlocks that are used by the rest of the driver. Given we're called from sysfs, we are safe to use _irq rather than _irqsave.
Link: https://lore.kernel.org/r/20191105005708.7399-4-jsmart2021@gmail.com Signed-off-by: Dick Kennedy dick.kennedy@broadcom.com Signed-off-by: James Smart jsmart2021@gmail.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/lpfc/lpfc_attr.c | 40 +++++++++++++++++------------------ 1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 25aa7a53d255e..bb973901b672d 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, int i; int len = 0; char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0}; - unsigned long iflags = 0;
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); @@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) goto buffer_done;
- rcu_read_lock(); scnprintf(tmp, sizeof(tmp), "XRI Dist lpfc%d Total %d IO %d ELS %d\n", phba->brd_no, @@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, phba->sli4_hba.io_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done;
/* Port state is only one of two values for now. */ if (localport->port_id) @@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; + + spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; - spin_lock_irqsave(&vport->phba->hbalock, iflags); + spin_lock(&vport->phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) nrport = rport->remoteport; - spin_unlock_irqrestore(&vport->phba->hbalock, iflags); + spin_unlock(&vport->phba->hbalock); if (!nrport) continue;
@@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
/* Tab in to show lport ownership. */ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; }
scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done;
/* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | @@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; }
scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } - rcu_read_unlock(); + spin_unlock_irq(shost->host_lock);
if (!lport) goto buffer_done; @@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE);
- /* RCU is already unlocked. */ + /* host_lock is already unlocked. */ goto buffer_done;
- rcu_unlock_buf_done: - rcu_read_unlock(); + unlock_buf_done: + spin_unlock_irq(shost->host_lock);
buffer_done: len = strnlen(buf, PAGE_SIZE);
From: Michael Ellerman mpe@ellerman.id.au
[ Upstream commit 6266a4dadb1d0976490fdf5af4f7941e36f64e80 ]
Otherwise the build fails because prom_init is calling symbols it's not allowed to, eg:
Error: External symbol 'ftrace_likely_update' referenced from prom_init.c
make[3]: *** [arch/powerpc/kernel/Makefile:197: arch/powerpc/kernel/prom_init_check] Error 1
Signed-off-by: Michael Ellerman mpe@ellerman.id.au Link: https://lore.kernel.org/r/20191106051129.7626-1-mpe@ellerman.id.au Signed-off-by: Sasha Levin sashal@kernel.org --- arch/powerpc/kernel/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index dc0780f930d5b..59260eb962916 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -19,6 +19,7 @@ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector) +CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code @@ -36,7 +37,6 @@ KASAN_SANITIZE_btext.o := n ifdef CONFIG_KASAN CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING -CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING endif
From: Eric Dumazet edumazet@google.com
[ Upstream commit 9ed498c6280a2f2b51d02df96df53037272ede49 ]
sk->sk_backlog.tail might be read without holding the socket spinlock, we need to add proper READ_ONCE()/WRITE_ONCE() to silence the warnings.
KCSAN reported :
BUG: KCSAN: data-race in tcp_add_backlog / tcp_recvmsg
write to 0xffff8881265109f8 of 8 bytes by interrupt on cpu 1:
 __sk_add_backlog include/net/sock.h:907 [inline]
 sk_add_backlog include/net/sock.h:938 [inline]
 tcp_add_backlog+0x476/0xce0 net/ipv4/tcp_ipv4.c:1759
 tcp_v4_rcv+0x1a70/0x1bd0 net/ipv4/tcp_ipv4.c:1947
 ip_protocol_deliver_rcu+0x4d/0x420 net/ipv4/ip_input.c:204
 ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
 NF_HOOK include/linux/netfilter.h:305 [inline]
 NF_HOOK include/linux/netfilter.h:299 [inline]
 ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
 dst_input include/net/dst.h:442 [inline]
 ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
 NF_HOOK include/linux/netfilter.h:305 [inline]
 NF_HOOK include/linux/netfilter.h:299 [inline]
 ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
 __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:4929
 __netif_receive_skb+0x37/0xf0 net/core/dev.c:5043
 netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5133
 napi_skb_finish net/core/dev.c:5596 [inline]
 napi_gro_receive+0x28f/0x330 net/core/dev.c:5629
 receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061
 virtnet_receive drivers/net/virtio_net.c:1323 [inline]
 virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428
 napi_poll net/core/dev.c:6311 [inline]
 net_rx_action+0x3ae/0xa90 net/core/dev.c:6379
 __do_softirq+0x115/0x33f kernel/softirq.c:292
 invoke_softirq kernel/softirq.c:373 [inline]
 irq_exit+0xbb/0xe0 kernel/softirq.c:413
 exiting_irq arch/x86/include/asm/apic.h:536 [inline]
 do_IRQ+0xa6/0x180 arch/x86/kernel/irq.c:263
 ret_from_intr+0x0/0x19
 native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
 arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
 default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
 cpuidle_idle_call kernel/sched/idle.c:154 [inline]
 do_idle+0x1af/0x280 kernel/sched/idle.c:263
 cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
 start_secondary+0x208/0x260 arch/x86/kernel/smpboot.c:264
 secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
read to 0xffff8881265109f8 of 8 bytes by task 8057 on cpu 0:
 tcp_recvmsg+0x46e/0x1b40 net/ipv4/tcp.c:2050
 inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838
 sock_recvmsg_nosec net/socket.c:871 [inline]
 sock_recvmsg net/socket.c:889 [inline]
 sock_recvmsg+0x92/0xb0 net/socket.c:885
 sock_read_iter+0x15f/0x1e0 net/socket.c:967
 call_read_iter include/linux/fs.h:1889 [inline]
 new_sync_read+0x389/0x4f0 fs/read_write.c:414
 __vfs_read+0xb1/0xc0 fs/read_write.c:427
 vfs_read fs/read_write.c:461 [inline]
 vfs_read+0x143/0x2c0 fs/read_write.c:446
 ksys_read+0xd5/0x1b0 fs/read_write.c:587
 __do_sys_read fs/read_write.c:597 [inline]
 __se_sys_read fs/read_write.c:595 [inline]
 __x64_sys_read+0x4c/0x60 fs/read_write.c:595
 do_syscall_64+0xcc/0x370 arch/x86/entry/common.c:290
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
Reported by Kernel Concurrency Sanitizer on:
CPU: 0 PID: 8057 Comm: syz-fuzzer Not tainted 5.4.0-rc6+ #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Signed-off-by: Eric Dumazet edumazet@google.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/crypto/chelsio/chtls/chtls_io.c | 10 +++++----- include/net/sock.h | 4 ++-- net/ipv4/tcp.c | 2 +- net/llc/af_llc.c | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index ce1f1d5d7cd5a..c403d6b64e087 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1437,7 +1437,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break;
if (copied) { @@ -1470,7 +1470,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, break; } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); @@ -1615,7 +1615,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, break; }
- if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { /* Do not sleep, just process backlog. */ release_sock(sk); lock_sock(sk); @@ -1743,7 +1743,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk);
- if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break;
if (copied) { @@ -1774,7 +1774,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } }
- if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); diff --git a/include/net/sock.h b/include/net/sock.h index 6d9c1131fe5c8..e6a48ebb22aa4 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -909,11 +909,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) skb_dst_force(skb);
if (!sk->sk_backlog.tail) - sk->sk_backlog.head = skb; + WRITE_ONCE(sk->sk_backlog.head, skb); else sk->sk_backlog.tail->next = skb;
- sk->sk_backlog.tail = skb; + WRITE_ONCE(sk->sk_backlog.tail, skb); skb->next = NULL; }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 01ddfb4156e4a..2ffa33b5ef404 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2053,7 +2053,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
/* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break;
if (copied) { diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 5abb7f9b7ee5f..fa0f3c1543ba5 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -784,7 +784,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, } /* Well, if we have backlog, try to process it now yet. */
- if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break;
if (copied) {
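The annotation rule being applied, as a generic sketch: a pointer written under a lock but read locklessly must be accessed with WRITE_ONCE()/READ_ONCE() so the compiler cannot tear, fuse, or re-read the access (the struct and function names below are illustrative, not from the patch):

	struct backlog {
		spinlock_t lock;
		struct sk_buff *tail;
	};

	/* Writer: publishes under the lock. */
	static void backlog_add(struct backlog *b, struct sk_buff *skb)
	{
		spin_lock(&b->lock);
		WRITE_ONCE(b->tail, skb);	/* pairs with the lockless read */
		spin_unlock(&b->lock);
	}

	/* Reader: peeks without the lock; READ_ONCE keeps the load whole. */
	static bool backlog_empty_lockless(struct backlog *b)
	{
		return !READ_ONCE(b->tail);
	}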
From: Dan Williams dan.j.williams@intel.com
[ Upstream commit 460370ab20b6cc174256e46e192adf01e730faf6 ]
PFN flags are unsigned long long; fix the alloc_dax_region() calling convention to resolve warnings of the form:
include/linux/pfn_t.h:18:17: warning: large integer implicitly truncated to unsigned type [-Woverflow]
#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
Reported-by: kbuild test robot lkp@intel.com Signed-off-by: Dan Williams dan.j.williams@intel.com Acked-by: Thomas Gleixner tglx@linutronix.de Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/dax/bus.c | 2 +- drivers/dax/bus.h | 2 +- drivers/dax/dax-private.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index 8fafbeab510a8..eccdda1f7b71b 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -227,7 +227,7 @@ static void dax_region_unregister(void *region)
struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct resource *res, int target_node, unsigned int align, - unsigned long pfn_flags) + unsigned long long pfn_flags) { struct dax_region *dax_region;
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h index 8619e32999436..9e4eba67e8b98 100644 --- a/drivers/dax/bus.h +++ b/drivers/dax/bus.h @@ -11,7 +11,7 @@ struct dax_region; void dax_region_put(struct dax_region *dax_region); struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct resource *res, int target_node, unsigned int align, - unsigned long flags); + unsigned long long flags);
enum dev_dax_subsys { DEV_DAX_BUS, diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index 6ccca3b890d6f..3107ce80e8090 100644 --- a/drivers/dax/dax-private.h +++ b/drivers/dax/dax-private.h @@ -32,7 +32,7 @@ struct dax_region { struct device *dev; unsigned int align; struct resource res; - unsigned long pfn_flags; + unsigned long long pfn_flags; };
/**
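The warning comes from plain integer truncation: PFN_DEV lives in bit 61, which fits an unsigned long long but not a 32-bit unsigned long. A standalone sketch of the effect (userspace C, for illustration only):

	#include <stdio.h>

	#define BITS_PER_LONG_LONG 64
	#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))	/* bit 61 */

	int main(void)
	{
		unsigned long flags = PFN_DEV;	     /* truncates to 0 where long is 32-bit */
		unsigned long long flags_ll = PFN_DEV; /* always preserved */

		printf("unsigned long:      %#lx\n", flags);
		printf("unsigned long long: %#llx\n", flags_ll);
		return 0;
	}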
From: Andreas Gruenbacher agruenba@redhat.com
[ Upstream commit add66fcbd3fbe5aa0dd4dddfa23e119c12989a27 ]
On architectures where loff_t is wider than pgoff_t, the expression ((page->index + 1) << PAGE_SHIFT) can overflow. Rewrite to use the page offset, which we already compute here anyway.
Signed-off-by: Andreas Gruenbacher agruenba@redhat.com Reviewed-by: Christoph Hellwig hch@lst.de Reviewed-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/iomap/buffered-io.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index e25901ae3ff44..a30ea7ecb790a 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1040,20 +1040,19 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
lock_page(page); size = i_size_read(inode); - if ((page->mapping != inode->i_mapping) || - (page_offset(page) > size)) { + offset = page_offset(page); + if (page->mapping != inode->i_mapping || offset > size) { /* We overload EFAULT to mean page got truncated */ ret = -EFAULT; goto out_unlock; }
/* page is wholly or partially inside EOF */ - if (((page->index + 1) << PAGE_SHIFT) > size) + if (offset > size - PAGE_SIZE) length = offset_in_page(size); else length = PAGE_SIZE;
- offset = page_offset(page); while (length > 0) { ret = iomap_apply(inode, offset, length, IOMAP_WRITE | IOMAP_FAULT, ops, page,
From: Jaegeuk Kim jaegeuk@kernel.org
[ Upstream commit bc005a4d5347da68e690f78d365d8927c87dc85a ]
xfstests generic/475 triggers a kernel warning/panic while testing a corrupted disk.
Reviewed-by: Chao Yu yuchao0@huawei.com Signed-off-by: Jaegeuk Kim jaegeuk@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- fs/f2fs/node.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index daeac4268c1ab..e6f1b1d0c3b68 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2350,7 +2350,6 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
if (ret) { up_read(&nm_i->nat_tree_lock); - f2fs_bug_on(sbi, !mount); f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); return ret; }
From: Stephen Kitt steve@sk2.org
[ Upstream commit 7f6ac72946b88b89ee44c1c527aa8591ac5ffcbe ]
The buffer allocated in ti_adpll_clk_get_name doesn't account for the terminating null. This patch switches to devm_kasprintf to avoid overflowing.
Signed-off-by: Stephen Kitt steve@sk2.org Link: https://lkml.kernel.org/r/20191019140634.15596-1-steve@sk2.org Acked-by: Tony Lindgren tony@atomide.com Signed-off-by: Stephen Boyd sboyd@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/ti/adpll.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index fdfb90058504c..bb2f2836dab22 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c @@ -194,15 +194,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d, if (err) return NULL; } else { - const char *base_name = "adpll"; - char *buf; - - buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 + - strlen(postfix), GFP_KERNEL); - if (!buf) - return NULL; - sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix); - name = buf; + name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s", + d->pa, postfix); }
return name;
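Besides being shorter, devm_kasprintf() sizes the buffer itself. A small userspace sketch of the off-by-one in the old sizing, with an illustrative postfix (the real one comes from the clock output name):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *postfix = "dcoclkldo";      /* illustrative */
        /* Old allocation: 8 hex digits + '.' + "adpll" + '.' + postfix */
        size_t alloced = 8 + 1 + strlen("adpll") + 1 + strlen(postfix);
        char buf[64];
        /* sprintf() also writes a terminating NUL, one byte more. A pa
         * wider than 32 bits would print more than 8 digits, too, since
         * "%08lx" is only a minimum field width. */
        size_t needed = snprintf(buf, sizeof(buf), "%08lx.adpll.%s",
                                 0x4a0040e0UL, postfix) + 1;
        printf("allocated %zu bytes, needed %zu\n", alloced, needed);
        return 0;
}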
From: Alex Deucher alexander.deucher@amd.com
[ Upstream commit 53dbc27ad5a93932ff1892a8e4ef266827d74a0f ]
When a custom powerplay table is provided, we need to update the OD VDDC flag to avoid AVFS being enabled when it shouldn't be.
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205393 Reviewed-by: Evan Quan evan.quan@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index beacfffbdc3eb..ecbc9daea57e0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3691,6 +3691,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result);
+ /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if (hwmgr->hardcode_pp_table != NULL) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + vega10_update_avfs(hwmgr);
/*
From: Usha Ketineni usha.k.ketineni@intel.com
[ Upstream commit c0a3665f71a2f086800abea4d9d14d28269089d6 ]
This patch fixes a kernel call trace seen when a Rx/Tx descriptor size change is requested via ethtool while DCB is configured. ice_set_ringparam() should use vsi->num_txq instead of vsi->alloc_txq, as num_txq represents the queues that are enabled in the driver when DCB is enabled/disabled. Otherwise, the queue index in use can go out of range.
For example, when vsi->alloc_txq has 104 queues and 3 TCs are enabled via DCB, each TC gets 34 queues; vsi->num_txq will be 102 and only 102 queues will be enabled.
Signed-off-by: Usha Ketineni usha.k.ketineni@intel.com Tested-by: Andrew Bowers andrewx.bowers@intel.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 62673e27af0e8..fc9ff985a62bd 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2635,14 +2635,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n", vsi->tx_rings[0]->count, new_tx_cnt);
- tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL); if (!tx_rings) { err = -ENOMEM; goto done; }
- for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_txq(vsi, i) { /* clone ring and setup updated count */ tx_rings[i] = *vsi->tx_rings[i]; tx_rings[i].count = new_tx_cnt; @@ -2667,14 +2667,14 @@ process_rx: netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", vsi->rx_rings[0]->count, new_rx_cnt);
- rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL); if (!rx_rings) { err = -ENOMEM; goto done; }
- for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_rxq(vsi, i) { /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_cnt; @@ -2712,7 +2712,7 @@ process_link: ice_down(vsi);
if (tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) { + ice_for_each_txq(vsi, i) { ice_free_tx_ring(vsi->tx_rings[i]); *vsi->tx_rings[i] = tx_rings[i]; } @@ -2720,7 +2720,7 @@ process_link: }
if (rx_rings) { - for (i = 0; i < vsi->alloc_rxq; i++) { + ice_for_each_rxq(vsi, i) { ice_free_rx_ring(vsi->rx_rings[i]); /* copy the real tail offset */ rx_rings[i].tail = vsi->rx_rings[i]->tail; @@ -2744,7 +2744,7 @@ process_link: free_tx: /* error cleanup if the Rx allocations failed after getting Tx */ if (tx_rings) { - for (i = 0; i < vsi->alloc_txq; i++) + ice_for_each_txq(vsi, i) ice_free_tx_ring(&tx_rings[i]); devm_kfree(&pf->pdev->dev, tx_rings); }
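The ice_for_each_txq()/ice_for_each_rxq() helpers bound the walk by the enabled queue count rather than the allocated one. A sketch of the distinction, assuming the macros' usual shape in ice.h (queue counts taken from the example above):

#include <stdio.h>

struct vsi {
        int alloc_txq;  /* rings allocated for the VSI */
        int num_txq;    /* rings actually enabled; DCB can shrink this */
};

/* assumed shape of ice_for_each_txq() in ice.h */
#define for_each_txq(v, i) for ((i) = 0; (i) < (v)->num_txq; (i)++)

int main(void)
{
        struct vsi v = { .alloc_txq = 104, .num_txq = 3 * 34 };
        int i, visited = 0;

        for_each_txq(&v, i)
                visited++;      /* touches only rings that were set up */
        printf("visited %d of %d allocated rings\n", visited, v.alloc_txq);
        return 0;
}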
From: Hou Tao houtao1@huawei.com
[ Upstream commit 03976af89e3bd9489d542582a325892e6a8cacc0 ]
Otherwise there may be a double-free problem, because cfi->cfiq will also be freed by mtd_do_chip_probe() if both invocations of check_cmd_set() return failure.
Signed-off-by: Hou Tao houtao1@huawei.com Reviewed-by: Richard Weinberger richard@nod.at Signed-off-by: Vignesh Raghavendra vigneshr@ti.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/mtd/chips/cfi_cmdset_0002.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index a4f2d8cdca120..c8b9ab40a1027 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -794,7 +794,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) kfree(mtd->eraseregions); kfree(mtd); kfree(cfi->cmdset_priv); - kfree(cfi->cfiq); return NULL; }
From: Lee Jones lee.jones@linaro.org
[ Upstream commit b195e101580db390f50b0d587b7f66f241d2bc88 ]
If a child device calls mfd_cell_{en,dis}able() without an appropriate call-back being set, we are likely to encounter a panic. Avoid this by adding suitable checking.
Signed-off-by: Lee Jones lee.jones@linaro.org Reviewed-by: Daniel Thompson daniel.thompson@linaro.org Reviewed-by: Mark Brown broonie@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/mfd/mfd-core.c | 10 ++++++++++ 1 file changed, 10 insertions(+)
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 23276a80e3b48..96d02b6f06fd8 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -28,6 +28,11 @@ int mfd_cell_enable(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); int err = 0;
+ if (!cell->enable) { + dev_dbg(&pdev->dev, "No .enable() call-back registered\n"); + return 0; + } + /* only call enable hook if the cell wasn't previously enabled */ if (atomic_inc_return(cell->usage_count) == 1) err = cell->enable(pdev); @@ -45,6 +50,11 @@ int mfd_cell_disable(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); int err = 0;
+ if (!cell->disable) { + dev_dbg(&pdev->dev, "No .disable() call-back registered\n"); + return 0; + } + /* only disable if no other clients are using it */ if (atomic_dec_return(cell->usage_count) == 0) err = cell->disable(pdev);
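A hypothetical child illustrates the failure mode: before this patch, a cell declared without hooks would crash the first mfd_cell_enable() caller with a NULL dereference; now the call is a debug-logged no-op. A minimal sketch (cell and probe names are illustrative):

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

/* hypothetical cell, no .enable/.disable provided */
static const struct mfd_cell example_cells[] = {
        { .name = "example-child" },
};

static int example_child_probe(struct platform_device *pdev)
{
        /* used to dereference cell->enable == NULL; now returns 0 */
        return mfd_cell_enable(pdev);
}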
From: Alex Deucher alexander.deucher@amd.com
[ Upstream commit 901245624c7812b6c95d67177bae850e783b5212 ]
When a custom powerplay table is provided, we need to update the OD VDDC flag to avoid AVFS being enabled when it shouldn't be.
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205393 Reviewed-by: Evan Quan evan.quan@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index e6da53e9c3f46..edd6d4912edeb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3986,6 +3986,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);
+ /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if (hwmgr->hardcode_pp_table != NULL) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + tmp_result = smu7_update_avfs(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update avfs voltages!",
From: Johan Hovold johan@kernel.org
[ Upstream commit 960fbd1ca584a5b4cd818255769769d42bfc6dbe ]
The driver would return success and leave the port structures half-initialised if any of the register accesses during probe fails.
This would specifically leave the port control urb unallocated, something which could trigger a NULL pointer dereference on interrupt events.
Fortunately the interrupt implementation is completely broken and has never even been enabled...
Note that the zero-length-enable register write used to set the zle-flag for all ports is moved to attach.
Reviewed-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Johan Hovold johan@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/usb/serial/mos7840.c | 48 +++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index ab4bf8d6d7df0..e105ff0eb92e5 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -2090,6 +2090,23 @@ static int mos7840_calc_num_ports(struct usb_serial *serial, return num_ports; }
+static int mos7840_attach(struct usb_serial *serial) +{ + struct device *dev = &serial->interface->dev; + int status; + u16 val; + + /* Zero Length flag enable */ + val = 0x0f; + status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, val); + if (status < 0) + dev_dbg(dev, "Writing ZLP_REG5 failed status-0x%x\n", status); + else + dev_dbg(dev, "ZLP_REG5 Writing success status%d\n", status); + + return status; +} + static int mos7840_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; @@ -2147,7 +2164,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) mos7840_port->ControlRegOffset, &Data); if (status < 0) { dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status); - goto out; + goto error; } else dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status); Data |= 0x08; /* setting driver done bit */ @@ -2159,7 +2176,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) mos7840_port->ControlRegOffset, Data); if (status < 0) { dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status); - goto out; + goto error; } else dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
@@ -2170,7 +2187,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) (__u16) (mos7840_port->DcrRegOffset + 0), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status); - goto out; + goto error; } else dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status);
@@ -2179,7 +2196,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) (__u16) (mos7840_port->DcrRegOffset + 1), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status); - goto out; + goto error; } else dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status);
@@ -2188,7 +2205,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) (__u16) (mos7840_port->DcrRegOffset + 2), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status); - goto out; + goto error; } else dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status);
@@ -2197,7 +2214,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status); - goto out; + goto error; } else dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
@@ -2214,7 +2231,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status); - goto out; + goto error; } else dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
@@ -2228,7 +2245,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num))); if (status < 0) { dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status); - goto out; + goto error; } else dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status); } else { @@ -2240,7 +2257,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1)); if (status < 0) { dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status); - goto out; + goto error; } else dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status);
@@ -2280,17 +2297,7 @@ static int mos7840_port_probe(struct usb_serial_port *port) /* Turn off LED */ mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300); } -out: - if (pnum == serial->num_ports - 1) { - /* Zero Length flag enable */ - Data = 0x0f; - status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data); - if (status < 0) { - dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status); - goto error; - } else - dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status); - } + return 0; error: kfree(mos7840_port->led_dr); @@ -2346,6 +2353,7 @@ static struct usb_serial_driver moschip7840_4port_device = { .unthrottle = mos7840_unthrottle, .calc_num_ports = mos7840_calc_num_ports, .probe = mos7840_probe, + .attach = mos7840_attach, .ioctl = mos7840_ioctl, .get_serial = mos7840_get_serial_info, .set_termios = mos7840_set_termios,
On Thu, Sep 17, 2020 at 09:56:21PM -0400, Sasha Levin wrote:
From: Johan Hovold johan@kernel.org
[ Upstream commit 960fbd1ca584a5b4cd818255769769d42bfc6dbe ]
The driver would return success and leave the port structures half-initialised if any of the register accesses during probe fails.
This would specifically leave the port control urb unallocated, something which could trigger a NULL pointer dereference on interrupt events.
Fortunately the interrupt implementation is completely broken and has never even been enabled...
Note that the zero-length-enable register write used to set the zle-flag for all ports is moved to attach.
Reviewed-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Johan Hovold johan@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org
Please drop this from all stable queues. As the commit message and missing stable-cc tag suggest, it's not needed.
Sasha, please stop sending AUTOSEL patches for usb-serial. I think this is the fourth time I've asked you now.
Johan
On Fri, Sep 18, 2020 at 08:53:00AM +0200, Johan Hovold wrote:
Please drop this from all stable queues. As the commit message and missing stable-cc tag suggest, it's not needed.
Sasha, please stop sending AUTOSEL patches for usb-serial. I think this is the fourth time I've asked you now.
Right, this series is a bit different: it didn't originate from the AUTOSEL work but rather from an audit of patches picked up by downstream kernels as fixes.
I'll drop it, sorry for the noise.
From: Ivan Lazeev ivan.lazeev@gmail.com
[ Upstream commit 3ef193822b25e9ee629974f66dc1ff65167f770c ]
Bug link: https://bugzilla.kernel.org/show_bug.cgi?id=195657
cmd/rsp buffers are expected to be in the same ACPI region. For Zen+ CPUs, BIOSes might report two different regions, and some of them also report region sizes inconsistent with the values from TPM registers.
Memory configuration on ASRock x470 ITX:
db0a0000-dc59efff : Reserved
  dc57e000-dc57efff : MSFT0101:00
  dc582000-dc582fff : MSFT0101:00
Work around the issue by storing the ACPI regions declared for the device in a fixed array and adding an array of pointers to the corresponding (possibly allocated) resources in the crb_map_io() function. This data was previously held for a single resource in struct crb_priv (the iobase field) and the local variable io_res in crb_map_io(). The ACPI resource array is used to find the index of the corresponding region for each buffer and to make the buffer size consistent with the region's length. The array of pointers to allocated resources is used to map each region at most once.
Signed-off-by: Ivan Lazeev ivan.lazeev@gmail.com Tested-by: Jerry Snitselaar jsnitsel@redhat.com Tested-by: Jarkko Sakkinen jarkko.sakkinen@linux.intel.com Reviewed-by: Jarkko Sakkinen jarkko.sakkinen@linux.intel.com Signed-off-by: Jarkko Sakkinen jarkko.sakkinen@linux.intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/char/tpm/tpm_crb.c | 123 +++++++++++++++++++++++++++---------- 1 file changed, 90 insertions(+), 33 deletions(-)
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index e59f1f91d7f3e..a9dcf31eadd21 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -22,6 +22,7 @@ #include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2" +#define TPM_CRB_MAX_RESOURCES 3
static const guid_t crb_acpi_start_guid = GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714, @@ -91,7 +92,6 @@ enum crb_status { struct crb_priv { u32 sm; const char *hid; - void __iomem *iobase; struct crb_regs_head __iomem *regs_h; struct crb_regs_tail __iomem *regs_t; u8 __iomem *cmd; @@ -434,21 +434,27 @@ static const struct tpm_class_ops tpm_crb = {
static int crb_check_resource(struct acpi_resource *ares, void *data) { - struct resource *io_res = data; + struct resource *iores_array = data; struct resource_win win; struct resource *res = &(win.res); + int i;
if (acpi_dev_resource_memory(ares, res) || acpi_dev_resource_address_space(ares, &win)) { - *io_res = *res; - io_res->name = NULL; + for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) { + if (resource_type(iores_array + i) != IORESOURCE_MEM) { + iores_array[i] = *res; + iores_array[i].name = NULL; + break; + } + } }
return 1; }
-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, - struct resource *io_res, u64 start, u32 size) +static void __iomem *crb_map_res(struct device *dev, struct resource *iores, + void __iomem **iobase_ptr, u64 start, u32 size) { struct resource new_res = { .start = start, @@ -460,10 +466,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, if (start != new_res.start) return (void __iomem *) ERR_PTR(-EINVAL);
- if (!resource_contains(io_res, &new_res)) + if (!iores) return devm_ioremap_resource(dev, &new_res);
- return priv->iobase + (new_res.start - io_res->start); + if (!*iobase_ptr) { + *iobase_ptr = devm_ioremap_resource(dev, iores); + if (IS_ERR(*iobase_ptr)) + return *iobase_ptr; + } + + return *iobase_ptr + (new_res.start - iores->start); }
/* @@ -490,9 +502,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res, static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, struct acpi_table_tpm2 *buf) { - struct list_head resources; - struct resource io_res; + struct list_head acpi_resource_list; + struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} }; + void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL}; struct device *dev = &device->dev; + struct resource *iores; + void __iomem **iobase_ptr; + int i; u32 pa_high, pa_low; u64 cmd_pa; u32 cmd_size; @@ -501,21 +517,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, u32 rsp_size; int ret;
- INIT_LIST_HEAD(&resources); - ret = acpi_dev_get_resources(device, &resources, crb_check_resource, - &io_res); + INIT_LIST_HEAD(&acpi_resource_list); + ret = acpi_dev_get_resources(device, &acpi_resource_list, + crb_check_resource, iores_array); if (ret < 0) return ret; - acpi_dev_free_resource_list(&resources); + acpi_dev_free_resource_list(&acpi_resource_list);
- if (resource_type(&io_res) != IORESOURCE_MEM) { + if (resource_type(iores_array) != IORESOURCE_MEM) { dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n"); return -EINVAL; + } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) == + IORESOURCE_MEM) { + dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n"); + memset(iores_array + TPM_CRB_MAX_RESOURCES, + 0, sizeof(*iores_array)); + iores_array[TPM_CRB_MAX_RESOURCES].flags = 0; }
- priv->iobase = devm_ioremap_resource(dev, &io_res); - if (IS_ERR(priv->iobase)) - return PTR_ERR(priv->iobase); + iores = NULL; + iobase_ptr = NULL; + for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { + if (buf->control_address >= iores_array[i].start && + buf->control_address + sizeof(struct crb_regs_tail) - 1 <= + iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address, + sizeof(struct crb_regs_tail)); + + if (IS_ERR(priv->regs_t)) + return PTR_ERR(priv->regs_t);
/* The ACPI IO region starts at the head area and continues to include * the control area, as one nice sane region except for some older @@ -523,9 +559,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, */ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { - if (buf->control_address == io_res.start + + if (iores && + buf->control_address == iores->start + sizeof(*priv->regs_h)) - priv->regs_h = priv->iobase; + priv->regs_h = *iobase_ptr; else dev_warn(dev, FW_BUG "Bad ACPI memory layout"); } @@ -534,13 +571,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, if (ret) return ret;
- priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address, - sizeof(struct crb_regs_tail)); - if (IS_ERR(priv->regs_t)) { - ret = PTR_ERR(priv->regs_t); - goto out_relinquish_locality; - } - /* * PTT HW bug w/a: wake up the device to access * possibly not retained registers. @@ -552,13 +582,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high); pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low); cmd_pa = ((u64)pa_high << 32) | pa_low; - cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa, - ioread32(&priv->regs_t->ctrl_cmd_size)); + cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size); + + iores = NULL; + iobase_ptr = NULL; + for (i = 0; iores_array[i].end; ++i) { + if (cmd_pa >= iores_array[i].start && + cmd_pa <= iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + if (iores) + cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n", pa_high, pa_low, cmd_size);
- priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size); + priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size); if (IS_ERR(priv->cmd)) { ret = PTR_ERR(priv->cmd); goto out; @@ -566,11 +609,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8); rsp_pa = le64_to_cpu(__rsp_pa); - rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa, - ioread32(&priv->regs_t->ctrl_rsp_size)); + rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size); + + iores = NULL; + iobase_ptr = NULL; + for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { + if (rsp_pa >= iores_array[i].start && + rsp_pa <= iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + if (iores) + rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
if (cmd_pa != rsp_pa) { - priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size); + priv->rsp = crb_map_res(dev, iores, iobase_ptr, + rsp_pa, rsp_size); ret = PTR_ERR_OR_ZERO(priv->rsp); goto out; }
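The patch open-codes the same scan three times, for the control area, the command buffer and the response buffer: find the firmware-declared region that contains the address, or fall back to a direct mapping if none does. A sketch of that lookup as a hypothetical helper (note that the command/response loops in the patch match on the base address only):

static struct resource *crb_find_region(struct resource *iores_array,
                                        u64 start, u32 size)
{
        int i;

        for (i = 0; resource_type(&iores_array[i]) == IORESOURCE_MEM; i++) {
                if (start >= iores_array[i].start &&
                    start + size - 1 <= iores_array[i].end)
                        return &iores_array[i];
        }

        return NULL;    /* crb_map_res() will then ioremap the range itself */
}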
From: Divya Indi divya.indi@oracle.com
[ Upstream commit e585e6469d6f476b82aa148dc44aaf7ae269a4e2 ]
A trace array can be destroyed from userspace or from the kernel. Verify that the trace array exists before proceeding to destroy/remove it.
Link: http://lkml.kernel.org/r/1565805327-579-3-git-send-email-divya.indi@oracle.c...
Reviewed-by: Aruna Ramakrishna aruna.ramakrishna@oracle.com Signed-off-by: Divya Indi divya.indi@oracle.com [ Removed unneeded braces ] Signed-off-by: Steven Rostedt (VMware) rostedt@goodmis.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/module.c | 6 +++++- kernel/trace/trace.c | 15 ++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c index 819c5d3b4c295..0e3743dd3a568 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3753,7 +3753,6 @@ static int complete_formation(struct module *mod, struct load_info *info)
module_enable_ro(mod, false); module_enable_nx(mod); - module_enable_x(mod);
/* Mark state as coming so strong_try_module_get() ignores us, * but kallsyms etc. can see us. */ @@ -3776,6 +3775,11 @@ static int prepare_coming_module(struct module *mod) if (err) return err;
+ /* Make module executable after ftrace is enabled */ + mutex_lock(&module_mutex); + module_enable_x(mod); + mutex_unlock(&module_mutex); + blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); return 0; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f9c2bdbbd8936..cd3d91554aff1 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -8502,17 +8502,26 @@ static int __remove_instance(struct trace_array *tr) return 0; }
-int trace_array_destroy(struct trace_array *tr) +int trace_array_destroy(struct trace_array *this_tr) { + struct trace_array *tr; int ret;
- if (!tr) + if (!this_tr) return -EINVAL;
mutex_lock(&event_mutex); mutex_lock(&trace_types_lock);
- ret = __remove_instance(tr); + ret = -ENODEV; + + /* Making sure trace array exists before destroying it. */ + list_for_each_entry(tr, &ftrace_trace_arrays, list) { + if (tr == this_tr) { + ret = __remove_instance(tr); + break; + } + }
mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex);
From: Divya Indi divya.indi@oracle.com
[ Upstream commit 953ae45a0c25e09428d4a03d7654f97ab8a36647 ]
As part of commit f45d1225adb0 ("tracing: Kernel access to Ftrace instances") we exported certain functions. Here, we are adding some additional NULL checks to ensure safe usage by users of these APIs.
Link: http://lkml.kernel.org/r/1565805327-579-4-git-send-email-divya.indi@oracle.c...
Signed-off-by: Divya Indi divya.indi@oracle.com Signed-off-by: Steven Rostedt (VMware) rostedt@goodmis.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/trace/trace.c | 3 +++ kernel/trace/trace_events.c | 2 ++ 2 files changed, 5 insertions(+)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index cd3d91554aff1..9007f5edbb207 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3233,6 +3233,9 @@ int trace_array_printk(struct trace_array *tr, if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0;
+ if (!tr) + return -ENOENT; + va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index ed9eb97b64b47..309b2b3c5349e 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -793,6 +793,8 @@ int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) char *event = NULL, *sub = NULL, *match; int ret;
+ if (!tr) + return -ENOENT; /* * The buf format can be <subsystem>:<event-name> * *:<event-name> means any event by that name.
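Taken together, the two patches make the exported instance API safe to call with a stale or NULL trace_array. A hedged sketch of an in-kernel user, assuming the 5.4-era API is visible to modules (declarations for these exports moved into include/linux/trace.h in later kernels; the instance name and event are illustrative):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/trace.h>
#include <linux/trace_events.h>

static struct trace_array *tr;

static int __init example_init(void)
{
        tr = trace_array_create("example");     /* illustrative name */
        if (IS_ERR(tr))
                return PTR_ERR(tr);

        /* both now return -ENOENT instead of oopsing on a NULL tr */
        ftrace_set_clr_event(tr, "sched:sched_switch", 1);
        trace_array_printk(tr, _THIS_IP_, "instance up\n");
        return 0;
}

static void __exit example_exit(void)
{
        /* now verified against ftrace_trace_arrays before removal */
        trace_array_destroy(tr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");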
From: Guoju Fang fangguoju@gmail.com
[ Upstream commit 34cf78bf34d48dddddfeeadb44f9841d7864997a ]
This patch fixes a lost wake-up problem caused by the race between mca_cannibalize_lock and bch_cannibalize_unlock.
Consider two processes, A and B. Process A is executing mca_cannibalize_lock, while process B takes c->btree_cache_alloc_lock and is executing bch_cannibalize_unlock. The problem occurs when process B executes wake_up after process A's cmpxchg but before its prepare_to_wait. Process A then executes prepare_to_wait, sets its state to TASK_UNINTERRUPTIBLE, and goes to sleep with nobody left to wake it. This problem can cause the bcache device to hang.
Signed-off-by: Guoju Fang fangguoju@gmail.com Signed-off-by: Coly Li colyli@suse.de Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/md/bcache/bcache.h | 1 + drivers/md/bcache/btree.c | 12 ++++++++---- drivers/md/bcache/super.c | 1 + 3 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 217c838a1b405..859567ad3db4e 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -585,6 +585,7 @@ struct cache_set { */ wait_queue_head_t btree_cache_wait; struct task_struct *btree_cache_alloc_lock; + spinlock_t btree_cannibalize_lock;
/* * When we free a btree node, we increment the gen of the bucket the diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 46556bde032e2..8d06105fc9ff5 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -886,15 +886,17 @@ out:
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) { - struct task_struct *old; - - old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); - if (old && old != current) { + spin_lock(&c->btree_cannibalize_lock); + if (likely(c->btree_cache_alloc_lock == NULL)) { + c->btree_cache_alloc_lock = current; + } else if (c->btree_cache_alloc_lock != current) { if (op) prepare_to_wait(&c->btree_cache_wait, &op->wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&c->btree_cannibalize_lock); return -EINTR; } + spin_unlock(&c->btree_cannibalize_lock);
return 0; } @@ -929,10 +931,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, */ static void bch_cannibalize_unlock(struct cache_set *c) { + spin_lock(&c->btree_cannibalize_lock); if (c->btree_cache_alloc_lock == current) { c->btree_cache_alloc_lock = NULL; wake_up(&c->btree_cache_wait); } + spin_unlock(&c->btree_cannibalize_lock); }
static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2cbfcd99b7ee7..63f5ce18311bb 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1798,6 +1798,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) sema_init(&c->sb_write_mutex, 1); mutex_init(&c->bucket_lock); init_waitqueue_head(&c->btree_cache_wait); + spin_lock_init(&c->btree_cannibalize_lock); init_waitqueue_head(&c->bucket_wait); init_waitqueue_head(&c->gc_wait); sema_init(&c->uuid_write_mutex, 1);
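Spelled out, the lost wake-up window in the old lock-free scheme looks like this (an editorial illustration of the sequence described above):

A: cmpxchg(&c->btree_cache_alloc_lock, NULL, current)  -> fails, B holds it
                        B: c->btree_cache_alloc_lock = NULL
                        B: wake_up(&c->btree_cache_wait)
A: prepare_to_wait(&c->btree_cache_wait, ...)  <- registers after the
                                                  wake-up already happened
A: schedule()  <- sleeps with nobody left to wake it

Holding btree_cannibalize_lock across both the holder check and prepare_to_wait() closes the window: either A is registered on the waitqueue before B's wake_up() runs, or A observes the lock as free and takes it itself.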
From: Satendra Singh Thakur sst2005@gmail.com
[ Upstream commit 1ff95243257fad07290dcbc5f7a6ad79d6e703e2 ]
When devm_request_irq() fails, the function dma_async_device_unregister() currently gets called. This doesn't free the resources allocated by of_dma_controller_register(). Therefore, call of_dma_controller_free() in this error path as well.
Signed-off-by: Satendra Singh Thakur sst2005@gmail.com Link: https://lore.kernel.org/r/20191109113523.6067-1-sst2005@gmail.com Signed-off-by: Vinod Koul vkoul@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/dma/mediatek/mtk-hsdma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index 1a2028e1c29e9..4c58da7421432 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "request_irq failed with err %d\n", err); - goto err_unregister; + goto err_free; }
platform_set_drvdata(pdev, hsdma); @@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
+err_free: + of_dma_controller_free(pdev->dev.of_node); err_unregister: dma_async_device_unregister(dd);
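The resulting unwind in probe follows the usual reverse-order goto ladder. A condensed sketch of the tail of mtk_hsdma_probe() after this patch (callback and variable names are abbreviated from the driver and should be treated as assumptions):

        err = dma_async_device_register(dd);
        if (err)
                return err;

        err = of_dma_controller_register(pdev->dev.of_node,
                                         of_dma_xlate_by_chan_id, hsdma);
        if (err)
                goto err_unregister;

        err = devm_request_irq(&pdev->dev, hsdma->irq, mtk_hsdma_irq,
                               0, KBUILD_MODNAME, hsdma);
        if (err)
                goto err_free;

        return 0;

err_free:
        of_dma_controller_free(pdev->dev.of_node);
err_unregister:
        dma_async_device_unregister(dd);
        return err;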
From: Lianbo Jiang lijiang@redhat.com
[ Upstream commit 6f599d84231fd27e42f4ca2a786a6641e8cddf00 ]
On x86, purgatory() copies the first 640K of memory to a backup region because the kernel needs those first 640K for the real mode trampoline during boot, among others.
However, when SME is enabled, the kernel cannot properly copy the old memory to the backup area but reads only its encrypted contents. The result is that the crash tool gets invalid pointers when parsing vmcore:
crash> kmem -s|grep -i invalid
kmem: dma-kmalloc-512: slab:ffffd77680001c00 invalid freepointer:a6086ac099f0c5a4
kmem: dma-kmalloc-512: slab:ffffd77680001c00 invalid freepointer:a6086ac099f0c5a4
crash>
So reserve the remaining low 1M memory when the crashkernel option is specified (after reserving real mode memory) so that allocated memory does not fall into the low 1M area and thus the copying of the contents of the first 640k to a backup region in purgatory() can be avoided altogether.
This way, it does not need to be included in crash dumps or used for anything except the trampolines that must live in the low 1M.
[ bp: Heavily rewrite commit message, flip check logic in crash_reserve_low_1M().]
Signed-off-by: Lianbo Jiang lijiang@redhat.com Signed-off-by: Borislav Petkov bp@suse.de Cc: bhe@redhat.com Cc: Dave Young dyoung@redhat.com Cc: d.hatayama@fujitsu.com Cc: dhowells@redhat.com Cc: ebiederm@xmission.com Cc: horms@verge.net.au Cc: "H. Peter Anvin" hpa@zytor.com Cc: Ingo Molnar mingo@redhat.com Cc: Jürgen Gross jgross@suse.com Cc: kexec@lists.infradead.org Cc: Peter Zijlstra peterz@infradead.org Cc: Thomas Gleixner tglx@linutronix.de Cc: Tom Lendacky thomas.lendacky@amd.com Cc: vgoyal@redhat.com Cc: x86-ml x86@kernel.org Link: https://lkml.kernel.org/r/20191108090027.11082-2-lijiang@redhat.com Link: https://bugzilla.kernel.org/show_bug.cgi?id=204793 Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/include/asm/crash.h | 6 ++++++ arch/x86/kernel/crash.c | 15 +++++++++++++++ arch/x86/realmode/init.c | 2 ++ 3 files changed, 23 insertions(+)
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h index ef5638f641f2b..88eadd08ad708 100644 --- a/arch/x86/include/asm/crash.h +++ b/arch/x86/include/asm/crash.h @@ -10,4 +10,10 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params); void crash_smp_send_stop(void);
+#ifdef CONFIG_KEXEC_CORE +void __init crash_reserve_low_1M(void); +#else +static inline void __init crash_reserve_low_1M(void) { } +#endif + #endif /* _ASM_X86_CRASH_H */ diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index eb651fbde92ac..ff25a2ea271cf 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -24,6 +24,7 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/memblock.h>
#include <asm/processor.h> #include <asm/hardirq.h> @@ -39,6 +40,7 @@ #include <asm/virtext.h> #include <asm/intel_pt.h> #include <asm/crash.h> +#include <asm/cmdline.h>
/* Used while preparing memory map entries for second kernel */ struct crash_memmap_data { @@ -68,6 +70,19 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void) rcu_read_unlock(); }
+/* + * When the crashkernel option is specified, only use the low + * 1M for the real mode trampoline. + */ +void __init crash_reserve_low_1M(void) +{ + if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0) + return; + + memblock_reserve(0, 1<<20); + pr_info("Reserving the low 1M of memory for crashkernel\n"); +} + #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs) diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 7dce39c8c034a..262f83cad3551 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -8,6 +8,7 @@ #include <asm/pgtable.h> #include <asm/realmode.h> #include <asm/tlbflush.h> +#include <asm/crash.h>
struct real_mode_header *real_mode_header; u32 *trampoline_cr4_features; @@ -34,6 +35,7 @@ void __init reserve_real_mode(void)
memblock_reserve(mem, size); set_real_mode_mem(mem); + crash_reserve_low_1M(); }
static void __init setup_real_mode(void)
From: Pan Bian bianpan2016@163.com
[ Upstream commit 960657b732e1ce21b07be5ab48a7ad3913d72ba4 ]
Move the release operation after the error log to avoid a possible use after free.
Link: https://lore.kernel.org/r/1573021434-18768-1-git-send-email-bianpan2016@163.... Signed-off-by: Pan Bian bianpan2016@163.com Acked-by: Michal Kalderon michal.kalderon@marvell.com Reviewed-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/hw/qedr/qedr_iw_cm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index a7a926b7b5628..6dea49e11f5f0 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -490,10 +490,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
if ((!dst) || dst->error) { if (dst) { - dst_release(dst); DP_ERR(dev, "ip6_route_output returned dst->error = %d\n", dst->error); + dst_release(dst); } return -EINVAL; }
From: Pan Bian bianpan2016@163.com
[ Upstream commit da046d5f895fca18d63b15ac8faebd5bf784e23a ]
Release the variable dst after logging dst->error to avoid a possible use after free.
Link: https://lore.kernel.org/r/1573022651-37171-1-git-send-email-bianpan2016@163.... Signed-off-by: Pan Bian bianpan2016@163.com Reviewed-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index b1df93b69df44..fa7a5ff498c73 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -2074,9 +2074,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); if (!dst || dst->error) { if (dst) { - dst_release(dst); i40iw_pr_err("ip6_route_output returned dst->error = %d\n", dst->error); + dst_release(dst); } return rc; }
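Both this and the qedr patch above fix the same shape of bug: dst_release() can drop the last reference and free dst, so it must come after the final access to dst->error. A minimal sketch of the corrected ordering:

        if (!dst || dst->error) {
                if (dst) {
                        pr_err("ip6_route_output returned dst->error = %d\n",
                               dst->error);
                        dst_release(dst);  /* release only after the last access */
                }
                return -EINVAL;
        }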
From: Nicholas Johnson nicholas.johnson-opensource@outlook.com.au
[ Upstream commit c13704f5685deb7d6eb21e293233e0901ed77377 ]
Previously, the kernel sometimes assigned more MMIO or MMIO_PREF space than desired. For example, if the user requested 128M of space with "pci=realloc,hpmemsize=128M", we sometimes assigned 256M:
pci 0000:06:01.0: BAR 14: assigned [mem 0x90100000-0xa00fffff] = 256M
pci 0000:06:04.0: BAR 14: assigned [mem 0xa0200000-0xb01fffff] = 256M
With this patch applied:
pci 0000:06:01.0: BAR 14: assigned [mem 0x90100000-0x980fffff] = 128M
pci 0000:06:04.0: BAR 14: assigned [mem 0x98200000-0xa01fffff] = 128M
This happened when, in the first pass, the MMIO_PREF assignment succeeded but the MMIO assignment failed. In the next pass, because MMIO_PREF was already assigned, the attempt to assign MMIO_PREF returned an error code instead of success (nothing more to do, already allocated). Hence the size that was actually allocated, but was thought to have failed, was placed in the MMIO window.
The bug resulted in the MMIO_PREF size being added to the MMIO window, which meant doubling when MMIO_PREF size = MMIO size. With a large MMIO_PREF, the MMIO window would likely fail to be assigned altogether due to lack of 32-bit address space.
Change find_free_bus_resource() to do the following:
- Return first unassigned resource of the correct type.
- If there is none, return first assigned resource of the correct type.
- If none of the above, return NULL.
Returning an assigned resource of the correct type allows the caller to distinguish between already assigned and no resource of the correct type.
Add checks in pbus_size_io() and pbus_size_mem() to return success if the resource returned from find_free_bus_resource() is already allocated.
This avoids pbus_size_io() and pbus_size_mem() returning an error code to __pci_bus_size_bridges() when a resource has been successfully assigned in a previous pass. This fixes the existing behaviour, where space for a resource could be reserved multiple times in different parent bridge windows.
Link: https://lore.kernel.org/lkml/20190531171216.20532-2-logang@deltatee.com/T/#u Link: https://bugzilla.kernel.org/show_bug.cgi?id=203243 Link: https://lore.kernel.org/r/PS2P216MB075563AA6AD242AA666EDC6A80760@PS2P216MB07... Reported-by: Kit Chow kchow@gigaio.com Reported-by: Nicholas Johnson nicholas.johnson-opensource@outlook.com.au Signed-off-by: Nicholas Johnson nicholas.johnson-opensource@outlook.com.au Signed-off-by: Bjorn Helgaas bhelgaas@google.com Reviewed-by: Mika Westerberg mika.westerberg@linux.intel.com Reviewed-by: Logan Gunthorpe logang@deltatee.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/pci/setup-bus.c | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 5356630e0e483..44f4866d95d8c 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -752,24 +752,32 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) }
/* - * Helper function for sizing routines: find first available bus resource - * of a given type. Note: we intentionally skip the bus resources which - * have already been assigned (that is, have non-NULL parent resource). + * Helper function for sizing routines. Assigned resources have non-NULL + * parent resource. + * + * Return first unassigned resource of the correct type. If there is none, + * return first assigned resource of the correct type. If none of the + * above, return NULL. + * + * Returning an assigned resource of the correct type allows the caller to + * distinguish between already assigned and no resource of the correct type. */ -static struct resource *find_free_bus_resource(struct pci_bus *bus, - unsigned long type_mask, - unsigned long type) +static struct resource *find_bus_resource_of_type(struct pci_bus *bus, + unsigned long type_mask, + unsigned long type) { + struct resource *r, *r_assigned = NULL; int i; - struct resource *r;
pci_bus_for_each_resource(bus, r, i) { if (r == &ioport_resource || r == &iomem_resource) continue; if (r && (r->flags & type_mask) == type && !r->parent) return r; + if (r && (r->flags & type_mask) == type && !r_assigned) + r_assigned = r; } - return NULL; + return r_assigned; }
static resource_size_t calculate_iosize(resource_size_t size, @@ -866,8 +874,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, struct list_head *realloc_head) { struct pci_dev *dev; - struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO, - IORESOURCE_IO); + struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO, + IORESOURCE_IO); resource_size_t size = 0, size0 = 0, size1 = 0; resource_size_t children_add_size = 0; resource_size_t min_align, align; @@ -875,6 +883,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, if (!b_res) return;
+ /* If resource is already assigned, nothing more to do */ + if (b_res->parent) + return; + min_align = window_alignment(bus, IORESOURCE_IO); list_for_each_entry(dev, &bus->devices, bus_list) { int i; @@ -978,7 +990,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, resource_size_t min_align, align, size, size0, size1; resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */ int order, max_order; - struct resource *b_res = find_free_bus_resource(bus, + struct resource *b_res = find_bus_resource_of_type(bus, mask | IORESOURCE_PREFETCH, type); resource_size_t children_add_size = 0; resource_size_t children_add_align = 0; @@ -987,6 +999,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (!b_res) return -ENOSPC;
+ /* If resource is already assigned, nothing more to do */ + if (b_res->parent) + return 0; + memset(aligns, 0, sizeof(aligns)); max_order = 0; size = 0;
From: Al Viro viro@zeniv.linux.org.uk
[ Upstream commit e84009336711d2bba885fc9cea66348ddfce3758 ]
We are overoptimistic about taking the fast path there; seeing the same value in ->d_parent after having grabbed a reference to that parent does *not* mean that it has remained our parent all along.
That wouldn't be a big deal (in the end it is our parent and we have grabbed the reference we are about to return), but... the situation with barriers is messed up.
We might have hit the following sequence:
d is a dentry of /tmp/a/b

CPU1:                                           CPU2:
parent = d->d_parent (i.e. dentry of /tmp/a)
                                                rename /tmp/a/b to /tmp/b
                                                rmdir /tmp/a, making its dentry negative
grab reference to parent,
end up with cached parent->d_inode (NULL)
                                                mkdir /tmp/a, rename /tmp/b to /tmp/a/b
recheck d->d_parent, which is back to original
decide that everything's fine and return the reference we'd got.
The trouble is, caller (on CPU1) will observe dget_parent() returning an apparently negative dentry. It actually is positive, but CPU1 has stale ->d_inode cached.
Use d->d_seq to see if it has been moved instead of rechecking ->d_parent. NOTE: we are *NOT* going to retry on any kind of ->d_seq mismatch; we just go into the slow path in such case. We don't wait for ->d_seq to become even either - again, if we are racing with renames, we can bloody well go to slow path anyway.
Signed-off-by: Al Viro viro@zeniv.linux.org.uk Signed-off-by: Sasha Levin sashal@kernel.org --- fs/dcache.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/dcache.c b/fs/dcache.c index e88cf0554e659..b2a7f1765f0b1 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -903,17 +903,19 @@ struct dentry *dget_parent(struct dentry *dentry) { int gotref; struct dentry *ret; + unsigned seq;
/* * Do optimistic parent lookup without any * locking. */ rcu_read_lock(); + seq = raw_seqcount_begin(&dentry->d_seq); ret = READ_ONCE(dentry->d_parent); gotref = lockref_get_not_zero(&ret->d_lockref); rcu_read_unlock(); if (likely(gotref)) { - if (likely(ret == READ_ONCE(dentry->d_parent))) + if (!read_seqcount_retry(&dentry->d_seq, seq)) return ret; dput(ret); }
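The fix is an instance of the standard seqcount reader pattern, and the raw_ variant is chosen deliberately: read_seqcount_begin() would spin until a concurrent writer finishes, while raw_seqcount_begin() returns immediately and merely guarantees that the final retry check fails if a write was in flight. A generic sketch of the pattern (obj and field are placeholders):

        unsigned seq;

        seq = raw_seqcount_begin(&obj->seq);    /* no spinning on odd counts */
        val = READ_ONCE(obj->field);            /* lockless snapshot */
        if (read_seqcount_retry(&obj->seq, seq))
                goto slow_path;                 /* writer raced us: take locks */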
From: Brian Foster bfoster@redhat.com
[ Upstream commit 2a2b5932db67586bacc560cc065d62faece5b996 ]
The leaf format xattr addition helper xfs_attr3_leaf_add_work() adjusts the block freemap in a couple places. The first update drops the size of the freemap that the caller had already selected to place the xattr name/value data. Before the function returns, it also checks whether the entries array has encroached on a freemap range by virtue of the new entry addition. This is necessary because the entries array grows from the start of the block (but end of the block header) towards the end of the block while the name/value data grows from the end of the block in the opposite direction. If the associated freemap is already empty, however, size is zero and the subtraction underflows the field and causes corruption.
This is reproduced rarely by generic/070. The observed behavior is that a smaller sized freemap is aligned to the end of the entries list, several subsequent xattr additions land in larger freemaps and the entries list expands into the smaller freemap until it is fully consumed and then underflows. Note that it is not otherwise a corruption for the entries array to consume an empty freemap because the nameval list (i.e. the firstused pointer in the xattr header) starts beyond the end of the corrupted freemap.
Update the freemap size modification to account for the fact that the freemap entry can be empty and thus stale.
Signed-off-by: Brian Foster bfoster@redhat.com Reviewed-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/xfs/libxfs/xfs_attr_leaf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index b133209f3aa6a..f1535549d1ced 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -1451,7 +1451,9 @@ xfs_attr3_leaf_add_work( for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { if (ichdr->freemap[i].base == tmp) { ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t); - ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t); + ichdr->freemap[i].size -= + min_t(uint16_t, ichdr->freemap[i].size, + sizeof(xfs_attr_leaf_entry_t)); } } ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
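The underflow itself is plain 16-bit unsigned wraparound; a userspace sketch, with 8 bytes standing in for sizeof(xfs_attr_leaf_entry_t):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t size = 0;      /* empty, stale freemap entry */

        size -= 8;              /* old code: unconditional subtract */
        printf("unclamped: %u (bogus huge free region)\n", size);

        uint16_t size2 = 0;
        size2 -= (size2 < 8) ? size2 : 8;       /* min_t()-style clamp */
        printf("clamped:   %u\n", size2);
        return 0;
}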
From: Christophe JAILLET christophe.jaillet@wanadoo.fr
[ Upstream commit 9067f2f0b41d7e817fc8c5259bab1f17512b0147 ]
We should jump to fail3 in order to undo the 'xa_insert_irq()' call.
Link: https://lore.kernel.org/r/20190923190746.10964-1-christophe.jaillet@wanadoo.... Signed-off-by: Christophe JAILLET christophe.jaillet@wanadoo.fr Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/hw/cxgb4/cm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 6b4e7235d2f56..30e08bcc9afb5 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3382,7 +3382,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { err = pick_local_ipaddrs(dev, cm_id); if (err) - goto fail2; + goto fail3; }
/* find a route */ @@ -3404,7 +3404,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { err = pick_local_ip6addrs(dev, cm_id); if (err) - goto fail2; + goto fail3; }
/* find a route */
From: Sascha Hauer s.hauer@pengutronix.de
[ Upstream commit f9c34bb529975fe9f85b870a80c53a83a3c5a182 ]
When a new fastmap is about to be written, UBI must make sure it has a free block available for a fastmap anchor. For this, ubi_update_fastmap() calls ubi_ensure_anchor_pebs(). This stopped working with 2e8f08deabbc ("ubi: Fix races around ubi_refill_pools()"): since that commit the wear leveling code is blocked and can no longer produce free PEBs. UBI then more often than not falls back to writing the new fastmap anchor to the same block it was already on, which means the same erase block gets erased during each fastmap write and wears out quite fast.
As the locking prevents us from producing the anchor PEB when we actually need it, this patch changes the strategy for creating the anchor PEB. We no longer create it on demand right before we want to write a fastmap, but instead we create an anchor PEB right after we have written a fastmap. This gives us enough time to produce a new anchor PEB before it is needed. To make sure we have an anchor PEB for the very first fastmap write we call ubi_ensure_anchor_pebs() during initialisation as well.
Fixes: 2e8f08deabbc ("ubi: Fix races around ubi_refill_pools()") Signed-off-by: Sascha Hauer s.hauer@pengutronix.de Signed-off-by: Richard Weinberger richard@nod.at Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/mtd/ubi/fastmap-wl.c | 31 ++++++++++++++++++------------- drivers/mtd/ubi/fastmap.c | 14 +++++--------- drivers/mtd/ubi/ubi.h | 6 ++++-- drivers/mtd/ubi/wl.c | 32 ++++++++++++++------------------ drivers/mtd/ubi/wl.h | 1 - 5 files changed, 41 insertions(+), 43 deletions(-)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index c44c8470247e1..426820ab9afe1 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -57,18 +57,6 @@ static void return_unused_pool_pebs(struct ubi_device *ubi, } }
-static int anchor_pebs_available(struct rb_root *root) -{ - struct rb_node *p; - struct ubi_wl_entry *e; - - ubi_rb_for_each_entry(p, e, root, u.rb) - if (e->pnum < UBI_FM_MAX_START) - return 1; - - return 0; -} - /** * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number. * @ubi: UBI device description object @@ -277,8 +265,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) int ubi_ensure_anchor_pebs(struct ubi_device *ubi) { struct ubi_work *wrk; + struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock); + + /* Do we already have an anchor? */ + if (ubi->fm_anchor) { + spin_unlock(&ubi->wl_lock); + return 0; + } + + /* See if we can find an anchor PEB on the list of free PEBs */ + anchor = ubi_wl_get_fm_peb(ubi, 1); + if (anchor) { + ubi->fm_anchor = anchor; + spin_unlock(&ubi->wl_lock); + return 0; + } + + /* No luck, trigger wear leveling to produce a new anchor PEB */ + ubi->fm_do_produce_anchor = 1; if (ubi->wl_scheduled) { spin_unlock(&ubi->wl_lock); return 0; @@ -294,7 +300,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi) return -ENOMEM; }
- wrk->anchor = 1; wrk->func = &wear_leveling_worker; __schedule_ubi_work(ubi, wrk); return 0; diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 604772fc4a965..53f448e7433a9 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1543,14 +1543,6 @@ int ubi_update_fastmap(struct ubi_device *ubi) return 0; }
- ret = ubi_ensure_anchor_pebs(ubi); - if (ret) { - up_write(&ubi->fm_eba_sem); - up_write(&ubi->work_sem); - up_write(&ubi->fm_protect); - return ret; - } - new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); if (!new_fm) { up_write(&ubi->fm_eba_sem); @@ -1621,7 +1613,8 @@ int ubi_update_fastmap(struct ubi_device *ubi) }
spin_lock(&ubi->wl_lock); - tmp_e = ubi_wl_get_fm_peb(ubi, 1); + tmp_e = ubi->fm_anchor; + ubi->fm_anchor = NULL; spin_unlock(&ubi->wl_lock);
if (old_fm) { @@ -1673,6 +1666,9 @@ out_unlock: up_write(&ubi->work_sem); up_write(&ubi->fm_protect); kfree(old_fm); + + ubi_ensure_anchor_pebs(ubi); + return ret;
err: diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 721b6aa7936cf..a173eb707bddb 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -491,6 +491,8 @@ struct ubi_debug_info { * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled * @fast_attach: non-zero if UBI was attached by fastmap + * @fm_anchor: The next anchor PEB to use for fastmap + * @fm_do_produce_anchor: If true produce an anchor PEB in wl * * @used: RB-tree of used physical eraseblocks * @erroneous: RB-tree of erroneous used physical eraseblocks @@ -599,6 +601,8 @@ struct ubi_device { struct work_struct fm_work; int fm_work_scheduled; int fast_attach; + struct ubi_wl_entry *fm_anchor; + int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */ struct rb_root used; @@ -789,7 +793,6 @@ struct ubi_attach_info { * @vol_id: the volume ID on which this erasure is being performed * @lnum: the logical eraseblock number * @torture: if the physical eraseblock has to be tortured - * @anchor: produce a anchor PEB to by used by fastmap * * The @func pointer points to the worker function. If the @shutdown argument is * not zero, the worker has to free the resources and exit immediately as the @@ -805,7 +808,6 @@ struct ubi_work { int vol_id; int lnum; int torture; - int anchor; };
#include "debug.h" diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 3fcdefe2714d0..5d77a38dba542 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -339,13 +339,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, } }
- /* If no fastmap has been written and this WL entry can be used - * as anchor PEB, hold it back and return the second best WL entry - * such that fastmap can use the anchor PEB later. */ - if (prev_e && !ubi->fm_disabled && - !ubi->fm && e->pnum < UBI_FM_MAX_START) - return prev_e; - return e; }
@@ -656,9 +649,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, { int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; int erase = 0, keep = 0, vol_id = -1, lnum = -1; -#ifdef CONFIG_MTD_UBI_FASTMAP - int anchor = wrk->anchor; -#endif struct ubi_wl_entry *e1, *e2; struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; @@ -698,11 +688,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, }
#ifdef CONFIG_MTD_UBI_FASTMAP - /* Check whether we need to produce an anchor PEB */ - if (!anchor) - anchor = !anchor_pebs_available(&ubi->free); - - if (anchor) { + if (ubi->fm_do_produce_anchor) { e1 = find_anchor_wl_entry(&ubi->used); if (!e1) goto out_cancel; @@ -719,6 +705,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, self_check_in_wl_tree(ubi, e1, &ubi->used); rb_erase(&e1->u.rb, &ubi->used); dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); + ubi->fm_do_produce_anchor = 0; } else if (!ubi->scrub.rb_node) { #else if (!ubi->scrub.rb_node) { @@ -1051,7 +1038,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested) goto out_cancel; }
- wrk->anchor = 0; wrk->func = &wear_leveling_worker; if (nested) __schedule_ubi_work(ubi, wrk); @@ -1093,8 +1079,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) err = sync_erase(ubi, e, wl_wrk->torture); if (!err) { spin_lock(&ubi->wl_lock); - wl_tree_add(e, &ubi->free); - ubi->free_count++; + + if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) { + ubi->fm_anchor = e; + ubi->fm_do_produce_anchor = 0; + } else { + wl_tree_add(e, &ubi->free); + ubi->free_count++; + } + spin_unlock(&ubi->wl_lock);
/* @@ -1882,6 +1875,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (err) goto out_free;
+#ifdef CONFIG_MTD_UBI_FASTMAP + ubi_ensure_anchor_pebs(ubi); +#endif return 0;
out_free: diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h index a9e2d669acd81..c93a532937863 100644 --- a/drivers/mtd/ubi/wl.h +++ b/drivers/mtd/ubi/wl.h @@ -2,7 +2,6 @@ #ifndef UBI_WL_H #define UBI_WL_H #ifdef CONFIG_MTD_UBI_FASTMAP -static int anchor_pebs_available(struct rb_root *root); static void update_fastmap_work_fn(struct work_struct *wrk); static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root); static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
From: Bradley Bolen bradleybolen@gmail.com
[ Upstream commit f3d7c2292d104519195fdb11192daec13229c219 ]
With large eMMC cards, it is possible to create general purpose partitions that are bigger than 4GB. The size member of the mmc_part struct is only an unsigned int which overflows for gp partitions larger than 4GB. Change this to a u64 to handle the overflow.
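To make the 4GB limit concrete, here is a small stand-alone sketch (example register values invented, not taken from any real card) of the 32-bit arithmetic that used to overflow in mmc_manage_gp_partitions():

#include <stdio.h>

int main(void)
{
        /* hypothetical EXT_CSD_GP_SIZE_MULT and group size values */
        unsigned int size_mult = 10000;
        unsigned int hc_erase_grp_sz = 1;
        unsigned int hc_wp_grp_sz = 16;

        unsigned int part_size = size_mult * (hc_erase_grp_sz * hc_wp_grp_sz);

        /* << 19 converts 512 KiB units to bytes, as the driver does */
        unsigned int bytes32 = part_size << 19; /* wraps above 4GB */
        unsigned long long bytes64 = (unsigned long long)part_size << 19;

        printf("u32: %u bytes\nu64: %llu bytes\n", bytes32, bytes64);
        return 0;
}

With these made-up values the u64 result is ~78GB while the u32 result silently wraps to ~2.1GB.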
Signed-off-by: Bradley Bolen bradleybolen@gmail.com Signed-off-by: Ulf Hansson ulf.hansson@linaro.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/mmc/core/mmc.c | 9 ++++----- include/linux/mmc/card.h | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index b7159e243323b..de14b5845f525 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -297,7 +297,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd) } }
-static void mmc_part_add(struct mmc_card *card, unsigned int size, +static void mmc_part_add(struct mmc_card *card, u64 size, unsigned int part_cfg, char *name, int idx, bool ro, int area_type) { @@ -313,7 +313,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) { int idx; u8 hc_erase_grp_sz, hc_wp_grp_sz; - unsigned int part_size; + u64 part_size;
/* * General purpose partition feature support -- @@ -343,8 +343,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] << 8) + ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; - part_size *= (size_t)(hc_erase_grp_sz * - hc_wp_grp_sz); + part_size *= (hc_erase_grp_sz * hc_wp_grp_sz); mmc_part_add(card, part_size << 19, EXT_CSD_PART_CONFIG_ACC_GP0 + idx, "gp%d", idx, false, @@ -362,7 +361,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) { int err = 0, idx; - unsigned int part_size; + u64 part_size; struct device_node *np; bool broken_hpi = false;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index e459b38ef33cc..cf3780a6ccc4b 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -226,7 +226,7 @@ struct mmc_queue_req; * MMC Physical partitions */ struct mmc_part { - unsigned int size; /* partition size (in bytes) */ + u64 size; /* partition size (in bytes) */ unsigned int part_cfg; /* partition type */ char name[MAX_MMC_PART_NAME_LEN]; bool force_ro; /* to make boot parts RO by default */
From: Bob Peterson rpeterso@redhat.com
[ Upstream commit 2c47c1be51fbded1f7baa2ceaed90f97932f79be ]
Before this patch, gfs2_create_inode had a use-after-free for the iopen glock in some error paths because it did this:
gfs2_glock_put(io_gl); fail_gunlock2: if (io_gl) clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
In some cases, the io_gl was used for create and only had one reference, so the glock might be freed before the clear_bit(). This patch tries to straighten it out by only jumping to the error paths where iopen is properly set, and moving the gfs2_glock_put after the clear_bit.
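The underlying hazard is a plain put-before-use ordering bug. A minimal stand-alone analogue (generic refcounting in user space, not gfs2's glock code):

#include <stdlib.h>

struct obj {
        int refcount;
        unsigned long flags;
};

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                free(o);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        o->refcount = 1;        /* the create path holds the only reference */
        o->flags = 1;

        /*
         * Buggy order, as in the old error path:
         *      obj_put(o);     - may free o here
         *      o->flags = 0;   - use-after-free
         * Fixed order: touch the flags first, then drop the reference.
         */
        o->flags = 0;
        obj_put(o);
        return 0;
}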
Signed-off-by: Bob Peterson rpeterso@redhat.com Signed-off-by: Andreas Gruenbacher agruenba@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/gfs2/inode.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 8466166f22e3d..988bb7b17ed8f 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -712,7 +712,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_trans_begin(sdp, blocks, 0); if (error) - goto fail_gunlock2; + goto fail_free_inode;
if (blocks > 1) { ip->i_eattr = ip->i_no_addr + 1; @@ -723,7 +723,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) - goto fail_gunlock2; + goto fail_free_inode;
BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
@@ -732,7 +732,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, goto fail_gunlock2;
glock_set_object(ip->i_iopen_gh.gh_gl, ip); - gfs2_glock_put(io_gl); gfs2_set_iop(inode); insert_inode_hash(inode);
@@ -765,6 +764,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
mark_inode_dirty(inode); d_instantiate(dentry, inode); + /* After instantiate, errors should result in evict which will destroy + * both inode and iopen glocks properly. */ if (file) { file->f_mode |= FMODE_CREATED; error = finish_open(file, dentry, gfs2_open_common); @@ -772,15 +773,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, gfs2_glock_dq_uninit(ghs); gfs2_glock_dq_uninit(ghs + 1); clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + gfs2_glock_put(io_gl); return error;
fail_gunlock3: glock_clear_object(io_gl, ip); gfs2_glock_dq_uninit(&ip->i_iopen_gh); - gfs2_glock_put(io_gl); fail_gunlock2: - if (io_gl) - clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + gfs2_glock_put(io_gl); fail_free_inode: if (ip->i_gl) { glock_clear_object(ip->i_gl, ip);
From: peter chang dpf@google.com
[ Upstream commit 51c1c5f6ed64c2b65a8cf89dac136273d25ca540 ]
Fix the driver so that if it properly sent the abort, it tries to remove the command from the firmware's list of outstanding commands regardless of the abort status. This means that the task gets freed 'now' rather than possibly getting freed later, when the scsi layer thinks it's leaked but still valid.
Link: https://lore.kernel.org/r/20191114100910.6153-10-deepak.ukey@microchip.com Acked-by: Jack Wang jinpu.wang@cloud.ionos.com Signed-off-by: peter chang dpf@google.com Signed-off-by: Deepak Ukey deepak.ukey@microchip.com Signed-off-by: Viswas G Viswas.G@microchip.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/pm8001/pm8001_sas.c | 50 +++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 13 deletions(-)
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 7e48154e11c36..7912ed64d3b9c 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -1202,8 +1202,8 @@ int pm8001_abort_task(struct sas_task *task) pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); phy_id = pm8001_dev->attached_phy; - rc = pm8001_find_tag(task, &tag); - if (rc == 0) { + ret = pm8001_find_tag(task, &tag); + if (ret == 0) { pm8001_printk("no tag for task:%p\n", task); return TMF_RESP_FUNC_FAILED; } @@ -1241,26 +1241,50 @@ int pm8001_abort_task(struct sas_task *task)
/* 2. Send Phy Control Hard Reset */ reinit_completion(&completion); + phy->port_reset_status = PORT_RESET_TMO; phy->reset_success = false; phy->enable_completion = &completion; phy->reset_completion = &completion_reset; ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_HARD_RESET); - if (ret) - goto out; - PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("Waiting for local phy ctl\n")); - wait_for_completion(&completion); - if (!phy->reset_success) + if (ret) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; goto out; + }
- /* 3. Wait for Port Reset complete / Port reset TMO */ + /* In the case of the reset timeout/fail we still + * abort the command at the firmware. The assumption + * here is that the drive is off doing something so + * that it's not processing requests, and we want to + * avoid getting a completion for this and either + * leaking the task in libsas or losing the race and + * getting a double free. + */ PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Waiting for local phy ctl\n")); + ret = wait_for_completion_timeout(&completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret || !phy->reset_success) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + } else { + /* 3. Wait for Port Reset complete or + * Port reset TMO + */ + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Waiting for Port reset\n")); - wait_for_completion(&completion_reset); - if (phy->port_reset_status) { - pm8001_dev_gone_notify(dev); - goto out; + ret = wait_for_completion_timeout( + &completion_reset, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + phy->reset_completion = NULL; + WARN_ON(phy->port_reset_status == + PORT_RESET_TMO); + if (phy->port_reset_status == PORT_RESET_TMO) { + pm8001_dev_gone_notify(dev); + goto out; + } }
/*
From: Felix Fietkau nbd@nbd.name
[ Upstream commit 36f7e2b2bb1de86f0072cd49ca93d82b9e8fd894 ]
With the devm API, the unregister happens only after the device cleanup is done, by which point the struct mt76_dev that contains the led_cdev has already been freed. This leads to a use-after-free bug that can crash the system.
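As a rough stand-alone analogue (plain C, not the actual devm machinery), the problem is a deferred cleanup action that runs after its target's memory has been freed:

#include <stdlib.h>

struct led { int brightness; };
struct dev { struct led led; };

/* stand-in for a devm-managed action the core runs after driver teardown */
static struct led *managed_target;

static void managed_unregister(void)
{
        if (managed_target)
                managed_target->brightness = 0;
        managed_target = NULL;
}

int main(void)
{
        struct dev *d = malloc(sizeof(*d));

        managed_target = &d->led;

        /*
         * In the devm order, free(d) would happen first and
         * managed_unregister() would then write to freed memory. The fix
         * uses the order below: unregister explicitly while the containing
         * structure is still alive.
         */
        managed_unregister();
        free(d);
        return 0;
}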
Signed-off-by: Felix Fietkau nbd@nbd.name Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/mediatek/mt76/mac80211.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 1a2c143b34d01..7be5806a1c398 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -105,7 +105,15 @@ static int mt76_led_init(struct mt76_dev *dev) dev->led_al = of_property_read_bool(np, "led-active-low"); }
- return devm_led_classdev_register(dev->dev, &dev->led_cdev); + return led_classdev_register(dev->dev, &dev->led_cdev); +} + +static void mt76_led_cleanup(struct mt76_dev *dev) +{ + if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set) + return; + + led_classdev_unregister(&dev->led_cdev); }
static void mt76_init_stream_cap(struct mt76_dev *dev, @@ -360,6 +368,7 @@ void mt76_unregister_device(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw;
+ mt76_led_cleanup(dev); mt76_tx_status_check(dev, NULL, true); ieee80211_unregister_hw(hw); }
From: Felix Fietkau nbd@nbd.name
[ Upstream commit 1a817fa73c3b27a593aadf0029de24db1bbc1a3e ]
This is needed primarily to avoid races when dealing with the rx aggregation related data structures.
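A minimal sketch of the locking pattern (plain pthreads in user space, not the driver code): one per-device mutex serializes the whole aggregation state machine so that concurrent start/stop callbacks cannot interleave:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static int rx_aggr_active;

static void ampdu_action(int start)
{
        pthread_mutex_lock(&dev_mutex);
        if (start && !rx_aggr_active)
                rx_aggr_active = 1;     /* mt76_rx_aggr_start() in the driver */
        else if (!start && rx_aggr_active)
                rx_aggr_active = 0;     /* mt76_rx_aggr_stop() in the driver */
        pthread_mutex_unlock(&dev_mutex);
}

int main(void)
{
        ampdu_action(1);
        ampdu_action(0);
        printf("active=%d\n", rx_aggr_active);
        return 0;
}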
Signed-off-by: Felix Fietkau nbd@nbd.name Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/mediatek/mt76/mt7603/main.c | 2 ++ drivers/net/wireless/mediatek/mt76/mt7615/main.c | 2 ++ drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 2 ++ 3 files changed, 6 insertions(+)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 25d5b1608bc91..0a5695c3d9241 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -561,6 +561,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex); switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, @@ -590,6 +591,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } + mutex_unlock(&dev->mt76.mutex);
return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 87c748715b5d7..38183aef0eb92 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -455,6 +455,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex); switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, @@ -485,6 +486,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } + mutex_unlock(&dev->mt76.mutex);
return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index aec73a0295e86..de0d6f21c621c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -371,6 +371,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mtxq = (struct mt76_txq *)txq->drv_priv;
+ mutex_lock(&dev->mt76.mutex); switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, @@ -400,6 +401,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } + mutex_unlock(&dev->mt76.mutex);
return 0; }
From: Kusanagi Kouichi slash@ac.auone-net.jp
[ Upstream commit 4250b047039d324e0ff65267c8beb5bad5052a86 ]
If DEBUG_FS=n, the compile fails with the following error:
kernel/trace/trace.c: In function 'tracing_init_dentry': kernel/trace/trace.c:8658:9: error: passing argument 3 of 'debugfs_create_automount' from incompatible pointer type [-Werror=incompatible-pointer-types] 8658 | trace_automount, NULL); | ^~~~~~~~~~~~~~~ | | | struct vfsmount * (*)(struct dentry *, void *) In file included from kernel/trace/trace.c:24: ./include/linux/debugfs.h:206:25: note: expected 'struct vfsmount * (*)(void *)' but argument is of type 'struct vfsmount * (*)(struct dentry *, void *)' 206 | struct vfsmount *(*f)(void *), | ~~~~~~~~~~~~~~~~~~~^~~~~~~~~~
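The root cause is that the CONFIG_DEBUG_FS=n stub spelled out its own function-pointer type instead of sharing the typedef, so the two signatures drifted apart. A stand-alone illustration of the fix (names invented):

#include <stddef.h>

struct dentry;
struct vfsmount;

/* one typedef, visible in both configurations */
typedef struct vfsmount *(*automount_t)(struct dentry *, void *);

/* the real prototype and the config-off stub now agree by construction */
struct dentry *create_automount(const char *name, automount_t f, void *data);

static inline struct dentry *create_automount_stub(const char *name,
                                                   automount_t f, void *data)
{
        (void)name; (void)f; (void)data;
        return NULL;
}

int main(void)
{
        return 0;
}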
Signed-off-by: Kusanagi Kouichi slash@ac.auone-net.jp Link: https://lore.kernel.org/r/20191121102021787.MLMY.25002.ppp.dion.ne.jp@dmta00... Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- include/linux/debugfs.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 58424eb3b3291..798f0b9b43aee 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -54,6 +54,8 @@ static const struct file_operations __fops = { \ .llseek = no_llseek, \ }
+typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); + #if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_lookup(const char *name, struct dentry *parent); @@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest);
-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, debugfs_automount_t f, @@ -203,7 +204,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
static inline struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, - struct vfsmount *(*f)(void *), + debugfs_automount_t f, void *data) { return ERR_PTR(-ENODEV);
From: Chuck Lever chuck.lever@oracle.com
[ Upstream commit a264abad51d8ecb7954a2f6d9f1885b38daffc74 ]
RPC tasks on the backchannel never invoke xprt_complete_rqst(), so there is no way to report their tk_status at completion. Likewise, any RPC task that exits via rpc_exit_task() before it is replied to will disappear without a trace.
Introduce a trace point, symmetrical with rpc_task_begin, that captures the termination status of each RPC task.
Sample trace output for callback requests initiated on the server: kworker/u8:12-448 [003] 127.025240: rpc_task_end: task:50@3 flags=ASYNC|DYNAMIC|SOFT|SOFTCONN|SENT runstate=RUNNING|ACTIVE status=0 action=rpc_exit_task kworker/u8:12-448 [002] 127.567310: rpc_task_end: task:51@3 flags=ASYNC|DYNAMIC|SOFT|SOFTCONN|SENT runstate=RUNNING|ACTIVE status=0 action=rpc_exit_task kworker/u8:12-448 [001] 130.506817: rpc_task_end: task:52@3 flags=ASYNC|DYNAMIC|SOFT|SOFTCONN|SENT runstate=RUNNING|ACTIVE status=0 action=rpc_exit_task
Odd, though, that I never see trace_rpc_task_complete, either in the forward or backchannel. Should it be removed?
Signed-off-by: Chuck Lever chuck.lever@oracle.com Signed-off-by: Trond Myklebust trond.myklebust@hammerspace.com Signed-off-by: Sasha Levin sashal@kernel.org --- include/trace/events/sunrpc.h | 1 + net/sunrpc/sched.c | 1 + 2 files changed, 2 insertions(+)
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ffa3c51dbb1a0..28df77a948e56 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -165,6 +165,7 @@ DECLARE_EVENT_CLASS(rpc_task_running, DEFINE_RPC_RUNNING_EVENT(begin); DEFINE_RPC_RUNNING_EVENT(run_action); DEFINE_RPC_RUNNING_EVENT(complete); +DEFINE_RPC_RUNNING_EVENT(end);
DECLARE_EVENT_CLASS(rpc_task_queued,
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 987c4b1f0b174..9c79548c68474 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -824,6 +824,7 @@ rpc_reset_task_statistics(struct rpc_task *task) */ void rpc_exit_task(struct rpc_task *task) { + trace_rpc_task_end(task, task->tk_action); task->tk_action = NULL; if (task->tk_ops->rpc_count_stats) task->tk_ops->rpc_count_stats(task, task->tk_calldata);
From: Markus Elfring elfring@users.sourceforge.net
[ Upstream commit 2b1116bbe898aefdf584838448c6869f69851e0f ]
Move the identical error code assignments so that this exception handling can be reused at the end of the function.
This issue was detected by using the Coccinelle software.
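The shape of the transformation, as a stand-alone sketch (function and failure points invented): each duplicated "rc = -EFAULT; goto exit;" pair collapses into a single e_fault label that sets the code once and jumps back to the common exit path.

#include <errno.h>
#include <stdio.h>

static int do_work(int fail_step)
{
        int rc = 0;

        if (fail_step == 1)
                goto e_fault;   /* was: rc = -EFAULT; goto out; */
        if (fail_step == 2)
                goto e_fault;   /* was: rc = -EFAULT; goto out; */

out:
        /* common resource cleanup would go here */
        return rc;

e_fault:
        rc = -EFAULT;
        goto out;
}

int main(void)
{
        printf("%d %d\n", do_work(0), do_work(1));      /* 0 -14 */
        return 0;
}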
Signed-off-by: Markus Elfring elfring@users.sourceforge.net Signed-off-by: Steve French stfrench@microsoft.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/cifs/smb2ops.c | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 7ccbfc6564787..318d805e74d40 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1565,35 +1565,32 @@ smb2_ioctl_query_info(const unsigned int xid, if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length) qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount); if (qi.input_buffer_length > 0 && - le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) { - rc = -EFAULT; - goto iqinf_exit; - } - if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) { - rc = -EFAULT; - goto iqinf_exit; - } + le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length + > rsp_iov[1].iov_len) + goto e_fault; + + if (copy_to_user(&pqi->input_buffer_length, + &qi.input_buffer_length, + sizeof(qi.input_buffer_length))) + goto e_fault; + if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info), (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset), - qi.input_buffer_length)) { - rc = -EFAULT; - goto iqinf_exit; - } + qi.input_buffer_length)) + goto e_fault; } else { pqi = (struct smb_query_info __user *)arg; qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length) qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength); - if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) { - rc = -EFAULT; - goto iqinf_exit; - } - if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) { - rc = -EFAULT; - goto iqinf_exit; - } + if (copy_to_user(&pqi->input_buffer_length, + &qi.input_buffer_length, + sizeof(qi.input_buffer_length))) + goto e_fault; + + if (copy_to_user(pqi + 1, qi_rsp->Buffer, + qi.input_buffer_length)) + goto e_fault; }
iqinf_exit: @@ -1609,6 +1606,10 @@ smb2_ioctl_query_info(const unsigned int xid, free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); return rc; + +e_fault: + rc = -EFAULT; + goto iqinf_exit; }
static ssize_t
From: Pavel Shilovsky pshilov@microsoft.com
[ Upstream commit 9bd4540836684013aaad6070a65d6fcdd9006625 ]
Currently we do not assume that a server may break a lease from RWH to RW, which causes us to set a wrong lease state on a file and thus mistakenly flush data and byte-range locks and purge cached data on the client. This leads to performance degradation because subsequent IOs go directly to the server.
Fix this by propagating the new lease state and epoch values to the oplock break handler through the cifsFileInfo structure and removing the use of cifsInodeInfo flags for that purpose. This avoids some races where several lease/oplock breaks use those flags in parallel.
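The heart of the new SMB3 handler is the epoch/state comparison. A stand-alone sketch of that decision (flag values invented; mirrors the checks in smb3_downgrade_oplock in the diff below):

#include <stdbool.h>
#include <stdio.h>

#define CACHE_READ      0x1
#define CACHE_WRITE     0x2

static bool must_purge(unsigned int old_state, unsigned int new_state,
                       unsigned int old_epoch, unsigned int new_epoch)
{
        if ((old_state & CACHE_READ) && !(new_state & CACHE_READ))
                return true;    /* READ caching was lost */
        if (old_state == new_state && new_epoch - old_epoch > 1)
                return true;    /* a lease break was missed in between */
        return false;
}

int main(void)
{
        /* RWH -> RW: READ caching survives, adjacent epochs: keep cache */
        printf("%d\n", must_purge(CACHE_READ | CACHE_WRITE,
                                  CACHE_READ | CACHE_WRITE, 1, 2));     /* 0 */
        /* RW -> none: READ caching lost: purge */
        printf("%d\n", must_purge(CACHE_READ | CACHE_WRITE, 0, 2, 3));  /* 1 */
        return 0;
}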
Signed-off-by: Pavel Shilovsky pshilov@microsoft.com Signed-off-by: Steve French stfrench@microsoft.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/cifs/cifsglob.h | 9 ++++++--- fs/cifs/file.c | 10 +++++++--- fs/cifs/misc.c | 17 +++-------------- fs/cifs/smb1ops.c | 8 +++----- fs/cifs/smb2misc.c | 32 +++++++------------------------- fs/cifs/smb2ops.c | 44 ++++++++++++++++++++++++++++++-------------- fs/cifs/smb2pdu.h | 2 +- 7 files changed, 57 insertions(+), 65 deletions(-)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index f9cbdfc1591b1..b16c994414ab0 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -268,8 +268,9 @@ struct smb_version_operations { int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); bool (*is_oplock_break)(char *, struct TCP_Server_Info *); int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); - void (*downgrade_oplock)(struct TCP_Server_Info *, - struct cifsInodeInfo *, bool); + void (*downgrade_oplock)(struct TCP_Server_Info *server, + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache); /* process transaction2 response */ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, char *, int); @@ -1261,6 +1262,8 @@ struct cifsFileInfo { unsigned int f_flags; bool invalidHandle:1; /* file closed via session abend */ bool oplock_break_cancelled:1; + unsigned int oplock_epoch; /* epoch from the lease break */ + __u32 oplock_level; /* oplock/lease level from the lease break */ int count; spinlock_t file_info_lock; /* protects four flag/count fields above */ struct mutex fh_mutex; /* prevents reopen race after dead ses*/ @@ -1408,7 +1411,7 @@ struct cifsInodeInfo { unsigned int epoch; /* used to track lease state changes */ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ -#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */ +#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 4959dbe740f71..14ae341755d47 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4675,12 +4675,13 @@ void cifs_oplock_break(struct work_struct *work) struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; int rc = 0; + bool purge_cache = false;
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE);
- server->ops->downgrade_oplock(server, cinode, - test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags)); + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, + cfile->oplock_epoch, &purge_cache);
if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && cifs_has_mand_locks(cinode)) { @@ -4695,18 +4696,21 @@ void cifs_oplock_break(struct work_struct *work) else break_lease(inode, O_WRONLY); rc = filemap_fdatawrite(inode->i_mapping); - if (!CIFS_CACHE_READ(cinode)) { + if (!CIFS_CACHE_READ(cinode) || purge_cache) { rc = filemap_fdatawait(inode->i_mapping); mapping_set_error(inode->i_mapping, rc); cifs_zap_mapping(inode); } cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); + if (CIFS_CACHE_WRITE(cinode)) + goto oplock_break_ack; }
rc = cifs_push_locks(cfile); if (rc) cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+oplock_break_ack: /* * releasing stale oplock after recent reconnect of smb session using * a now incorrect file handle is not a data integrity issue but do diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 5ad83bdb9bea3..40ca394fd5de9 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -488,21 +488,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &pCifsInode->flags);
- /* - * Set flag if the server downgrades the oplock - * to L2 else clear. - */ - if (pSMB->OplockLevel) - set_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &pCifsInode->flags); - else - clear_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &pCifsInode->flags); - - cifs_queue_oplock_break(netfile); + netfile->oplock_epoch = 0; + netfile->oplock_level = pSMB->OplockLevel; netfile->oplock_break_cancelled = false; + cifs_queue_oplock_break(netfile);
spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 195766221a7a8..e523c05a44876 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -369,12 +369,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
static void cifs_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - if (set_level2) - cifs_set_oplock_level(cinode, OPLOCK_READ); - else - cifs_set_oplock_level(cinode, 0); + cifs_set_oplock_level(cinode, oplock); }
static bool diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 2fc96f7923ee5..7d875a47d0226 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -550,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
cifs_dbg(FYI, "found in the open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", - le32_to_cpu(rsp->NewLeaseState)); + lease_state);
if (ack_req) cfile->oplock_break_cancelled = false; @@ -559,17 +559,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
- /* - * Set or clear flags depending on the lease state being READ. - * HANDLE caching flag should be added when the client starts - * to defer closing remote file handles with HANDLE leases. - */ - if (lease_state & SMB2_LEASE_READ_CACHING_HE) - set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); - else - clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); + cfile->oplock_epoch = le16_to_cpu(rsp->Epoch); + cfile->oplock_level = lease_state;
cifs_queue_oplock_break(cfile); return true; @@ -599,7 +590,7 @@ smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
cifs_dbg(FYI, "found in the pending open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", - le32_to_cpu(rsp->NewLeaseState)); + lease_state);
open->oplock = lease_state; } @@ -732,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
- /* - * Set flag if the server downgrades the oplock - * to L2 else clear. - */ - if (rsp->OplockLevel) - set_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); - else - clear_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); + cfile->oplock_epoch = 0; + cfile->oplock_level = rsp->OplockLevel; + spin_unlock(&cfile->file_info_lock);
cifs_queue_oplock_break(cfile); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 318d805e74d40..64ad466695c55 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -3332,22 +3332,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
static void smb2_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - if (set_level2) - server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II, - 0, NULL); - else - server->ops->set_oplock_level(cinode, 0, 0, NULL); + server->ops->set_oplock_level(cinode, oplock, 0, NULL); }
static void -smb21_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) +smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache); + +static void +smb3_downgrade_oplock(struct TCP_Server_Info *server, + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - server->ops->set_oplock_level(cinode, - set_level2 ? SMB2_LEASE_READ_CACHING_HE : - 0, 0, NULL); + unsigned int old_state = cinode->oplock; + unsigned int old_epoch = cinode->epoch; + unsigned int new_state; + + if (epoch > old_epoch) { + smb21_set_oplock_level(cinode, oplock, 0, NULL); + cinode->epoch = epoch; + } + + new_state = cinode->oplock; + *purge_cache = false; + + if ((old_state & CIFS_CACHE_READ_FLG) != 0 && + (new_state & CIFS_CACHE_READ_FLG) == 0) + *purge_cache = true; + else if (old_state == new_state && (epoch - old_epoch > 1)) + *purge_cache = true; }
static void @@ -4607,7 +4623,7 @@ struct smb_version_operations smb21_operations = { .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, @@ -4707,7 +4723,7 @@ struct smb_version_operations smb30_operations = { .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb3_negotiate_wsize, @@ -4815,7 +4831,7 @@ struct smb_version_operations smb311_operations = { .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb3_negotiate_wsize, diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 0abfde6d0b051..f264e1d36fe16 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -1386,7 +1386,7 @@ struct smb2_oplock_break { struct smb2_lease_break { struct smb2_sync_hdr sync_hdr; __le16 StructureSize; /* Must be 44 */ - __le16 Reserved; + __le16 Epoch; __le32 Flags; __u8 LeaseKey[16]; __le32 CurrentLeaseState;
From: Jaegeuk Kim jaegeuk@kernel.org
[ Upstream commit 803e74be04b32f7785742dcabfc62116718fbb06 ]
We must stop GC once the segment becomes fully valid. Otherwise, GC can produce more dirty segments by partially moving the valid blocks in the segment.
Ramon sometimes hit a "no free segment" panic, and saw that this case happens when validating the reliable file pinning feature.
Signed-off-by: Ramon Pantin pantin@google.com Signed-off-by: Jaegeuk Kim jaegeuk@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- fs/f2fs/gc.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index e611d768efde3..a78aa5480454f 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -1012,8 +1012,14 @@ next_step: block_t start_bidx; nid_t nid = le32_to_cpu(entry->nid);
- /* stop BG_GC if there is not enough free sections. */ - if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) + /* + * stop BG_GC if there is not enough free sections. + * Or, stop GC if the segment becomes fully valid caused by + * race condition along with SSR block allocation. + */ + if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || + get_valid_blocks(sbi, segno, false) == + sbi->blocks_per_seg) return submitted;
if (check_valid_map(sbi, segno, off) == 0)
From: Tzung-Bi Shih tzungbi@google.com
[ Upstream commit acb874a7c049ec49d8fc66c893170fb42c01bdf7 ]
It was observed that Baytrail-based chromebooks could hit continuous PLL unlock when using a playback stream and a capture stream simultaneously, specifically when starting a capture stream after a playback stream has been started. As a result, the audio data could become corrupted or turn completely silent.
As the datasheet suggested, the maximum PLL lock time should be 7 msec. The workaround resets the codec softly by toggling SHDN off and on if PLL failed to lock for 10 msec. Notably, there is no suggested hold time for SHDN off.
On Baytrail-based chromebooks, continuous PLL unlock would easily happen if there is a 10 msec delay between SHDN off and on. Remove the msleep().
Signed-off-by: Tzung-Bi Shih tzungbi@google.com Link: https://lore.kernel.org/r/20191122073114.219945-2-tzungbi@google.com Reviewed-by: Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown broonie@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- sound/soc/codecs/max98090.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index 45da2b51543e7..6b9d326e11b07 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -2112,10 +2112,16 @@ static void max98090_pll_work(struct max98090_priv *max98090)
dev_info_ratelimited(component->dev, "PLL unlocked\n");
+ /* + * As the datasheet suggested, the maximum PLL lock time should be + * 7 msec. The workaround resets the codec softly by toggling SHDN + * off and on if PLL failed to lock for 10 msec. Notably, there is + * no suggested hold time for SHDN off. + */ + /* Toggle shutdown OFF then ON */ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, 0); - msleep(10); snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, M98090_SHDNN_MASK);
From: Max Filippov jcmvbkbc@gmail.com
[ Upstream commit 02ce94c229251555ac726ecfebe3458ef5905fa9 ]
Don't overwrite the return value if the system call was cancelled at entry by ptrace. Return a status code from do_syscall_trace_enter so that pt_regs::syscall doesn't need to be changed to skip the syscall.
Signed-off-by: Max Filippov jcmvbkbc@gmail.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/xtensa/kernel/entry.S | 4 ++-- arch/xtensa/kernel/ptrace.c | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 59671603c9c62..1f07876ea2ed7 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1897,6 +1897,7 @@ ENTRY(system_call)
mov a6, a2 call4 do_syscall_trace_enter + beqz a6, .Lsyscall_exit l32i a7, a2, PT_SYSCALL
1: @@ -1911,8 +1912,6 @@ ENTRY(system_call)
addx4 a4, a7, a4 l32i a4, a4, 0 - movi a5, sys_ni_syscall; - beq a4, a5, 1f
/* Load args: arg0 - arg5 are passed via regs. */
@@ -1932,6 +1931,7 @@ ENTRY(system_call)
s32i a6, a2, PT_AREG2 bnez a3, 1f +.Lsyscall_exit: abi_ret(4)
1: diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index b964f0b2d8864..145742d70a9f2 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request, return ret; }
-void do_syscall_trace_enter(struct pt_regs *regs) +void do_syscall_trace_leave(struct pt_regs *regs); +int do_syscall_trace_enter(struct pt_regs *regs) { + if (regs->syscall == NO_SYSCALL) + regs->areg[2] = -ENOSYS; + if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) + tracehook_report_syscall_entry(regs)) { + regs->areg[2] = -ENOSYS; regs->syscall = NO_SYSCALL; + return 0; + } + + if (regs->syscall == NO_SYSCALL) { + do_syscall_trace_leave(regs); + return 0; + }
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, syscall_get_nr(current, regs)); + + return 1; }
void do_syscall_trace_leave(struct pt_regs *regs)
From: Vasily Gorbik gor@linux.ibm.com
[ Upstream commit 7bcaad1f9fac889f5fcd1a383acf7e00d006da41 ]
CALL_ON_STACK is intended to be used for temporary stack switching with potential return to the caller.
When CALL_ON_STACK is misused to switch from the nodat stack to the task stack, the back_chain information would later lead the stack unwinder from the task stack into the (per cpu) nodat stack, which is reused for other purposes. This would yield confusing unwinding results or errors.
To avoid that, introduce CALL_ON_STACK_NORETURN to be used instead. It makes sure that back_chain is zeroed and the unwinder finishes gracefully, ending up at the task pt_regs.
Reviewed-by: Heiko Carstens heiko.carstens@de.ibm.com Signed-off-by: Vasily Gorbik gor@linux.ibm.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/s390/include/asm/stacktrace.h | 11 +++++++++++ arch/s390/kernel/setup.c | 9 +-------- arch/s390/kernel/smp.c | 2 +- 3 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h index 0ae4bbf7779c8..3679d224fd3c5 100644 --- a/arch/s390/include/asm/stacktrace.h +++ b/arch/s390/include/asm/stacktrace.h @@ -111,4 +111,15 @@ struct stack_frame { r2; \ })
+#define CALL_ON_STACK_NORETURN(fn, stack) \ +({ \ + asm volatile( \ + " la 15,0(%[_stack])\n" \ + " xc %[_bc](8,15),%[_bc](15)\n" \ + " brasl 14,%[_fn]\n" \ + ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \ + [_stack] "a" (stack), [_fn] "X" (fn)); \ + BUG(); \ +}) + #endif /* _ASM_S390_STACKTRACE_H */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 07b2b61a0289f..82ef081e7448e 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -356,7 +356,6 @@ early_initcall(async_stack_realloc);
void __init arch_call_rest_init(void) { - struct stack_frame *frame; unsigned long stack;
stack = stack_alloc(); @@ -369,13 +368,7 @@ void __init arch_call_rest_init(void) set_task_stack_end_magic(current); stack += STACK_INIT_OFFSET; S390_lowcore.kernel_stack = stack; - frame = (struct stack_frame *) stack; - memset(frame, 0, sizeof(*frame)); - /* Branch to rest_init on the new stack, never returns */ - asm volatile( - " la 15,0(%[_frame])\n" - " jg rest_init\n" - : : [_frame] "a" (frame)); + CALL_ON_STACK_NORETURN(rest_init, stack); }
static void __init setup_lowcore_dat_off(void) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 66bf050d785cf..ad426cc656e56 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -878,7 +878,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid) S390_lowcore.restart_source = -1UL; __ctl_load(S390_lowcore.cregs_save_area, 0, 15); __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); - CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0); + CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack); }
/* Upping and downing of CPUs */
From: Omar Sandoval osandov@fb.com
[ Upstream commit 0c4da70c83d41a8461fdf50a3f7b292ecb04e378 ]
Realtime files in XFS allocate extents in rextsize units. However, the written/unwritten state of those extents is still tracked in blocksize units. Therefore, a realtime file can be split up into written and unwritten extents that are not necessarily aligned to the realtime extent size. __xfs_bunmapi() has some logic to handle these various corner cases. Consider how it handles the following case:
1. The last extent is unwritten. 2. The last extent is smaller than the realtime extent size. 3. startblock of the last extent is not aligned to the realtime extent size, but startblock + blockcount is.
In this case, __xfs_bunmapi() calls xfs_bmap_add_extent_unwritten_real() to set the second-to-last extent to unwritten. This should merge the last and second-to-last extents, so __xfs_bunmapi() moves on to the second-to-last extent.
However, if the size of the last and second-to-last extents combined is greater than MAXEXTLEN, xfs_bmap_add_extent_unwritten_real() does not merge the two extents. When that happens, __xfs_bunmapi() skips past the last extent without unmapping it, thus leaking the space.
Fix it by only unwriting the minimum amount needed to align the last extent to the realtime extent size, which is guaranteed to merge with the last extent.
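A worked example of the new alignment math (all numbers invented; assume a file where block numbers equal file offsets for simplicity):

#include <stdio.h>

int main(void)
{
        unsigned long long rextsize = 8;        /* blocks per rt extent */
        unsigned long long start = 100;         /* first offset being unmapped */
        unsigned long long prev_startoff = 90;  /* second-to-last extent */
        unsigned long long del_startoff = 117;  /* unaligned tail extent */
        unsigned long long mod = del_startoff % rextsize;       /* 5 */
        unsigned long long unwrite_start;

        /*
         * The old code rewound all the way to max(start, prev_startoff);
         * the new code rewinds only to the realtime extent boundary, i.e.
         * max3(start, del_startoff - mod, prev_startoff).
         */
        unwrite_start = del_startoff - mod;     /* 112, rt-aligned */
        if (unwrite_start < start)
                unwrite_start = start;
        if (unwrite_start < prev_startoff)
                unwrite_start = prev_startoff;

        /* converting just [112, 117) is guaranteed to merge with the tail */
        printf("unwrite [%llu, %llu)\n", unwrite_start, del_startoff);
        return 0;
}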
Signed-off-by: Omar Sandoval osandov@fb.com Reviewed-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/xfs/libxfs/xfs_bmap.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 19a600443b9ee..f8db3fe616df9 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -5376,16 +5376,17 @@ __xfs_bunmapi( } div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod); if (mod) { + xfs_extlen_t off = mp->m_sb.sb_rextsize - mod; + /* * Realtime extent is lined up at the end but not * at the front. We'll get rid of full extents if * we can. */ - mod = mp->m_sb.sb_rextsize - mod; - if (del.br_blockcount > mod) { - del.br_blockcount -= mod; - del.br_startoff += mod; - del.br_startblock += mod; + if (del.br_blockcount > off) { + del.br_blockcount -= off; + del.br_startoff += off; + del.br_startblock += off; } else if (del.br_startoff == start && (del.br_state == XFS_EXT_UNWRITTEN || tp->t_blk_res == 0)) { @@ -5403,6 +5404,7 @@ __xfs_bunmapi( continue; } else if (del.br_state == XFS_EXT_UNWRITTEN) { struct xfs_bmbt_irec prev; + xfs_fileoff_t unwrite_start;
/* * This one is already unwritten. @@ -5416,12 +5418,13 @@ __xfs_bunmapi( ASSERT(!isnullstartblock(prev.br_startblock)); ASSERT(del.br_startblock == prev.br_startblock + prev.br_blockcount); - if (prev.br_startoff < start) { - mod = start - prev.br_startoff; - prev.br_blockcount -= mod; - prev.br_startblock += mod; - prev.br_startoff = start; - } + unwrite_start = max3(start, + del.br_startoff - mod, + prev.br_startoff); + mod = unwrite_start - prev.br_startoff; + prev.br_startoff = unwrite_start; + prev.br_startblock += mod; + prev.br_blockcount -= mod; prev.br_state = XFS_EXT_UNWRITTEN; error = xfs_bmap_add_extent_unwritten_real(tp, ip, whichfork, &icur, &cur,
From: Monk Liu Monk.Liu@amd.com
[ Upstream commit 82a829dc8c2bb03cc9b7e5beb1c5479aa3ba7831 ]
issue: the kernel would report a double-unpin warning on the CSB bo during driver unloading
why: we unpin it during hw_fini, and there is another unpin of the CSB bo in sw_fini.
fix: we don't actually need to pin/unpin it during hw_init/fini since it is created kernel-pinned; we only need to fill the CSB again during hw_init to prevent CSB/VRAM loss after S3
v2: call get_csb in init_rlc so hw_init() will restore the CSIB content even after reset or S3
v3: use bo_create_kernel instead of bo_create_reserved for the CSB; otherwise the bo_free_kernel() on the CSB is mismatched and would leave its internal reservation pending forever
take care of gfx7/8 as well
Signed-off-by: Monk Liu Monk.Liu@amd.com Reviewed-by: Hawking Zhang Hawking.Zhang@amd.com Reviewed-by: Xiaojie Yuan xiaojie.yuan@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 10 +---- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 58 +------------------------ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 40 +---------------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 40 +---------------- 5 files changed, 6 insertions(+), 144 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index c8793e6cc3c5d..6373bfb47d55d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -124,13 +124,12 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) */ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) { - volatile u32 *dst_ptr; u32 dws; int r;
/* allocate clear state block */ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); - r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, @@ -141,13 +140,6 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) return r; }
- /* set up the cs buffer */ - dst_ptr = adev->gfx.rlc.cs_ptr; - adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - return 0; }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 19876c90be0e1..d17edc850427a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -993,39 +993,6 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return 0; }
-static int gfx_v10_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v10_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v10_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -1787,25 +1754,7 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static int gfx_v10_0_init_csb(struct amdgpu_device *adev) { - int r; - - if (adev->in_gpu_reset) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (r) - return r; - - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, - (void **)&adev->gfx.rlc.cs_ptr); - if (!r) { - adev->gfx.rlc.funcs->get_csb_buffer(adev, - adev->gfx.rlc.cs_ptr); - amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); - } - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - if (r) - return r; - } + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, @@ -3774,10 +3723,6 @@ static int gfx_v10_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gfx_v10_0_csb_vram_pin(adev); - if (r) - return r; - if (!amdgpu_emu_mode) gfx_v10_0_init_golden_registers(adev);
@@ -3865,7 +3810,6 @@ static int gfx_v10_0_hw_fini(void *handle) } gfx_v10_0_cp_enable(adev, false); gfx_v10_0_enable_gui_idle_interrupt(adev, false); - gfx_v10_0_csb_vram_unpin(adev);
return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 791ba398f007e..d92e92e5d50b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4554,6 +4554,8 @@ static int gfx_v7_0_hw_init(void *handle)
gfx_v7_0_constants_init(adev);
+ /* init CSB */ + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* init rlc */ r = adev->gfx.rlc.funcs->resume(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index cc88ba76a8d4a..467ed7fca884d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1321,39 +1321,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return 0; }
-static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -3917,6 +3884,7 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static void gfx_v8_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32(mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); @@ -4837,10 +4805,6 @@ static int gfx_v8_0_hw_init(void *handle) gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev);
- r = gfx_v8_0_csb_vram_pin(adev); - if (r) - return r; - r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4958,8 +4922,6 @@ static int gfx_v8_0_hw_fini(void *handle) pr_err("rlc is busy, skip halt rlc\n"); amdgpu_gfx_rlc_exit_safe_mode(adev);
- gfx_v8_0_csb_vram_unpin(adev); - return 0; }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 6004fdacc8663..90dcc7afc9c43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1675,39 +1675,6 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) return 0; }
-static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev) -{ - int r; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) - return r; - - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, - AMDGPU_GEM_DOMAIN_VRAM); - if (!r) - adev->gfx.rlc.clear_state_gpu_addr = - amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - return r; -} - -static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev) -{ - int r; - - if (!adev->gfx.rlc.clear_state_obj) - return; - - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (likely(r == 0)) { - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - } -} - static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -2596,6 +2563,7 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
static void gfx_v9_0_init_csb(struct amdgpu_device *adev) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI), adev->gfx.rlc.clear_state_gpu_addr >> 32); @@ -3888,10 +3856,6 @@ static int gfx_v9_0_hw_init(void *handle)
gfx_v9_0_constants_init(adev);
- r = gfx_v9_0_csb_vram_pin(adev); - if (r) - return r; - r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -3977,8 +3941,6 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); adev->gfx.rlc.funcs->stop(adev);
- gfx_v9_0_csb_vram_unpin(adev); - return 0; }
From: Mark Rutland mark.rutland@arm.com
[ Upstream commit ca2ef4ffabbef25644e02a98b0f48869f8be0375 ]
A kernel built with KASAN && FTRACE_WITH_REGS && !MODULES, produces a boot-time splat in the bowels of ftrace:
| [ 0.000000] ftrace: allocating 32281 entries in 127 pages | [ 0.000000] ------------[ cut here ]------------ | [ 0.000000] WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:2019 ftrace_bug+0x27c/0x328 | [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 5.4.0-rc3-00008-g7f08ae53a7e3 #13 | [ 0.000000] Hardware name: linux,dummy-virt (DT) | [ 0.000000] pstate: 60000085 (nZCv daIf -PAN -UAO) | [ 0.000000] pc : ftrace_bug+0x27c/0x328 | [ 0.000000] lr : ftrace_init+0x640/0x6cc | [ 0.000000] sp : ffffa000120e7e00 | [ 0.000000] x29: ffffa000120e7e00 x28: ffff00006ac01b10 | [ 0.000000] x27: ffff00006ac898c0 x26: dfffa00000000000 | [ 0.000000] x25: ffffa000120ef290 x24: ffffa0001216df40 | [ 0.000000] x23: 000000000000018d x22: ffffa0001244c700 | [ 0.000000] x21: ffffa00011bf393c x20: ffff00006ac898c0 | [ 0.000000] x19: 00000000ffffffff x18: 0000000000001584 | [ 0.000000] x17: 0000000000001540 x16: 0000000000000007 | [ 0.000000] x15: 0000000000000000 x14: ffffa00010432770 | [ 0.000000] x13: ffff940002483519 x12: 1ffff40002483518 | [ 0.000000] x11: 1ffff40002483518 x10: ffff940002483518 | [ 0.000000] x9 : dfffa00000000000 x8 : 0000000000000001 | [ 0.000000] x7 : ffff940002483519 x6 : ffffa0001241a8c0 | [ 0.000000] x5 : ffff940002483519 x4 : ffff940002483519 | [ 0.000000] x3 : ffffa00011780870 x2 : 0000000000000001 | [ 0.000000] x1 : 1fffe0000d591318 x0 : 0000000000000000 | [ 0.000000] Call trace: | [ 0.000000] ftrace_bug+0x27c/0x328 | [ 0.000000] ftrace_init+0x640/0x6cc | [ 0.000000] start_kernel+0x27c/0x654 | [ 0.000000] random: get_random_bytes called from print_oops_end_marker+0x30/0x60 with crng_init=0 | [ 0.000000] ---[ end trace 0000000000000000 ]--- | [ 0.000000] ftrace faulted on writing | [ 0.000000] [<ffffa00011bf393c>] _GLOBAL__sub_D_65535_0___tracepoint_initcall_level+0x4/0x28 | [ 0.000000] Initializing ftrace call sites | [ 0.000000] ftrace record flags: 0 | [ 0.000000] (0) | [ 0.000000] expected tramp: ffffa000100b3344
This is due to an unfortunate combination of several factors.
Building with KASAN results in the compiler generating anonymous functions to register/unregister global variables against the shadow memory. These functions are placed in .text.startup/.text.exit, and given mangled names like _GLOBAL__sub_{I,D}_65535_0_$OTHER_SYMBOL. The kernel linker script places these in .init.text and .exit.text respectively, which are both discarded at runtime as part of initmem.
Building with FTRACE_WITH_REGS uses -fpatchable-function-entry=2, which also instruments KASAN's anonymous functions. When these are discarded with the rest of initmem, ftrace removes dangling references to these call sites.
Building without MODULES implicitly disables STRICT_MODULE_RWX, and causes arm64's patch_map() function to treat any !core_kernel_text() symbol as something that can be modified in-place. As core_kernel_text() is only true for .text and .init.text, with the latter depending on system_state < SYSTEM_RUNNING, we'll treat .exit.text as something that can be patched in-place. However, .exit.text is mapped read-only.
Hence in this configuration the ftrace init code blows up while trying to patch one of the functions generated by KASAN.
We could try to filter out the call sites in .exit.text rather than initializing them, but this would be inconsistent with how we handle .init.text, and requires hooking into core bits of ftrace. The behaviour of patch_map() is also inconsistent today, so instead let's clean that up and have it consistently handle .exit.text.
This patch teaches patch_map() to handle .exit.text at init time, preventing the boot-time splat above. The flow of patch_map() is reworked to make the logic clearer and minimize redundant conditionality.
Fixes: 3b23e4991fb66f6d ("arm64: implement ftrace with regs") Signed-off-by: Mark Rutland mark.rutland@arm.com Cc: Amit Daniel Kachhap amit.kachhap@arm.com Cc: Ard Biesheuvel ard.biesheuvel@linaro.org Cc: Torsten Duwe duwe@suse.de Cc: Will Deacon will@kernel.org Signed-off-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/arm64/include/asm/sections.h | 1 + arch/arm64/kernel/insn.c | 22 ++++++++++++++++++---- arch/arm64/kernel/vmlinux.lds.S | 3 +++ 3 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 788ae971f11c1..25a73aab438f9 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[]; extern char __idmap_text_start[], __idmap_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __inittext_begin[], __inittext_end[]; +extern char __exittext_begin[], __exittext_end[]; extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __mmuoff_data_start[], __mmuoff_data_end[]; extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index a612da533ea20..53bcf5386907f 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -21,6 +21,7 @@ #include <asm/fixmap.h> #include <asm/insn.h> #include <asm/kprobes.h> +#include <asm/sections.h>
#define AARCH64_INSN_SF_BIT BIT(31) #define AARCH64_INSN_N_BIT BIT(22) @@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)
static DEFINE_RAW_SPINLOCK(patch_lock);
+static bool is_exit_text(unsigned long addr) +{ + /* discarded with init text/data */ + return system_state < SYSTEM_RUNNING && + addr >= (unsigned long)__exittext_begin && + addr < (unsigned long)__exittext_end; +} + +static bool is_image_text(unsigned long addr) +{ + return core_kernel_text(addr) || is_exit_text(addr); +} + static void __kprobes *patch_map(void *addr, int fixmap) { unsigned long uintaddr = (uintptr_t) addr; - bool module = !core_kernel_text(uintaddr); + bool image = is_image_text(uintaddr); struct page *page;
- if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) - page = vmalloc_to_page(addr); - else if (!module) + if (image) page = phys_to_page(__pa_symbol(addr)); + else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + page = vmalloc_to_page(addr); else return addr;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 4f77de8ce1384..0bab37b1acbe9 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -170,9 +170,12 @@ SECTIONS __inittext_begin = .;
INIT_TEXT_SECTION(8) + + __exittext_begin = .; .exit.text : { ARM_EXIT_KEEP(EXIT_TEXT) } + __exittext_end = .;
. = ALIGN(4); .altinstructions : {
From: Stanislav Fomichev sdf@google.com
[ Upstream commit ef8c84effce3c7a0b8196fcda8f430c815ab511c ]
It looks like the BPF program that handles the BPF_SOCK_OPS_STATE_CB state can race with the bpf_map_lookup_elem("global_map") done by the test; I sometimes see failures in this test, and re-running helps.
Since we know that we expect the callback to be called 3 times (once for the listening socket and twice for the two ends of the connection), let's export this number and add simple retry logic around it.
Also, let's make EXPECT_EQ() not return on failure, but continue evaluating all conditions; that should make potential debugging easier.
With this fix in place I don't observe the flakiness anymore.
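The shape of the retry logic, as a condensed fragment from the diff below (EXPECTED_CLOSE_EVENTS and the retry bound are introduced by this patch):

	int retry = 10;

retry_lookup:
	if (bpf_map_lookup_elem(map_fd, &key, &g) != 0)
		goto err;

	/* the state callback for the last close may not have run yet */
	if (g.num_close_events != EXPECTED_CLOSE_EVENTS && retry--) {
		usleep(100);
		goto retry_lookup;
	}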
Signed-off-by: Stanislav Fomichev sdf@google.com Signed-off-by: Alexei Starovoitov ast@kernel.org Cc: Lawrence Brakmo brakmo@fb.com Link: https://lore.kernel.org/bpf/20191204190955.170934-1-sdf@google.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../selftests/bpf/progs/test_tcpbpf_kern.c | 1 + tools/testing/selftests/bpf/test_tcpbpf.h | 1 + .../testing/selftests/bpf/test_tcpbpf_user.c | 25 +++++++++++++------ 3 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 2e233613d1fc0..7fa4595d2b66b 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -131,6 +131,7 @@ int bpf_testcb(struct bpf_sock_ops *skops) g.bytes_received = skops->bytes_received; g.bytes_acked = skops->bytes_acked; } + g.num_close_events++; bpf_map_update_elem(&global_map, &key, &g, BPF_ANY); } diff --git a/tools/testing/selftests/bpf/test_tcpbpf.h b/tools/testing/selftests/bpf/test_tcpbpf.h index 7bcfa62070056..6220b95cbd02c 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf.h +++ b/tools/testing/selftests/bpf/test_tcpbpf.h @@ -13,5 +13,6 @@ struct tcpbpf_globals { __u64 bytes_received; __u64 bytes_acked; __u32 num_listen; + __u32 num_close_events; }; #endif diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index 716b4e3be5813..3ae127620463d 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -16,6 +16,9 @@
#include "test_tcpbpf.h"
+/* 3 comes from one listening socket + both ends of the connection */ +#define EXPECTED_CLOSE_EVENTS 3 + #define EXPECT_EQ(expected, actual, fmt) \ do { \ if ((expected) != (actual)) { \ @@ -23,13 +26,14 @@ " Actual: %" fmt "\n" \ " Expected: %" fmt "\n", \ (actual), (expected)); \ - goto err; \ + ret--; \ } \ } while (0)
int verify_result(const struct tcpbpf_globals *result) { __u32 expected_events; + int ret = 0;
expected_events = ((1 << BPF_SOCK_OPS_TIMEOUT_INIT) | (1 << BPF_SOCK_OPS_RWND_INIT) | @@ -48,15 +52,15 @@ int verify_result(const struct tcpbpf_globals *result) EXPECT_EQ(0x80, result->bad_cb_test_rv, PRIu32); EXPECT_EQ(0, result->good_cb_test_rv, PRIu32); EXPECT_EQ(1, result->num_listen, PRIu32); + EXPECT_EQ(EXPECTED_CLOSE_EVENTS, result->num_close_events, PRIu32);
- return 0; -err: - return -1; + return ret; }
int verify_sockopt_result(int sock_map_fd) { __u32 key = 0; + int ret = 0; int res; int rv;
@@ -69,9 +73,7 @@ int verify_sockopt_result(int sock_map_fd) rv = bpf_map_lookup_elem(sock_map_fd, &key, &res); EXPECT_EQ(0, rv, "d"); EXPECT_EQ(1, res, "d"); - return 0; -err: - return -1; + return ret; }
static int bpf_find_map(const char *test, struct bpf_object *obj, @@ -96,6 +98,7 @@ int main(int argc, char **argv) int error = EXIT_FAILURE; struct bpf_object *obj; int cg_fd = -1; + int retry = 10; __u32 key = 0; int rv;
@@ -134,12 +137,20 @@ int main(int argc, char **argv) if (sock_map_fd < 0) goto err;
+retry_lookup: rv = bpf_map_lookup_elem(map_fd, &key, &g); if (rv != 0) { printf("FAILED: bpf_map_lookup_elem returns %d\n", rv); goto err; }
+ if (g.num_close_events != EXPECTED_CLOSE_EVENTS && retry--) { + printf("Unexpected number of close events (%d), retrying!\n", + g.num_close_events); + usleep(100); + goto retry_lookup; + } + if (verify_result(&g)) { printf("FAILED: Wrong stats\n"); goto err;
From: Xiaoming Ni nixiaoming@huawei.com
[ Upstream commit 1a50cb80f219c44adb6265f5071b81fc3c1deced ]
Registering the same notifier to a hook repeatedly can cause the hook list to form a ring or lose other members of the list.
case1: An infinite loop in notifier_chain_register() can cause soft lockup
	atomic_notifier_chain_register(&test_notifier_list, &test1);
	atomic_notifier_chain_register(&test_notifier_list, &test1);
	atomic_notifier_chain_register(&test_notifier_list, &test2);

case2: An infinite loop in notifier_call_chain() can cause soft lockup
	atomic_notifier_chain_register(&test_notifier_list, &test1);
	atomic_notifier_chain_register(&test_notifier_list, &test1);
	atomic_notifier_call_chain(&test_notifier_list, 0, NULL);

case3: lose other hook test2
	atomic_notifier_chain_register(&test_notifier_list, &test1);
	atomic_notifier_chain_register(&test_notifier_list, &test2);
	atomic_notifier_chain_register(&test_notifier_list, &test1);
case4: Unregister returns 0, but the hook is still in the linked list and is not really unregistered. If you call notifier_call_chain after the module (.ko) is unloaded, it will trigger an oops.
If the system is configured with softlockup_panic and the same hook is repeatedly registered on the panic_notifier_list, it will cause a loop panic.
Add a check in notifier_chain_register() to intercept duplicate registrations and avoid infinite loops.
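To see how the ring forms, consider a simplified standalone model of the insertion (a sketch, not the kernel code): with the list containing only n, re-registering n walks past the existing entry (equal priority) and re-inserts n after itself.

	struct notifier_block { int priority; struct notifier_block *next; };

	static int chain_register(struct notifier_block **nl,
				  struct notifier_block *n)
	{
		while (*nl) {
			if (*nl == n)	/* the check added by this patch */
				return 0;
			if (n->priority > (*nl)->priority)
				break;
			nl = &(*nl)->next;
		}
		/* Without the check, a duplicate walks until nl == &n->next:
		 * "n->next = *nl" stores NULL, then "*nl = n" sets
		 * n->next = n, so any later walk of the chain spins forever. */
		n->next = *nl;
		*nl = n;
		return 0;
	}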
Link: http://lkml.kernel.org/r/1568861888-34045-2-git-send-email-nixiaoming@huawei... Signed-off-by: Xiaoming Ni nixiaoming@huawei.com Reviewed-by: Vasily Averin vvs@virtuozzo.com Reviewed-by: Andrew Morton akpm@linux-foundation.org Cc: Alexey Dobriyan adobriyan@gmail.com Cc: Anna Schumaker anna.schumaker@netapp.com Cc: Arjan van de Ven arjan@linux.intel.com Cc: J. Bruce Fields bfields@fieldses.org Cc: Chuck Lever chuck.lever@oracle.com Cc: David S. Miller davem@davemloft.net Cc: Jeff Layton jlayton@kernel.org Cc: Andy Lutomirski luto@kernel.org Cc: Ingo Molnar mingo@kernel.org Cc: Nadia Derbey Nadia.Derbey@bull.net Cc: "Paul E. McKenney" paulmck@kernel.org Cc: Sam Protsenko semen.protsenko@linaro.org Cc: Alan Stern stern@rowland.harvard.edu Cc: Thomas Gleixner tglx@linutronix.de Cc: Trond Myklebust trond.myklebust@hammerspace.com Cc: Viresh Kumar viresh.kumar@linaro.org Cc: Xiaoming Ni nixiaoming@huawei.com Cc: YueHaibing yuehaibing@huawei.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/notifier.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/notifier.c b/kernel/notifier.c index 157d7c29f7207..f6d5ffe4e72ec 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c @@ -23,7 +23,10 @@ static int notifier_chain_register(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { - WARN_ONCE(((*nl) == n), "double register detected"); + if (unlikely((*nl) == n)) { + WARN(1, "double register detected"); + return 0; + } if (n->priority > (*nl)->priority) break; nl = &((*nl)->next);
From: Joe Perches joe@perches.com
[ Upstream commit 5e1aada08cd19ea652b2d32a250501d09b02ff2e ]
Initialization is not guaranteed to zero padding bytes, so use an explicit memset() instead to avoid leaking any kernel content in any possible padding bytes.
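A hedged illustration with a hypothetical struct (not the utsname layout): "= {}" zeroes the named members, but the compiler is not required to clear the padding between them, whereas memset() clears every byte.

	struct demo {		/* hypothetical: 1 data byte, then padding */
		char a;		/* offset 0 */
		/* typically 3 padding bytes here: not guaranteed zeroed
		 * by "= {}" initialization */
		int b;		/* offset 4 on common ABIs */
	};

	void fill(struct demo *out)
	{
		struct demo tmp;

		memset(&tmp, 0, sizeof(tmp));	/* zeroes members AND padding */
		tmp.a = 'x';
		tmp.b = 42;
		*out = tmp;		/* now safe to copy to user space */
	}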
Link: http://lkml.kernel.org/r/dfa331c00881d61c8ee51577a082d8bebd61805c.camel@perc... Signed-off-by: Joe Perches joe@perches.com Cc: Dan Carpenter error27@gmail.com Cc: Julia Lawall julia.lawall@lip6.fr Cc: Thomas Gleixner tglx@linutronix.de Cc: Kees Cook keescook@chromium.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/sys.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/sys.c b/kernel/sys.c index a611d1d58c7d0..3459a5ce0da01 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1279,11 +1279,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) { - struct oldold_utsname tmp = {}; + struct oldold_utsname tmp;
if (!name) return -EFAULT;
+ memset(&tmp, 0, sizeof(tmp)); + down_read(&uts_sem); memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
From: Miaohe Lin linmiaohe@huawei.com
[ Upstream commit 0bda9498dd45280e334bfe88b815ebf519602cc3 ]
In kvm_vgic_dist_init(), called from kvm_vgic_map_resources(), if dist->vgic_model is invalid, dist->spis is freed without setting dist->spis = NULL. In the vgicv2 resources cleanup path, __kvm_vgic_destroy() is then called to free the allocated resources, and dist->spis is freed again in the cleanup chain because we forgot to set dist->spis = NULL in the kvm_vgic_dist_init() failure path. So a double free happens.
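The fix follows the usual idiom for error paths that a shared cleanup routine will also walk (a generic sketch with hypothetical names): clear the pointer right after freeing it, so a later kfree() on the same field is a harmless kfree(NULL).

	static int table_init(struct table *t)	/* hypothetical */
	{
		t->entries = kcalloc(t->nr, sizeof(*t->entries), GFP_KERNEL);
		if (!t->entries)
			return -ENOMEM;

		if (!model_valid(t)) {		/* error after allocation */
			kfree(t->entries);
			t->entries = NULL;	/* table_destroy() frees too;
						 * kfree(NULL) is a no-op */
			return -EINVAL;
		}
		return 0;
	}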
Signed-off-by: Miaohe Lin linmiaohe@huawei.com Signed-off-by: Marc Zyngier maz@kernel.org Reviewed-by: Eric Auger eric.auger@redhat.com Link: https://lore.kernel.org/r/1574923128-19956-1-git-send-email-linmiaohe@huawei... Signed-off-by: Sasha Levin sashal@kernel.org --- virt/kvm/arm/vgic/vgic-init.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 6f50c429196de..6d85c6d894c39 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -177,6 +177,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) break; default: kfree(dist->spis); + dist->spis = NULL; return -EINVAL; } }
From: "Steven Rostedt (VMware)" rostedt@goodmis.org
[ Upstream commit af74262337faa65d5ac2944553437d3f5fb29123 ]
When pulling in Divya Indi's patch, I made a minor fix to remove unneeded braces. I committed my fixup via "git commit -a --amend". Unfortunately, I didn't realize I had some changes I was testing in the module code, and those changes were applied to Divya's patch as well.
This reverts the accidental updates to the module code.
Cc: Jessica Yu jeyu@kernel.org Cc: Divya Indi divya.indi@oracle.com Reported-by: Peter Zijlstra peterz@infradead.org Fixes: e585e6469d6f ("tracing: Verify if trace array exists before destroying it.") Signed-off-by: Steven Rostedt (VMware) rostedt@goodmis.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/module.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c index 0e3743dd3a568..819c5d3b4c295 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3753,6 +3753,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
module_enable_ro(mod, false); module_enable_nx(mod); + module_enable_x(mod);
/* Mark state as coming so strong_try_module_get() ignores us, * but kallsyms etc. can see us. */ @@ -3775,11 +3776,6 @@ static int prepare_coming_module(struct module *mod) if (err) return err;
- /* Make module executable after ftrace is enabled */ - mutex_lock(&module_mutex); - module_enable_x(mod); - mutex_unlock(&module_mutex); - blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); return 0;
From: "Darrick J. Wong" darrick.wong@oracle.com
[ Upstream commit b1de6fc7520fe12949c070af0e8c0e4044cd3420 ]
Omar Sandoval reported that a 4G fallocate on the realtime device causes filesystem shutdowns due to a log reservation overflow that happens when we log the rtbitmap updates. Factor rtbitmap/rtsummary updates into the tr_write and tr_itruncate log reservation calculations.
"The following reproducer results in a transaction log overrun warning for me:
mkfs.xfs -f -r rtdev=/dev/vdc -d rtinherit=1 -m reflink=0 /dev/vdb
mount -o rtdev=/dev/vdc /dev/vdb /mnt
fallocate -l 4G /mnt/foo"
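As a rough worked example of the reservation term added below (illustrative geometry, not taken from the report): assuming 4096-byte blocks, a realtime extent size of one block, and MAXEXTLEN = 2^21 - 1 blocks,

	rtbmp_bytes   = (MAXEXTLEN / rextsize) / NBBY
	              = (2097151 / 1) / 8      = 262143 bytes
	per-op blocks = howmany(262143, 4096) + 1 (summary block)
	              = 64 + 1                 = 65 blocks

So on this hypothetical geometry each rt allocation or free can dirty on the order of 65 extra blocks that the old tr_write/tr_itruncate reservations never accounted for.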
Reported-by: Omar Sandoval osandov@osandov.com Tested-by: Omar Sandoval osandov@osandov.com Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Reviewed-by: Brian Foster bfoster@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/xfs/libxfs/xfs_trans_resv.c | 96 +++++++++++++++++++++++++++------- 1 file changed, 77 insertions(+), 19 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c index d12bbd526e7c0..b3584cd2cc164 100644 --- a/fs/xfs/libxfs/xfs_trans_resv.c +++ b/fs/xfs/libxfs/xfs_trans_resv.c @@ -196,6 +196,24 @@ xfs_calc_inode_chunk_res( return res; }
+/* + * Per-extent log reservation for the btree changes involved in freeing or + * allocating a realtime extent. We have to be able to log as many rtbitmap + * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents, + * as well as the realtime summary block. + */ +unsigned int +xfs_rtalloc_log_count( + struct xfs_mount *mp, + unsigned int num_ops) +{ + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + unsigned int rtbmp_bytes; + + rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY; + return (howmany(rtbmp_bytes, blksz) + 1) * num_ops; +} + /* * Various log reservation values. * @@ -218,13 +236,21 @@ xfs_calc_inode_chunk_res(
/* * In a write transaction we can allocate a maximum of 2 - * extents. This gives: + * extents. This gives (t1): * the inode getting the new extents: inode size * the inode's bmap btree: max depth * block size * the agfs of the ags from which the extents are allocated: 2 * sector * the superblock free block counter: sector size * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size - * And the bmap_finish transaction can free bmap blocks in a join: + * Or, if we're writing to a realtime file (t2): + * the inode getting the new extents: inode size + * the inode's bmap btree: max depth * block size + * the agfs of the ags from which the extents are allocated: 2 * sector + * the superblock free block counter: sector size + * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 1 block + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size + * And the bmap_finish transaction can free bmap blocks in a join (t3): * the agfs of the ags containing the blocks: 2 * sector size * the agfls of the ags containing the blocks: 2 * sector size * the super block free block counter: sector size @@ -234,40 +260,72 @@ STATIC uint xfs_calc_write_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) + + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t2 = xfs_calc_inode_res(mp, 1) + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), - XFS_FSB_TO_B(mp, 1)) + + blksz) + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1)))); + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz); + } else { + t2 = 0; + } + + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); }
/* - * In truncating a file we free up to two extents at once. We can modify: + * In truncating a file we free up to two extents at once. We can modify (t1): * the inode being truncated: inode size * the inode's bmap btree: (max depth + 1) * block size - * And the bmap_finish transaction can free the blocks and bmap blocks: + * And the bmap_finish transaction can free the blocks and bmap blocks (t2): * the agf for each of the ags: 4 * sector size * the agfl for each of the ags: 4 * sector size * the super block to reflect the freed blocks: sector size * worst case split in allocation btrees per extent assuming 4 extents: * 4 exts * 2 trees * (2 * max depth - 1) * block size + * Or, if it's a realtime file (t3): + * the agf for each of the ags: 2 * sector size + * the agfl for each of the ags: 2 * sector size + * the super block to reflect the freed blocks: sector size + * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 2 exts * 1 block + * worst case split in allocation btrees per extent assuming 2 extents: + * 2 exts * 2 trees * (2 * max depth - 1) * block size */ STATIC uint xfs_calc_itruncate_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + - xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), - XFS_FSB_TO_B(mp, 1)))); + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz); + + t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + } else { + t3 = 0; + } + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); }
/*
From: Kai Vehmanen kai.vehmanen@linux.intel.com
[ Upstream commit 8e85def5723eccea30ebf22645673692ab8cb3e2 ]
This reverts commit 42ec336f1f9d ("ALSA: hda: Disable regmap internal locking").
Without regmap locking, there is a race between snd_hda_codec_amp_init() and PM callbacks issuing regcache_sync(). This was caught by the following kernel warning trace:
<4> [358.080081] WARNING: CPU: 2 PID: 4157 at drivers/base/regmap/regcache.c:498 regcache_cache_only+0xf5/0x130
[...]
<4> [358.080148] Call Trace:
<4> [358.080158]  snd_hda_codec_amp_init+0x4e/0x100 [snd_hda_codec]
<4> [358.080169]  snd_hda_codec_amp_init_stereo+0x40/0x80 [snd_hda_codec]
Suggested-by: Takashi Iwai tiwai@suse.de BugLink: https://gitlab.freedesktop.org/drm/intel/issues/592 Signed-off-by: Kai Vehmanen kai.vehmanen@linux.intel.com Link: https://lore.kernel.org/r/20200108180856.5194-1-kai.vehmanen@linux.intel.com Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- sound/hda/hdac_regmap.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index 2596a881186fa..49780399c2849 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -363,7 +363,6 @@ static const struct regmap_config hda_regmap_cfg = { .reg_write = hda_reg_write, .use_single_read = true, .use_single_write = true, - .disable_locking = true, };
/**
From: Tuong Lien tuong.t.lien@dektech.com.au
[ Upstream commit 49afb806cb650dd1f06f191994f3aa657d264009 ]
When a socket is suddenly shut down or released, it will reject all the unreceived messages in its receive queue. This applies to a connected socket too, even though only one 'FIN' message needs to be sent back to its peer in this case.
In case there are many messages in the queue and/or some connections with such messages are shut down at the same time, the link layer can easily overflow at the 'TIPC_SYSTEM_IMPORTANCE' backlog level because of the message rejections. As a result, the link will be taken down. Moreover, as soon as the link is re-established, the socket layer can continue to reject messages and the same issue happens again...
The commit refactors the '__tipc_shutdown()' function to send only one 'FIN' in the situation mentioned above. For the connectionless case, rejection is unavoidable, but usually there are no rejections for such socket messages because they are 'dest-droppable' by default.
In addition, the new code makes the handling of the other socket states explicit (e.g. 'TIPC_LISTEN') and treats each as a separate case to avoid misbehaving.
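The resulting per-state behaviour, condensed from the diff below (a sketch of the new switch, not the full function):

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
	case TIPC_ESTABLISHED:
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		/* send exactly one FIN: reuse a queued skb as the FIN
		 * carrier if one exists, otherwise build a new conn msg */
		break;
	case TIPC_LISTEN:
		tsk_rej_rx_queue(sk, error);	/* reject queued SYNs only */
		break;
	default:
		__skb_queue_purge(&sk->sk_receive_queue);
		break;
	}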
Acked-by: Ying Xue ying.xue@windriver.com Acked-by: Jon Maloy jon.maloy@ericsson.com Signed-off-by: Tuong Lien tuong.t.lien@dektech.com.au Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/tipc/socket.c | 53 ++++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 21 deletions(-)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 5318bb6611abc..592c6b19aca72 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -260,12 +260,12 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err) * * Caller must hold socket lock */ -static void tsk_rej_rx_queue(struct sock *sk) +static void tsk_rej_rx_queue(struct sock *sk, int error) { struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue))) - tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT); + tipc_sk_respond(sk, skb, error); }
static bool tipc_sk_connected(struct sock *sk) @@ -515,34 +515,45 @@ static void __tipc_shutdown(struct socket *sock, int error) /* Remove any pending SYN message */ __skb_queue_purge(&sk->sk_write_queue);
- /* Reject all unreceived messages, except on an active connection - * (which disconnects locally & sends a 'FIN+' to peer). - */ - while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { - if (TIPC_SKB_CB(skb)->bytes_read) { - kfree_skb(skb); - continue; - } - if (!tipc_sk_type_connectionless(sk) && - sk->sk_state != TIPC_DISCONNECTING) { - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - tipc_node_remove_conn(net, dnode, tsk->portid); - } - tipc_sk_respond(sk, skb, error); + /* Remove partially received buffer if any */ + skb = skb_peek(&sk->sk_receive_queue); + if (skb && TIPC_SKB_CB(skb)->bytes_read) { + __skb_unlink(skb, &sk->sk_receive_queue); + kfree_skb(skb); }
- if (tipc_sk_type_connectionless(sk)) + /* Reject all unreceived messages if connectionless */ + if (tipc_sk_type_connectionless(sk)) { + tsk_rej_rx_queue(sk, error); return; + }
- if (sk->sk_state != TIPC_DISCONNECTING) { + switch (sk->sk_state) { + case TIPC_CONNECTING: + case TIPC_ESTABLISHED: + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + tipc_node_remove_conn(net, dnode, tsk->portid); + /* Send a FIN+/- to its peer */ + skb = __skb_dequeue(&sk->sk_receive_queue); + if (skb) { + __skb_queue_purge(&sk->sk_receive_queue); + tipc_sk_respond(sk, skb, error); + break; + } skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, tsk_own_node(tsk), tsk_peer_port(tsk), tsk->portid, error); if (skb) tipc_node_xmit_skb(net, skb, dnode, tsk->portid); - tipc_node_remove_conn(net, dnode, tsk->portid); - tipc_set_sk_state(sk, TIPC_DISCONNECTING); + break; + case TIPC_LISTEN: + /* Reject all SYN messages */ + tsk_rej_rx_queue(sk, error); + break; + default: + __skb_queue_purge(&sk->sk_receive_queue); + break; } }
@@ -2564,7 +2575,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, * Reject any stray messages received by new socket * before the socket lock was taken (very, very unlikely) */ - tsk_rej_rx_queue(new_sk); + tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
/* Connect new socket to it's peer */ tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
From: Vasily Averin vvs@virtuozzo.com
[ Upstream commit 8bf7092021f283944f0c5f4c364853201c45c611 ]
If the seq_file .next function does not change the position index, a read after some lseek can generate unexpected output.
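The contract being enforced here, as a minimal sketch (hypothetical iterator; the helper name is illustrative): a seq_file .next implementation must advance *pos on every call, including the final one that returns NULL, or a read issued after llseek() can replay or skip records.

	static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		(*pos)++;		/* advance unconditionally */
		return demo_walk(seq);	/* may return NULL at end of data */
	}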
https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin vvs@virtuozzo.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/atm/proc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/net/atm/proc.c b/net/atm/proc.c index d79221fd4dae2..c318967073139 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -134,8 +134,7 @@ static void vcc_seq_stop(struct seq_file *seq, void *v) static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { v = vcc_walk(seq, 1); - if (v) - (*pos)++; + (*pos)++; return v; }
From: Vasily Averin vvs@virtuozzo.com
[ Upstream commit 1e3f9f073c47bee7c23e77316b07bc12338c5bba ]
If the seq_file .next function does not change the position index, a read after some lseek can generate unexpected output.
https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin vvs@virtuozzo.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/core/neighbour.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 7b40d12f0c229..04953e5f25302 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3290,6 +3290,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu+1; return per_cpu_ptr(tbl->stats, cpu); } + (*pos)++; return NULL; }
From: Vasily Averin vvs@virtuozzo.com
[ Upstream commit a3ea86739f1bc7e121d921842f0f4a8ab1af94d9 ]
If the seq_file .next function does not change the position index, a read after some lseek can generate unexpected output.
https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin vvs@virtuozzo.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/ipv4/route.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b3a8d32f7d8df..4360c90b636de 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -271,6 +271,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } + (*pos)++; return NULL;
}
From: Vasily Averin vvs@virtuozzo.com
[ Upstream commit 4fc427e0515811250647d44de38d87d7b0e0790f ]
If the seq_file .next function does not change the position index, a read after some lseek can generate unexpected output.
https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin vvs@virtuozzo.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/ipv6/ip6_fib.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 7a0c877ca306c..7662de1bd7fd2 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -2474,14 +2474,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private;
+ ++(*pos); if (!v) goto iter_table;
n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next); - if (n) { - ++*pos; + if (n) return n; - }
iter_table: ipv6_route_check_sernum(iter); @@ -2489,8 +2488,6 @@ iter_table: r = fib6_walk_continue(&iter->w); spin_unlock_bh(&iter->tbl->tb6_lock); if (r > 0) { - if (v) - ++*pos; return iter->w.leaf; } else if (r < 0) { fib6_walker_unlink(net, &iter->w);
From: Stephan Gerhold stephan@gerhold.net
[ Upstream commit 97de863673f07f424dd0666aefb4b6ecaba10171 ]
Disabling the display using MCDE currently results in a warning together with a delay caused by some timeouts:
mcde a0350000.mcde: MCDE display is disabled
------------[ cut here ]------------
WARNING: CPU: 0 PID: 20 at drivers/gpu/drm/drm_atomic_helper.c:2258 drm_atomic_helper_commit_hw_done+0xe0/0xe4
Hardware name: ST-Ericsson Ux5x0 platform (Device Tree Support)
Workqueue: events drm_mode_rmfb_work_fn
[<c010f468>] (unwind_backtrace) from [<c010b54c>] (show_stack+0x10/0x14)
[<c010b54c>] (show_stack) from [<c079dd90>] (dump_stack+0x84/0x98)
[<c079dd90>] (dump_stack) from [<c011d1b0>] (__warn+0xb8/0xd4)
[<c011d1b0>] (__warn) from [<c011d230>] (warn_slowpath_fmt+0x64/0xc4)
[<c011d230>] (warn_slowpath_fmt) from [<c0413048>] (drm_atomic_helper_commit_hw_done+0xe0/0xe4)
[<c0413048>] (drm_atomic_helper_commit_hw_done) from [<c04159cc>] (drm_atomic_helper_commit_tail_rpm+0x44/0x6c)
[<c04159cc>] (drm_atomic_helper_commit_tail_rpm) from [<c0415f5c>] (commit_tail+0x50/0x10c)
[<c0415f5c>] (commit_tail) from [<c04160dc>] (drm_atomic_helper_commit+0xbc/0x128)
[<c04160dc>] (drm_atomic_helper_commit) from [<c0430790>] (drm_framebuffer_remove+0x390/0x428)
[<c0430790>] (drm_framebuffer_remove) from [<c0430860>] (drm_mode_rmfb_work_fn+0x38/0x48)
[<c0430860>] (drm_mode_rmfb_work_fn) from [<c01368a8>] (process_one_work+0x1f0/0x43c)
[<c01368a8>] (process_one_work) from [<c0136d48>] (worker_thread+0x254/0x55c)
[<c0136d48>] (worker_thread) from [<c013c014>] (kthread+0x124/0x150)
[<c013c014>] (kthread) from [<c01010e8>] (ret_from_fork+0x14/0x2c)
Exception stack(0xeb14dfb0 to 0xeb14dff8)
dfa0:                                     00000000 00000000 00000000 00000000
dfc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
dfe0: 00000000 00000000 00000000 00000000 00000013 00000000
---[ end trace 314909bcd4c7d50c ]---
[drm:drm_atomic_helper_wait_for_dependencies] *ERROR* [CRTC:32:crtc-0] flip_done timed out
[drm:drm_atomic_helper_wait_for_dependencies] *ERROR* [CONNECTOR:34:DSI-1] flip_done timed out
[drm:drm_atomic_helper_wait_for_dependencies] *ERROR* [PLANE:31:plane-0] flip_done timed out
The reason for this is that there is a vblank event pending, but we never handle it after disabling the vblank interrupts.
Check if there is a vblank event pending when disabling the display, and clear it by sending a fake vblank event in that case.
Signed-off-by: Stephan Gerhold stephan@gerhold.net Tested-by: Linus Walleij linus.walleij@linaro.org Reviewed-by: Linus Walleij linus.walleij@linaro.org Signed-off-by: Linus Walleij linus.walleij@linaro.org Link: https://patchwork.freedesktop.org/patch/msgid/20191106165835.2863-8-stephan@... Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/mcde/mcde_display.c | 10 ++++++++++ 1 file changed, 10 insertions(+)
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 751454ae3cd10..28ed506285018 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -946,6 +946,7 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe) struct drm_crtc *crtc = &pipe->crtc; struct drm_device *drm = crtc->dev; struct mcde *mcde = drm->dev_private; + struct drm_pending_vblank_event *event;
if (mcde->te_sync) drm_crtc_vblank_off(crtc); @@ -953,6 +954,15 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe) /* Disable FIFO A flow */ mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
+ event = crtc->state->event; + if (event) { + crtc->state->event = NULL; + + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } + dev_info(drm->dev, "MCDE display is disabled\n"); }
From: Marco Elver elver@google.com
[ Upstream commit bf07132f96d426bcbf2098227fb680915cf44498 ]
This patch proposes to require marked atomic accesses surrounding raw_write_seqcount_barrier. We reason that otherwise there is no way to guarantee the propagation or atomicity of the writes before/after the barrier [1]. For example, suppose the compiler tears stores either before or after the barrier; in this case, readers may observe a partial value, and because the readers are unaware that writes are going on (the writes are not in a seq-writer critical section), they will complete the seq-reader critical section while having observed some partial state. [1] https://lwn.net/Articles/793253/
This came up when designing and implementing KCSAN, because KCSAN would flag these accesses as data-races. After careful analysis, our reasoning as above led us to conclude that the best thing to do is to propose an amendment to the raw_seqcount_barrier usage.
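For reference, the reader side this reasoning is about looks roughly like the following (a sketch matching the X/Y example in the documentation diff below; the seqcount read API calls shown are the existing kernel ones):

	void read(void)
	{
		bool x, y;
		unsigned s;

		do {
			s = read_seqcount_begin(&seq);
			x = READ_ONCE(X);
			y = READ_ONCE(Y);
		} while (read_seqcount_retry(&seq, s));

		/* without WRITE_ONCE() in the writer, x and y may be torn */
		BUG_ON(!x && !y);
	}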
Signed-off-by: Marco Elver elver@google.com Acked-by: Paul E. McKenney paulmck@kernel.org Signed-off-by: Paul E. McKenney paulmck@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- include/linux/seqlock.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index bcf4cf26b8c89..a42a29952889c 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * usual consistency guarantee. It is one wmb cheaper, because we can * collapse the two back-to-back wmb()s. * + * Note that, writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. This is necessary because + * neither writes before and after the barrier are enclosed in a seq-writer + * critical section that would ensure readers are aware of ongoing writes. + * * seqcount_t seq; * bool X = true, Y = false; * @@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * * void write(void) * { - * Y = true; + * WRITE_ONCE(Y, true); * * raw_write_seqcount_barrier(seq); * - * X = false; + * WRITE_ONCE(X, false); * } */ static inline void raw_write_seqcount_barrier(seqcount_t *s)
From: Andrey Grodzovsky andrey.grodzovsky@amd.com
[ Upstream commit 135517d3565b48f4def3b1b82008bc17eb5d1c90 ]
Problem: Due to a race between drm_sched_cleanup_jobs() in the sched thread and drm_sched_job_timedout() in the timeout work, there is a possibility that the bad job has already been freed while still being accessed from the timeout thread.
Fix: Instead of just peeking at the bad job in the mirror list, remove it from the list under lock and then put it back later, when we are guaranteed that no race with the main sched thread is possible, which is after the thread is parked.
v2: Lock around processing ring_mirror_list in drm_sched_cleanup_jobs.
v3: Rebase on top of drm-misc-next. v2 is not needed anymore as drm_sched_get_cleanup_job already has a lock there.
v4: Fix comments to reflect the latest code in drm-misc.
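In outline, the fix changes the locking/lifetime dance as follows (condensed from the diff below, not the full scheduler code):

	/* drm_sched_job_timedout() (sketch) */
	spin_lock_irqsave(&sched->job_list_lock, flags);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);
	if (job)
		list_del_init(&job->node);   /* off the list while in use */
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	/* drm_sched_stop() (sketch): only after kthread_park(sched->thread)
	 * is it safe to put the bad job back on the mirror list */
	if (bad && bad->sched == sched)
		list_add(&bad->node, &sched->ring_mirror_list);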
Signed-off-by: Andrey Grodzovsky andrey.grodzovsky@amd.com Reviewed-by: Christian König christian.koenig@amd.com Reviewed-by: Emily Deng Emily.Deng@amd.com Tested-by: Emily Deng Emily.Deng@amd.com Signed-off-by: Christian König christian.koenig@amd.com Link: https://patchwork.freedesktop.org/patch/342356 Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/scheduler/sched_main.c | 27 ++++++++++++++++++++++++++ 1 file changed, 27 insertions(+)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 30c5ddd6d081c..134e9106ebac1 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -284,10 +284,21 @@ static void drm_sched_job_timedout(struct work_struct *work) unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); + + /* Protects against concurrent deletion in drm_sched_get_cleanup_job */ + spin_lock_irqsave(&sched->job_list_lock, flags); job = list_first_entry_or_null(&sched->ring_mirror_list, struct drm_sched_job, node);
if (job) { + /* + * Remove the bad job so it cannot be freed by concurrent + * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread + * is parked at which point it's safe. + */ + list_del_init(&job->node); + spin_unlock_irqrestore(&sched->job_list_lock, flags); + job->sched->ops->timedout_job(job);
/* @@ -298,6 +309,8 @@ static void drm_sched_job_timedout(struct work_struct *work) job->sched->ops->free_job(job); sched->free_guilty = false; } + } else { + spin_unlock_irqrestore(&sched->job_list_lock, flags); }
spin_lock_irqsave(&sched->job_list_lock, flags); @@ -369,6 +382,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
kthread_park(sched->thread);
+ /* + * Reinsert back the bad job here - now it's safe as + * drm_sched_get_cleanup_job cannot race against us and release the + * bad job at this point - we parked (waited for) any in progress + * (earlier) cleanups and drm_sched_get_cleanup_job will not be called + * now until the scheduler thread is unparked. + */ + if (bad && bad->sched == sched) + /* + * Add at the head of the queue to reflect it was the earliest + * job extracted. + */ + list_add(&bad->node, &sched->ring_mirror_list); + /* * Iterate the job list from later to earlier one and either deactive * their HW callbacks or remove them from mirror list if they already
From: Nikhil Devshatwar nikhil.nd@ti.com
[ Upstream commit 6e72eab2e7b7a157d554b8f9faed7676047be7c1 ]
When setting up DMA for video capture from a CSI channel, if the DMA size is not given, the hardware ends up writing as much data as the camera sends.
This may overwrite the buffers, causing memory corruption. Green lines were observed on the default framebuffer.
Restrict the DMA to the maximum height specified in the S_FMT ioctl.
Signed-off-by: Nikhil Devshatwar nikhil.nd@ti.com Signed-off-by: Benoit Parrot bparrot@ti.com Signed-off-by: Hans Verkuil hverkuil-cisco@xs4all.nl Signed-off-by: Mauro Carvalho Chehab mchehab+huawei@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/media/platform/ti-vpe/cal.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index 955a49b8e9c08..f06408009a9c2 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -678,12 +678,13 @@ static void pix_proc_config(struct cal_ctx *ctx) }
static void cal_wr_dma_config(struct cal_ctx *ctx, - unsigned int width) + unsigned int width, unsigned int height) { u32 val;
val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)); set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK); + set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK); set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, CAL_WR_DMA_CTRL_DTAG_MASK); set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, @@ -1306,7 +1307,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) csi2_lane_config(ctx); csi2_ctx_config(ctx); pix_proc_config(ctx); - cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline); + cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline, + ctx->v_fmt.fmt.pix.height); cal_wr_dma_addr(ctx, addr); csi2_ppi_enable(ctx);
From: Viresh Kumar viresh.kumar@linaro.org
[ Upstream commit 03758d60265c773e1d06d436b99ee338f2ac55d6 ]
A kref or refcount isn't the right tool to use here for counting the number of devices that are sharing the static OPPs created for the OPP table. For example, we end up reinitializing the kref, after it reaches a value of 0 and frees the resources, if static OPPs get added again for the same OPP table structure (as the OPP table structure itself was never freed). That is messy and very unclear.
This patch makes parsed_static_opps an unsigned integer and uses it to count the number of users of the static OPPs. Increments and decrements of parsed_static_opps are now done under opp_table->lock to make sure no races are possible if the OPP table is added and removed in parallel (which doesn't happen in practice, but can in theory).
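In outline, parsed_static_opps now behaves as a plain share count under the table mutex (condensed from the diff below):

	/* add path (sketch) */
	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		opp_table->parsed_static_opps++;   /* another sharing device */
		mutex_unlock(&opp_table->lock);
		return 0;
	}
	opp_table->parsed_static_opps = 1;         /* first user parses DT */
	mutex_unlock(&opp_table->lock);

	/* remove path (sketch) */
	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps &&
	    --opp_table->parsed_static_opps == 0) {
		/* last user gone: free every OPP with !opp->dynamic */
	}
	mutex_unlock(&opp_table->lock);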
Signed-off-by: Viresh Kumar viresh.kumar@linaro.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/opp/core.c | 48 ++++++++++++++++++---------------------------- drivers/opp/of.c | 26 +++++++++++-------------- drivers/opp/opp.h | 6 ++---- 3 files changed, 32 insertions(+), 48 deletions(-)
diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 7b057c32e11b1..29dfaa591f8b0 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -990,7 +990,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index) BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); INIT_LIST_HEAD(&opp_table->opp_list); kref_init(&opp_table->kref); - kref_init(&opp_table->list_kref);
/* Secure the device table modification */ list_add(&opp_table->node, &opp_tables); @@ -1074,33 +1073,6 @@ static void _opp_table_kref_release(struct kref *kref) mutex_unlock(&opp_table_lock); }
-void _opp_remove_all_static(struct opp_table *opp_table) -{ - struct dev_pm_opp *opp, *tmp; - - list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { - if (!opp->dynamic) - dev_pm_opp_put(opp); - } - - opp_table->parsed_static_opps = false; -} - -static void _opp_table_list_kref_release(struct kref *kref) -{ - struct opp_table *opp_table = container_of(kref, struct opp_table, - list_kref); - - _opp_remove_all_static(opp_table); - mutex_unlock(&opp_table_lock); -} - -void _put_opp_list_kref(struct opp_table *opp_table) -{ - kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release, - &opp_table_lock); -} - void dev_pm_opp_put_opp_table(struct opp_table *opp_table) { kref_put_mutex(&opp_table->kref, _opp_table_kref_release, @@ -1204,6 +1176,24 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) } EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
+void _opp_remove_all_static(struct opp_table *opp_table) +{ + struct dev_pm_opp *opp, *tmp; + + mutex_lock(&opp_table->lock); + + if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps) + goto unlock; + + list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { + if (!opp->dynamic) + dev_pm_opp_put_unlocked(opp); + } + +unlock: + mutex_unlock(&opp_table->lock); +} + /** * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs * @dev: device for which we do this operation @@ -2209,7 +2199,7 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev) return; }
- _put_opp_list_kref(opp_table); + _opp_remove_all_static(opp_table);
/* Drop reference taken by _find_opp_table() */ dev_pm_opp_put_opp_table(opp_table); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 1e5fcdee043c4..9cd8f0adacae4 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -658,17 +658,15 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) struct dev_pm_opp *opp;
/* OPP table is already initialized for the device */ + mutex_lock(&opp_table->lock); if (opp_table->parsed_static_opps) { - kref_get(&opp_table->list_kref); + opp_table->parsed_static_opps++; + mutex_unlock(&opp_table->lock); return 0; }
- /* - * Re-initialize list_kref every time we add static OPPs to the OPP - * table as the reference count may be 0 after the last tie static OPPs - * were removed. - */ - kref_init(&opp_table->list_kref); + opp_table->parsed_static_opps = 1; + mutex_unlock(&opp_table->lock);
/* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_table->np, np) { @@ -678,7 +676,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); of_node_put(np); - goto put_list_kref; + goto remove_static_opp; } else if (opp) { count++; } @@ -687,7 +685,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) /* There should be one of more OPP defined */ if (WARN_ON(!count)) { ret = -ENOENT; - goto put_list_kref; + goto remove_static_opp; }
list_for_each_entry(opp, &opp_table->opp_list, node) @@ -698,18 +696,16 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) dev_err(dev, "Not all nodes have performance state set (%d: %d)\n", count, pstate_count); ret = -ENOENT; - goto put_list_kref; + goto remove_static_opp; }
if (pstate_count) opp_table->genpd_performance_state = true;
- opp_table->parsed_static_opps = true; - return 0;
-put_list_kref: - _put_opp_list_kref(opp_table); +remove_static_opp: + _opp_remove_all_static(opp_table);
return ret; } @@ -746,7 +742,7 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) if (ret) { dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", __func__, freq, ret); - _put_opp_list_kref(opp_table); + _opp_remove_all_static(opp_table); return ret; } nr -= 2; diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 01a500e2c40a1..d14e27102730c 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -127,11 +127,10 @@ enum opp_table_access { * @dev_list: list of devices that share these OPPs * @opp_list: table of opps * @kref: for reference count of the table. - * @list_kref: for reference count of the OPP list. * @lock: mutex protecting the opp_list and dev_list. * @np: struct device_node pointer for opp's DT node. * @clock_latency_ns_max: Max clock latency in nanoseconds. - * @parsed_static_opps: True if OPPs are initialized from DT. + * @parsed_static_opps: Count of devices for which OPPs are initialized from DT. * @shared_opp: OPP is shared between multiple devices. * @suspend_opp: Pointer to OPP to be used during device suspend. * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers. @@ -167,7 +166,6 @@ struct opp_table { struct list_head dev_list; struct list_head opp_list; struct kref kref; - struct kref list_kref; struct mutex lock;
struct device_node *np; @@ -176,7 +174,7 @@ struct opp_table { /* For backward compatibility with v1 bindings */ unsigned int voltage_tolerance_v1;
- bool parsed_static_opps; + unsigned int parsed_static_opps; enum opp_table_access shared_opp; struct dev_pm_opp *suspend_opp;
From: Quinn Tran qutran@marvell.com
[ Upstream commit e1217dc3edce62895595cf484af33b9e0379b7f3 ]
Fix a race condition between GNL completion processing and a new GNL request: a late submission of a GNL request was not seen by the GNL completion thread. This patch re-submits the GNL request for the late-submitted fcport.
Link: https://lore.kernel.org/r/20191217220617.28084-13-hmadhani@marvell.com Signed-off-by: Quinn Tran qutran@marvell.com Signed-off-by: Himanshu Madhani hmadhani@marvell.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/qla2xxx/qla_init.c | 15 +++++++++++++-- drivers/scsi/qla2xxx/qla_target.c | 21 +++++++++++++++------ 2 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index ac4c47fc5f4c1..2f2e059f4575e 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1002,7 +1002,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) set_bit(loop_id, vha->hw->loop_id_map); wwn = wwn_to_u64(e->port_name);
- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, + ql_dbg(ql_dbg_disc, vha, 0x20e8, "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", __func__, (void *)&wwn, e->port_id[2], e->port_id[1], e->port_id[0], e->current_login_state, e->last_login_state, @@ -1061,6 +1061,16 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); vha->gnl.sent = 0; + if (!list_empty(&vha->gnl.fcports)) { + /* retrigger gnl */ + list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, + gnl_entry) { + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) + break; + } + } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp); @@ -1995,7 +2005,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) qla24xx_post_prli_work(vha, ea->fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n", + "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->loop_id, ea->fcport->d_id.b24);
@@ -2066,6 +2076,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) set_bit(lid, vha->hw->loop_id_map); ea->fcport->loop_id = lid; ea->fcport->keep_nport_handle = 0; + ea->fcport->logout_on_delete = 1; qlt_schedule_sess_for_deletion(ea->fcport); } break; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index b75e6e4d58c06..a7acc266cec06 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -957,7 +957,7 @@ void qlt_free_session_done(struct work_struct *work) struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, + ql_dbg(ql_dbg_disc, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, @@ -1024,7 +1024,7 @@ void qlt_free_session_done(struct work_struct *work)
while (!READ_ONCE(sess->logout_completed)) { if (!traced) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, + ql_dbg(ql_dbg_disc, vha, 0xf086, "%s: waiting for sess %p logout\n", __func__, sess); traced = true; @@ -1045,6 +1045,10 @@ void qlt_free_session_done(struct work_struct *work) (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); }
+ spin_lock_irqsave(&vha->work_lock, flags); + sess->flags &= ~FCF_ASYNC_SENT; + spin_unlock_irqrestore(&vha->work_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->se_sess) { sess->se_sess = NULL; @@ -1108,7 +1112,7 @@ void qlt_free_session_done(struct work_struct *work) spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); sess->free_pending = 0;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, + ql_dbg(ql_dbg_disc, vha, 0xf001, "Unregistration of sess %p %8phC finished fcp_cnt %d\n", sess, sess->port_name, vha->fcport_count);
@@ -1151,6 +1155,11 @@ void qlt_unreg_sess(struct fc_port *sess) return; } sess->free_pending = 1; + /* + * Use FCF_ASYNC_SENT flag to block other cmds used in sess + * management from being sent. + */ + sess->flags |= FCF_ASYNC_SENT; spin_unlock_irqrestore(&sess->vha->work_lock, flags);
if (sess->se_sess) @@ -4580,7 +4589,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, /* find other sess with nport_id collision */ if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, + ql_dbg(ql_dbg_disc, vha, 0x1000c, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn);
@@ -4596,7 +4605,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ - ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, + ql_dbg(ql_dbg_disc, vha, 0xf01b, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn);
@@ -4611,7 +4620,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, /* find other sess with nport handle collision */ if ((loop_id == other_sess->loop_id) && (loop_id != FC_NO_LOOP_ID)) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, + ql_dbg(ql_dbg_disc, vha, 0x1000d, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn);
From: James Smart jsmart2021@gmail.com
[ Upstream commit be0709e449ac9d9753a5c17e5b770d6e5e930e4a ]
NVMe device re-discovery does not complete. Dev_loss_tmo messages seen on initiator after recovery from a link disturbance.
The failing case is the following:
When the driver (as an NVMe target) receives a PLOGI, the driver initiates an "unreg rpi" mailbox command. While the mailbox command is in progress, the driver requests that an ACC be sent to the initiator. The target's ACC is received by the initiator, and the initiator then transmits a PRLI. The driver receives the PRLI prior to receiving the completion for the PLOGI response WQE that sent the ACC. (They arrive via different delivery sources from the hw, so the race is very possible.) Given the PRLI arrives prior to the ACC completion (which signifies PLOGI exchange completion), the driver LS_RJT's the PRLI. The "unreg rpi" mailbox then completes. Since a PRLI has been received, the driver transmits a PLOGI to restart discovery, which the initiator then ACC's. If the driver processes the (re)PLOGI ACC prior to completing the handling for the earlier ACC it sent for the initiator's original PLOGI, there is no state change for completion of the (re)PLOGI. The ndlp remains in "PLOGI Sent" and the initiator continues sending PRLIs, which are rejected by the target until the timeout or retry limit is reached.
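The same race as a timeline (labels paraphrase the narrative above):

	initiator                        target (driver)
	---------                        ---------------
	PLOGI    -------------------->   start "unreg rpi" mbox; queue ACC WQE
	         <--------------------   ACC (WQE completion still pending)
	PRLI     -------------------->   PLOGI exchange not yet marked complete
	                                   => LS_RJT the PRLI
	                                 "unreg rpi" mbox completes
	         <--------------------   PLOGI (restart discovery)
	ACC      -------------------->   processed before the original ACC
	                                 completion => no state change; ndlp
	                                 stays in "PLOGI Sent"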
Fix by: When in target mode, defer sending an ACC for the received PLOGI until unreg RPI completes.
Link: https://lore.kernel.org/r/20191218235808.31922-2-jsmart2021@gmail.com Signed-off-by: Dick Kennedy dick.kennedy@broadcom.com Signed-off-by: James Smart jsmart2021@gmail.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/lpfc/lpfc_nportdisc.c | 108 ++++++++++++++++++++++++++--- 1 file changed, 99 insertions(+), 9 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 2a340624bfc99..590a49e847626 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -308,7 +308,7 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) mb->mbxStatus); mempool_free(login_mbox, phba->mbox_mem_pool); mempool_free(link_mbox, phba->mbox_mem_pool); - lpfc_sli_release_iocbq(phba, save_iocb); + kfree(save_iocb); return; }
@@ -325,7 +325,61 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) }
mempool_free(link_mbox, phba->mbox_mem_pool); - lpfc_sli_release_iocbq(phba, save_iocb); + kfree(save_iocb); +} + +/** + * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function provides the unreg rpi mailbox completion handler for a tgt. + * The routine frees the memory resources associated with the completed + * mailbox command and transmits the ELS ACC. + * + * This routine is only called if we are SLI4, acting in target + * mode and the remote NPort issues the PLOGI after link up. + **/ +void +lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; + LPFC_MBOXQ_t *mbox = pmb->context3; + struct lpfc_iocbq *piocb = NULL; + int rc; + + if (mbox) { + pmb->context3 = NULL; + piocb = mbox->context3; + mbox->context3 = NULL; + } + + /* + * Complete the unreg rpi mbx request, and update flags. + * This will also restart any deferred events. + */ + lpfc_nlp_get(ndlp); + lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb); + + if (!piocb) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, + "4578 PLOGI ACC fail\n"); + if (mbox) + mempool_free(mbox, phba->mbox_mem_pool); + goto out; + } + + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox); + if (rc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, + "4579 PLOGI ACC fail %x\n", rc); + if (mbox) + mempool_free(mbox, phba->mbox_mem_pool); + } + kfree(piocb); +out: + lpfc_nlp_put(ndlp); }
static int @@ -345,6 +399,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *save_iocb; struct ls_rjt stat; uint32_t vid, flag; + u16 rpi; int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt)); @@ -488,7 +543,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, link_mbox->vport = vport; link_mbox->ctx_ndlp = ndlp;
- save_iocb = lpfc_sli_get_iocbq(phba); + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); if (!save_iocb) goto out; /* Save info from cmd IOCB used in rsp */ @@ -513,7 +568,36 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->nvmet_support && !defer_acc) { + link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!link_mbox) + goto out; + + /* As unique identifiers such as iotag would be overwritten + * with those from the cmdiocb, allocate separate temporary + * storage for the copy. + */ + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); + if (!save_iocb) + goto out; + + /* Unreg RPI is required for SLI4. */ + rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox); + link_mbox->vport = vport; + link_mbox->ctx_ndlp = ndlp; + link_mbox->mbox_cmpl = lpfc_defer_acc_rsp; + + if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && + (!(vport->fc_flag & FC_OFFLINE_MODE))) + ndlp->nlp_flag |= NLP_UNREG_INP; + + /* Save info from cmd IOCB used in rsp */ + memcpy(save_iocb, cmdiocb, sizeof(*save_iocb)); + + /* Delay sending ACC till unreg RPI completes. */ + defer_acc = 1; + } else if (phba->sli_rev == LPFC_SLI_REV4) lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, @@ -553,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if ((vport->port_type == LPFC_NPIV_PORT && vport->cfg_restrict_login)) {
+ /* no deferred ACC */ + kfree(save_iocb); + /* In order to preserve RPIs, we want to cleanup * the default RPI the firmware created to rcv * this ELS request. The only way to do this is @@ -571,8 +658,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (defer_acc) { /* So the order here should be: - * Issue CONFIG_LINK mbox - * CONFIG_LINK cmpl + * SLI3 pt2pt + * Issue CONFIG_LINK mbox + * CONFIG_LINK cmpl + * SLI4 tgt + * Issue UNREG RPI mbx + * UNREG RPI cmpl * Issue PLOGI ACC * PLOGI ACC cmpl * Issue REG_LOGIN mbox @@ -596,10 +687,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, out: if (defer_acc) lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "4577 pt2pt discovery failure: %p %p %p\n", + "4577 discovery failure: %p %p %p\n", save_iocb, link_mbox, login_mbox); - if (save_iocb) - lpfc_sli_release_iocbq(phba, save_iocb); + kfree(save_iocb); if (link_mbox) mempool_free(link_mbox, phba->mbox_mem_pool); if (login_mbox)
From: Kevin Kou qdkevin.kou@gmail.com
[ Upstream commit f643ee295c1c63bc117fb052d4da681354d6f732 ]
The original patch that brought in the "SCTP ACK tracking trace event" feature was committed on Dec 20, 2017. It replaced the jprobe usage with trace events and introduced two of them: TRACE_EVENT(sctp_probe) and TRACE_EVENT(sctp_probe_path). The original patch intended to trigger trace_sctp_probe_path from within TRACE_EVENT(sctp_probe), as in the code below:
+TRACE_EVENT(sctp_probe, + + TP_PROTO(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk), + + TP_ARGS(ep, asoc, chunk), + + TP_STRUCT__entry( + __field(__u64, asoc) + __field(__u32, mark) + __field(__u16, bind_port) + __field(__u16, peer_port) + __field(__u32, pathmtu) + __field(__u32, rwnd) + __field(__u16, unack_data) + ), + + TP_fast_assign( + struct sk_buff *skb = chunk->skb; + + __entry->asoc = (unsigned long)asoc; + __entry->mark = skb->mark; + __entry->bind_port = ep->base.bind_addr.port; + __entry->peer_port = asoc->peer.port; + __entry->pathmtu = asoc->pathmtu; + __entry->rwnd = asoc->peer.rwnd; + __entry->unack_data = asoc->unack_data; + + if (trace_sctp_probe_path_enabled()) { + struct sctp_transport *sp; + + list_for_each_entry(sp, &asoc->peer.transport_addr_list, + transports) { + trace_sctp_probe_path(sp, asoc); + } + } + ),
But I found during testing that it did not work: trace_sctp_probe_path produced no output. I finally found that there is a trace buffer lock operation (trace_event_buffer_reserve) in include/trace/trace_events.h:
static notrace void \ trace_event_raw_event_##call(void *__data, proto) \ { \ struct trace_event_file *trace_file = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ struct trace_event_buffer fbuffer; \ struct trace_event_raw_##call *entry; \ int __data_size; \ \ if (trace_trigger_soft_disabled(trace_file)) \ return; \ \ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ \ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ sizeof(*entry) + __data_size); \ \ if (!entry) \ return; \ \ tstruct \ \ { assign; } \ \ trace_event_buffer_commit(&fbuffer); \ }
The reason trace_sctp_probe_path produces no output is that it is written in the TP_fast_assign part of TRACE_EVENT(sctp_probe), so when the compiler expands the macro it is placed ( { assign; } ) after the trace_event_buffer_reserve() call:
entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ sizeof(*entry) + __data_size); \ \ if (!entry) \ return; \ \ tstruct \ \ { assign; } \
so trace_sctp_probe_path ultimately cannot acquire the trace event buffer and produces no output; in other words, nesting tracepoint entry functions is not allowed. The function call flow is:
trace_sctp_probe() -> trace_event_raw_event_sctp_probe() -> lock buffer -> trace_sctp_probe_path() -> trace_event_raw_event_sctp_probe_path() --nested -> buffer has been locked and return no output.
This patch removes trace_sctp_probe_path from the TP_fast_assign part of TRACE_EVENT(sctp_probe) to avoid nesting the entry functions, and instead triggers trace_sctp_probe_path in sctp_outq_sack.
After this patch, you can enable both events individually:

  # cd /sys/kernel/debug/tracing
  # echo 1 > events/sctp/sctp_probe/enable
  # echo 1 > events/sctp/sctp_probe_path/enable
Or, you can enable all the events under sctp.
# echo 1 > events/sctp/enable
Signed-off-by: Kevin Kou qdkevin.kou@gmail.com Acked-by: Marcelo Ricardo Leitner marcelo.leitner@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- include/trace/events/sctp.h | 9 --------- net/sctp/outqueue.c | 6 ++++++ 2 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h index 7475c7be165aa..d4aac34365955 100644 --- a/include/trace/events/sctp.h +++ b/include/trace/events/sctp.h @@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe, __entry->pathmtu = asoc->pathmtu; __entry->rwnd = asoc->peer.rwnd; __entry->unack_data = asoc->unack_data; - - if (trace_sctp_probe_path_enabled()) { - struct sctp_transport *sp; - - list_for_each_entry(sp, &asoc->peer.transport_addr_list, - transports) { - trace_sctp_probe_path(sp, asoc); - } - } ),
TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d " diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 0dab62b67b9a4..adceb226ffab3 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -36,6 +36,7 @@ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/stream_sched.h> +#include <trace/events/sctp.h>
/* Declare internal functions here. */ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); @@ -1238,6 +1239,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) /* Grab the association's destination address list. */ transport_list = &asoc->peer.transport_addr_list;
+ /* SCTP path tracepoint for congestion control debugging. */ + list_for_each_entry(transport, transport_list, transports) { + trace_sctp_probe_path(transport, asoc); + } + sack_ctsn = ntohl(sack->cum_tsn_ack); gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); asoc->stats.gapcnt += gap_ack_blocks;
From: "Rafael J. Wysocki" rafael.j.wysocki@intel.com
[ Upstream commit 3df663a147fe077a6ee8444ec626738946e65547 ]
There is a race condition in acpi_ec_get_query_handler() theoretically allowing query handlers to go away before reference counting them.
In order to avoid it, call kref_get() on query handlers under ec->mutex.
Also simplify the code a bit while at it.
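To make the window concrete, here is an illustrative timeline of the race being closed (a sketch, not code from the driver; the names mirror the diff below):

	/*
	 * CPU A: mutex_lock(&ec->mutex)
	 * CPU A: handler = <entry in ec->list matching value>
	 * CPU A: mutex_unlock(&ec->mutex)
	 * CPU B: kref_put(&handler->kref, acpi_ec_query_handler_release)
	 *        -> last reference dropped, handler freed
	 * CPU A: kref_get(&handler->kref)   <- use-after-free
	 *
	 * Taking the reference while ec->mutex is still held closes the window.
	 */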
Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/acpi/ec.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 57eacdcbf8208..1ec55345252b6 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1043,29 +1043,21 @@ void acpi_ec_unblock_transactions(void) /* -------------------------------------------------------------------------- Event Management -------------------------------------------------------------------------- */ -static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false;
mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? acpi_ec_get_query_handler(handler) : NULL; + return NULL; }
static void acpi_ec_query_handler_release(struct kref *kref)
From: Bart Van Assche bvanassche@acm.org
[ Upstream commit e4d2add7fd5bc64ee3e388eabe6b9e081cb42e11 ]
Since the lrbp->cmd expression occurs multiple times, introduce a new local variable to hold that pointer. This patch does not change any functionality.
Cc: Bean Huo beanhuo@micron.com Cc: Can Guo cang@codeaurora.org Cc: Avri Altman avri.altman@wdc.com Cc: Stanley Chu stanley.chu@mediatek.com Cc: Tomas Winkler tomas.winkler@intel.com Link: https://lore.kernel.org/r/20191224220248.30138-3-bvanassche@acm.org Reviewed-by: Stanley Chu stanley.chu@mediatek.com Reviewed-by: Can Guo cang@codeaurora.org Reviewed-by: Alim Akhtar alim.akhtar@samsung.com Signed-off-by: Bart Van Assche bvanassche@acm.org Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/ufs/ufshcd.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 5e502e1605549..020a93a40a982 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -334,27 +334,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, u8 opcode = 0; u32 intr, doorbell; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct scsi_cmnd *cmd = lrbp->cmd; int transfer_len = -1;
if (!trace_ufshcd_command_enabled()) { /* trace UPIU W/O tracing command */ - if (lrbp->cmd) + if (cmd) ufshcd_add_cmd_upiu_trace(hba, tag, str); return; }
- if (lrbp->cmd) { /* data phase exists */ + if (cmd) { /* data phase exists */ /* trace UPIU also */ ufshcd_add_cmd_upiu_trace(hba, tag, str); - opcode = (u8)(*lrbp->cmd->cmnd); + opcode = cmd->cmnd[0]; if ((opcode == READ_10) || (opcode == WRITE_10)) { /* * Currently we only fully trace read(10) and write(10) * commands */ - if (lrbp->cmd->request && lrbp->cmd->request->bio) - lba = - lrbp->cmd->request->bio->bi_iter.bi_sector; + if (cmd->request && cmd->request->bio) + lba = cmd->request->bio->bi_iter.bi_sector; transfer_len = be32_to_cpu( lrbp->ucd_req_ptr->sc.exp_data_transfer_len); }
From: Bart Van Assche bvanassche@acm.org
[ Upstream commit eacf36f5bebde5089dddb3d5bfcbeab530b01f8a ]
Starting execution of a command before tracing a command may cause the completion handler to free data while it is being traced. Fix this race by tracing a command before it is submitted.
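To make the ordering concrete, an illustrative timeline of the race (a sketch based on the commit message, not verbatim driver code):

	/*
	 * CPU A: __set_bit(task_tag, &hba->outstanding_reqs); doorbell write
	 * CPU B: command completes; completion path frees/clears lrbp->cmd
	 * CPU A: ufshcd_add_command_trace() dereferences lrbp->cmd  <- too late
	 *
	 * Tracing before the doorbell write removes the window.
	 */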
Cc: Bean Huo beanhuo@micron.com Cc: Can Guo cang@codeaurora.org Cc: Avri Altman avri.altman@wdc.com Cc: Stanley Chu stanley.chu@mediatek.com Cc: Tomas Winkler tomas.winkler@intel.com Link: https://lore.kernel.org/r/20191224220248.30138-5-bvanassche@acm.org Reviewed-by: Alim Akhtar alim.akhtar@samsung.com Signed-off-by: Bart Van Assche bvanassche@acm.org Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 020a93a40a982..d538b3d4f74a5 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1888,12 +1888,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { hba->lrb[task_tag].issue_time_stamp = ktime_get(); hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); + ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); - ufshcd_add_command_trace(hba, task_tag, "send"); }
/**
From: David Francis David.Francis@amd.com
[ Upstream commit b6adc57cff616da18ff8cff028d2ddf585c97334 ]
For DSC MST, monitors would sometimes break out in full-screen static. The issue was traced back to the PPS generation code, where these variables were being used uninitialized and picked up garbage.

Memset them to 0 to avoid this.
Reviewed-by: Nicholas Kazlauskas nicholas.kazlauskas@amd.com Signed-off-by: David Francis David.Francis@amd.com Signed-off-by: Mikita Lipski mikita.lipski@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 3 +++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 3 +++ 2 files changed, 6 insertions(+)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index a519dbc5ecb65..5d6cbaebebc03 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -496,6 +496,9 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; uint8_t dsc_packed_pps[128];
+ memset(&dsc_cfg, 0, sizeof(dsc_cfg)); + memset(dsc_packed_pps, 0, 128); + /* Enable DSC hw block */ dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 1b419407af942..01040501d40e3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -207,6 +207,9 @@ static bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const str struct dsc_reg_values dsc_reg_vals; struct dsc_optc_config dsc_optc_cfg;
+ memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals)); + memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg)); + DC_LOG_DSC("Getting packed DSC PPS for DSC Config:"); dsc_config_log(dsc, dsc_cfg); DC_LOG_DSC("DSC Picture Parameter Set (PPS):");
From: Dmitry Osipenko digetx@gmail.com
[ Upstream commit b5d5605ca3cebb9b16c4f251635ef171ad18b80d ]
It is potentially possible for the interrupt to fire after a transfer timeout. That may not end well for the next transfer, because interrupt handling may race with the hardware resetting.

This is very unlikely to happen in practice, but let's prevent the potential problem anyway by enabling the interrupt only at the moments when an interrupt event is actually needed.
Tested-by: Thierry Reding treding@nvidia.com Signed-off-by: Dmitry Osipenko digetx@gmail.com Signed-off-by: Wolfram Sang wsa@the-dreams.de Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/i2c/busses/i2c-tegra.c | 70 +++++++++++++++++----------------- 1 file changed, 36 insertions(+), 34 deletions(-)
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 331f7cca9babe..5ca72fb0b406c 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_device.h> @@ -230,7 +231,6 @@ struct tegra_i2c_hw_feature { * @base_phys: physical base address of the I2C controller * @cont_id: I2C controller ID, used for packet header * @irq: IRQ number of transfer complete interrupt - * @irq_disabled: used to track whether or not the interrupt is enabled * @is_dvc: identifies the DVC I2C controller, has a different register layout * @msg_complete: transfer completion notifier * @msg_err: error code for completed message @@ -240,7 +240,6 @@ struct tegra_i2c_hw_feature { * @bus_clk_rate: current I2C bus clock rate * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes * @is_multimaster_mode: track if I2C controller is in multi-master mode - * @xfer_lock: lock to serialize transfer submission and processing * @tx_dma_chan: DMA transmit channel * @rx_dma_chan: DMA receive channel * @dma_phys: handle to DMA resources @@ -260,7 +259,6 @@ struct tegra_i2c_dev { phys_addr_t base_phys; int cont_id; int irq; - bool irq_disabled; int is_dvc; struct completion msg_complete; int msg_err; @@ -270,8 +268,6 @@ struct tegra_i2c_dev { u32 bus_clk_rate; u16 clk_divisor_non_hs_mode; bool is_multimaster_mode; - /* xfer_lock: lock to serialize transfer submission and processing */ - spinlock_t xfer_lock; struct dma_chan *tx_dma_chan; struct dma_chan *rx_dma_chan; dma_addr_t dma_phys; @@ -790,11 +786,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit) if (err) return err;
- if (i2c_dev->irq_disabled) { - i2c_dev->irq_disabled = false; - enable_irq(i2c_dev->irq); - } - return 0; }
@@ -825,18 +816,12 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
status = i2c_readl(i2c_dev, I2C_INT_STATUS);
- spin_lock(&i2c_dev->xfer_lock); if (status == 0) { dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n", i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS), i2c_readl(i2c_dev, I2C_STATUS), i2c_readl(i2c_dev, I2C_CNFG)); i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; - - if (!i2c_dev->irq_disabled) { - disable_irq_nosync(i2c_dev->irq); - i2c_dev->irq_disabled = true; - } goto err; }
@@ -925,7 +910,6 @@ err:
complete(&i2c_dev->msg_complete); done: - spin_unlock(&i2c_dev->xfer_lock); return IRQ_HANDLED; }
@@ -999,6 +983,30 @@ out: i2c_writel(i2c_dev, val, reg); }
+static unsigned long +tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev, + struct completion *complete, + unsigned int timeout_ms) +{ + unsigned long ret; + + enable_irq(i2c_dev->irq); + ret = wait_for_completion_timeout(complete, + msecs_to_jiffies(timeout_ms)); + disable_irq(i2c_dev->irq); + + /* + * There is a chance that completion may happen after IRQ + * synchronization, which is done by disable_irq(). + */ + if (ret == 0 && completion_done(complete)) { + dev_warn(i2c_dev->dev, "completion done after timeout\n"); + ret = 1; + } + + return ret; +} + static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) { struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); @@ -1020,8 +1028,8 @@ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap) i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG); tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE);
- time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, - msecs_to_jiffies(50)); + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->msg_complete, 50); if (time_left == 0) { dev_err(i2c_dev->dev, "timed out for bus clear\n"); return -ETIMEDOUT; @@ -1044,7 +1052,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, u32 packet_header; u32 int_mask; unsigned long time_left; - unsigned long flags; size_t xfer_size; u32 *buffer = NULL; int err = 0; @@ -1075,7 +1082,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, */ xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC, i2c_dev->bus_clk_rate); - spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; tegra_i2c_unmask_irq(i2c_dev, int_mask); @@ -1090,7 +1096,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_err(i2c_dev->dev, "starting RX DMA failed, err %d\n", err); - goto unlock; + return err; }
} else { @@ -1149,7 +1155,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_err(i2c_dev->dev, "starting TX DMA failed, err %d\n", err); - goto unlock; + return err; } } else { tegra_i2c_fill_tx_fifo(i2c_dev); @@ -1169,15 +1175,10 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n", i2c_readl(i2c_dev, I2C_INT_MASK));
-unlock: - spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags); - if (dma) { - if (err) - return err; + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->dma_complete, xfer_time);
- time_left = wait_for_completion_timeout(&i2c_dev->dma_complete, - msecs_to_jiffies(xfer_time)); if (time_left == 0) { dev_err(i2c_dev->dev, "DMA transfer timeout\n"); dmaengine_terminate_sync(i2c_dev->msg_read ? @@ -1202,13 +1203,13 @@ unlock: i2c_dev->tx_dma_chan); }
- time_left = wait_for_completion_timeout(&i2c_dev->msg_complete, - msecs_to_jiffies(xfer_time)); + time_left = tegra_i2c_wait_completion_timeout( + i2c_dev, &i2c_dev->msg_complete, xfer_time); + tegra_i2c_mask_irq(i2c_dev, int_mask);
if (time_left == 0) { dev_err(i2c_dev->dev, "i2c transfer timed out\n"); - tegra_i2c_init(i2c_dev, true); return -ETIMEDOUT; } @@ -1568,7 +1569,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) I2C_PACKET_HEADER_SIZE; init_completion(&i2c_dev->msg_complete); init_completion(&i2c_dev->dma_complete); - spin_lock_init(&i2c_dev->xfer_lock);
if (!i2c_dev->hw->has_single_clk_source) { fast_clk = devm_clk_get(&pdev->dev, "fast-clk"); @@ -1644,6 +1644,8 @@ static int tegra_i2c_probe(struct platform_device *pdev) goto release_dma; }
+ irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN); + ret = devm_request_irq(&pdev->dev, i2c_dev->irq, tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); if (ret) {
From: Ard Biesheuvel ardb@kernel.org
[ Upstream commit 64c8a0cd0a535891d5905c3a1651150f0f141439 ]
The new of_devlink support breaks PCIe probing on ARM platforms booting via UEFI if the firmware exposes an EFI framebuffer that is backed by a PCI device. The reason is that the probing order gets reversed, resulting in a resource conflict on the framebuffer memory window when the PCIe host probes last, causing it to give up entirely.
Given that we rely on PCI quirks to deal with EFI framebuffers that get moved around in memory, we cannot simply drop the memory reservation, so instead, let's use the device link infrastructure to register this dependency, and force the probing to occur in the expected order.
Co-developed-by: Saravana Kannan saravanak@google.com Signed-off-by: Ard Biesheuvel ardb@kernel.org Signed-off-by: Saravana Kannan saravanak@google.com Signed-off-by: Ard Biesheuvel ardb@kernel.org Signed-off-by: Ingo Molnar mingo@kernel.org Link: https://lore.kernel.org/r/20200113172245.27925-9-ardb@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/firmware/efi/arm-init.c | 107 ++++++++++++++++++++++++++++++-- 1 file changed, 103 insertions(+), 4 deletions(-)
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 311cd349a8628..f136b77e13d98 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -10,10 +10,12 @@ #define pr_fmt(fmt) "efi: " fmt
#include <linux/efi.h> +#include <linux/fwnode.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mm_types.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/platform_device.h> #include <linux/screen_info.h> @@ -267,15 +269,112 @@ void __init efi_init(void) efi_memmap_unmap(); }
+static bool efifb_overlaps_pci_range(const struct of_pci_range *range) +{ + u64 fb_base = screen_info.lfb_base; + + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; + + return fb_base >= range->cpu_addr && + fb_base < (range->cpu_addr + range->size); +} + +static struct device_node *find_pci_overlap_node(void) +{ + struct device_node *np; + + for_each_node_by_type(np, "pci") { + struct of_pci_range_parser parser; + struct of_pci_range range; + int err; + + err = of_pci_range_parser_init(&parser, np); + if (err) { + pr_warn("of_pci_range_parser_init() failed: %d\n", err); + continue; + } + + for_each_of_pci_range(&parser, &range) + if (efifb_overlaps_pci_range(&range)) + return np; + } + return NULL; +} + +/* + * If the efifb framebuffer is backed by a PCI graphics controller, we have + * to ensure that this relation is expressed using a device link when + * running in DT mode, or the probe order may be reversed, resulting in a + * resource reservation conflict on the memory window that the efifb + * framebuffer steals from the PCIe host bridge. + */ +static int efifb_add_links(const struct fwnode_handle *fwnode, + struct device *dev) +{ + struct device_node *sup_np; + struct device *sup_dev; + + sup_np = find_pci_overlap_node(); + + /* + * If there's no PCI graphics controller backing the efifb, we are + * done here. + */ + if (!sup_np) + return 0; + + sup_dev = get_dev_from_fwnode(&sup_np->fwnode); + of_node_put(sup_np); + + /* + * Return -ENODEV if the PCI graphics controller device hasn't been + * registered yet. This ensures that efifb isn't allowed to probe + * and this function is retried again when new devices are + * registered. + */ + if (!sup_dev) + return -ENODEV; + + /* + * If this fails, retrying this function at a later point won't + * change anything. So, don't return an error after this. + */ + if (!device_link_add(dev, sup_dev, 0)) + dev_warn(dev, "device_link_add() failed\n"); + + put_device(sup_dev); + + return 0; +} + +static const struct fwnode_operations efifb_fwnode_ops = { + .add_links = efifb_add_links, +}; + +static struct fwnode_handle efifb_fwnode = { + .ops = &efifb_fwnode_ops, +}; + static int __init register_gop_device(void) { - void *pd; + struct platform_device *pd; + int err;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) return 0;
- pd = platform_device_register_data(NULL, "efi-framebuffer", 0, - &screen_info, sizeof(screen_info)); - return PTR_ERR_OR_ZERO(pd); + pd = platform_device_alloc("efi-framebuffer", 0); + if (!pd) + return -ENOMEM; + + if (IS_ENABLED(CONFIG_PCI)) + pd->dev.fwnode = &efifb_fwnode; + + err = platform_device_add_data(pd, &screen_info, sizeof(screen_info)); + if (err) + return err; + + return platform_device_add(pd); } subsys_initcall(register_gop_device);
From: Qu Wenruo wqu@suse.com
[ Upstream commit f6d2a5c263afca84646cf3300dc13061bedbd99e ]
Inspired by btrfs-progs github issue #208, where a chunk item in the chunk tree has an invalid num_stripes (0).

Although that can already be caught by the current btrfs_check_chunk_valid(), that function doesn't really check the item size, since it also needs to handle chunk items in the super block sys_chunk_array().
This patch adds two extra checks for chunk items in the chunk tree:

- Basic chunk item size
  If the item is smaller than struct btrfs_chunk (which already embeds one stripe), exit right away, as reading num_stripes could even run past the extent buffer boundary.

- Item size check against num_stripes
  If the item size doesn't match the size calculated from num_stripes, then either the item size or num_stripes is corrupted. Error out either way.
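For reference, the calculated size comes from a helper along these lines (a sketch of the upstream definition; struct btrfs_chunk already embeds the first stripe, so only the remaining stripes add to the item size):

	static inline unsigned long btrfs_chunk_item_size(int num_stripes)
	{
		return sizeof(struct btrfs_chunk) +
		       sizeof(struct btrfs_stripe) * (num_stripes - 1);
	}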
Reviewed-by: Josef Bacik josef@toxicpanda.com Signed-off-by: Qu Wenruo wqu@suse.com Reviewed-by: David Sterba dsterba@suse.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/btrfs/tree-checker.c | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 91ea38506fbb7..84b8d6ebf98f3 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -674,6 +674,44 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf, return 0; }
+/* + * Enhanced version of chunk item checker. + * + * The common btrfs_check_chunk_valid() doesn't check item size since it needs + * to work on super block sys_chunk_array which doesn't have full item ptr. + */ +static int check_leaf_chunk_item(struct extent_buffer *leaf, + struct btrfs_chunk *chunk, + struct btrfs_key *key, int slot) +{ + int num_stripes; + + if (btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk)) { + chunk_err(leaf, chunk, key->offset, + "invalid chunk item size: have %u expect [%zu, %u)", + btrfs_item_size_nr(leaf, slot), + sizeof(struct btrfs_chunk), + BTRFS_LEAF_DATA_SIZE(leaf->fs_info)); + return -EUCLEAN; + } + + num_stripes = btrfs_chunk_num_stripes(leaf, chunk); + /* Let btrfs_check_chunk_valid() handle this error type */ + if (num_stripes == 0) + goto out; + + if (btrfs_chunk_item_size(num_stripes) != + btrfs_item_size_nr(leaf, slot)) { + chunk_err(leaf, chunk, key->offset, + "invalid chunk item size: have %u expect %lu", + btrfs_item_size_nr(leaf, slot), + btrfs_chunk_item_size(num_stripes)); + return -EUCLEAN; + } +out: + return btrfs_check_chunk_valid(leaf, chunk, key->offset); +} + __printf(3, 4) __cold static void dev_item_err(const struct extent_buffer *eb, int slot, @@ -1265,7 +1303,7 @@ static int check_leaf_item(struct extent_buffer *leaf, break; case BTRFS_CHUNK_ITEM_KEY: chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); - ret = btrfs_check_chunk_valid(leaf, chunk, key->offset); + ret = check_leaf_chunk_item(leaf, chunk, key, slot); break; case BTRFS_DEV_ITEM_KEY: ret = check_dev_item(leaf, key, slot);
From: Matthias Fend matthias.fend@wolfvision.net
[ Upstream commit cc88525ebffc757e00cc5a5d61da6271646c7f5f ]
Since the DMA engine expects the burst length register content as a power-of-2 value, the burst length needs to be converted first. Additionally, add a burst length range check to avoid corrupting unrelated register bits.
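As a worked example (values are illustrative; the helpers and masks are the ones used in the diff below): a requested src_maxburst of 16 is first clamped into [1, 32768] and then converted to the power-of-2 exponent the register field expects:

	u32 burst_len = clamp(16U, 1U, ZYNQMP_DMA_MAX_SRC_BURST_LEN);	/* = 16 */
	u32 burst_val = __ilog2_u32(burst_len);				/* = 4, since 2^4 == 16 */

	/* Only the exponent, masked to ZYNQMP_DMA_ARLEN, reaches the register;
	 * writing the raw length (16) would spill into unrelated bits. */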
Signed-off-by: Matthias Fend matthias.fend@wolfvision.net Link: https://lore.kernel.org/r/20200115102249.24398-1-matthias.fend@wolfvision.ne... Signed-off-by: Vinod Koul vkoul@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/dma/xilinx/zynqmp_dma.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 9c845c07b107c..d47749a35863f 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -123,10 +123,12 @@ /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF -#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
@@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { - u32 val; + u32 val, burst_val;
val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); val = (val & ~ZYNQMP_DMA_ARLEN) | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); val = (val & ~ZYNQMP_DMA_AWLEN) | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); }
@@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, { struct zynqmp_dma_chan *chan = to_chan(dchan);
- chan->src_burst_len = config->src_maxburst; - chan->dst_burst_len = config->dst_maxburst; + chan->src_burst_len = clamp(config->src_maxburst, 1U, + ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN);
return 0; } @@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return PTR_ERR(chan->regs);
chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
From: Thomas Richter tmricht@linux.ibm.com
[ Upstream commit 32dab6828c42f087439d3e2617dc7283546bd8f7 ]
Use kzalloc() to allocate the auxiliary buffer structure initialized with all zeroes, to avoid random values showing up in the trace output.

Avoid double access to the SDB hardware flags.
Signed-off-by: Thomas Richter tmricht@linux.ibm.com Signed-off-by: Vasily Gorbik gor@linux.ibm.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/s390/kernel/perf_cpum_sf.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 229e1e2f8253a..996e447ead3a6 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1429,8 +1429,8 @@ static int aux_output_begin(struct perf_output_handle *handle, idx = aux->empty_mark + 1; for (i = 0; i < range_scan; i++, idx++) { te = aux_sdb_trailer(aux, idx); - te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; - te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK; + te->flags &= ~(SDB_TE_BUFFER_FULL_MASK | + SDB_TE_ALERT_REQ_MASK); te->overflow = 0; } /* Save the position of empty SDBs */ @@ -1477,8 +1477,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, te = aux_sdb_trailer(aux, alert_index); do { orig_flags = te->flags; - orig_overflow = te->overflow; - *overflow = orig_overflow; + *overflow = orig_overflow = te->overflow; if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { /* * SDB is already set by hardware. @@ -1712,7 +1711,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages, }
/* Allocate aux_buffer struct for the event */ - aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL); + aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL); if (!aux) goto no_aux; sfb = &aux->sfb;
From: Trond Myklebust trondmy@gmail.com
[ Upstream commit 90d2f1da832fd23290ef0c0d964d97501e5e8553 ]
If nfsd_file_mark_find_or_create() keeps winning the race for the nfsd_file_fsnotify_group->mark_mutex against nfsd_file_mark_put() then it can soft lock up, since fsnotify_add_inode_mark() ends up always finding an existing entry.
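For context, a simplified sketch of the livelock being broken (paraphrased from the commit message and the hunk below, not the full function):

	/*
	 * nfsd_file_mark_find_or_create() loops roughly like this: the add
	 * path keeps finding the existing (dying) mark, taking a reference
	 * on it fails because its refcount already hit zero, and the old
	 * code simply retried, spinning on mark_mutex. Calling
	 * fsnotify_destroy_mark() before retrying removes the stale entry,
	 * so the next iteration can insert a fresh mark and make progress.
	 */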
Signed-off-by: Trond Myklebust trond.myklebust@hammerspace.com Signed-off-by: J. Bruce Fields bfields@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/nfsd/filecache.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index 3007b8945d388..51c08ae79063c 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -133,9 +133,13 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf) struct nfsd_file_mark, nfm_mark)); mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex); - fsnotify_put_mark(mark); - if (likely(nfm)) + if (nfm) { + fsnotify_put_mark(mark); break; + } + /* Avoid soft lockup race with nfsd_file_mark_put() */ + fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group); + fsnotify_put_mark(mark); } else mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
From: Frederic Barrat fbarrat@linux.ibm.com
[ Upstream commit 05dd7da76986937fb288b4213b1fa10dbe0d1b33 ]
The pci_dn structure used to store a pointer to the struct pci_dev, so taking a reference on the device was required. However, the pci_dev pointer was later removed from the pci_dn structure, but the reference was kept for the npu device. See commit 902bdc57451c ("powerpc/powernv/idoa: Remove unnecessary pcidev from pci_dn").
We don't need to take a reference on the device when assigning the PE as the struct pnv_ioda_pe is cleaned up at the same time as the (physical) device is released. Doing so prevents the device from being released, which is a problem for opencapi devices, since we want to be able to remove them through PCI hotplug.
Now the ugly part: nvlink npu devices are not meant to be released. Because of the above, we've always leaked a reference and simply removing it now is dangerous and would likely require more work. There's currently no release device callback for nvlink devices for example. So to be safe, this patch leaks a reference on the npu device, but only for nvlink and not opencapi.
Signed-off-by: Frederic Barrat fbarrat@linux.ibm.com Reviewed-by: Andrew Donnellan ajd@linux.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Link: https://lore.kernel.org/r/20191121134918.7155-2-fbarrat@linux.ibm.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/powerpc/platforms/powernv/pci-ioda.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 058223233088e..e9cda7e316a50 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1062,14 +1062,13 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) return NULL; }
- /* NOTE: We get only one ref to the pci_dev for the pdn, not for the - * pointer in the PE data structure, both should be destroyed at the - * same time. However, this needs to be looked at more closely again - * once we actually start removing things (Hotplug, SR-IOV, ...) + /* NOTE: We don't get a reference for the pointer in the PE + * data structure, both the device and PE structures should be + * destroyed at the same time. However, removing nvlink + * devices will need some work. * * At some point we want to remove the PDN completely anyways */ - pci_dev_get(dev); pdn->pe_number = pe->pe_number; pe->flags = PNV_IODA_PE_DEV; pe->pdev = dev; @@ -1084,7 +1083,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) pnv_ioda_free_pe(pe); pdn->pe_number = IODA_INVALID_PE; pe->pdev = NULL; - pci_dev_put(dev); return NULL; }
@@ -1205,6 +1203,14 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev) struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus); struct pnv_phb *phb = hose->private_data;
+ /* + * Intentionally leak a reference on the npu device (for + * nvlink only; this is not an opencapi path) to make sure it + * never goes away, as it's been the case all along and some + * work is needed otherwise. + */ + pci_dev_get(npu_pdev); + /* * Due to a hardware errata PE#0 on the NPU is reserved for * error handling. This means we only have three PEs remaining @@ -1228,7 +1234,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev) */ dev_info(&npu_pdev->dev, "Associating to existing PE %x\n", pe_num); - pci_dev_get(npu_pdev); npu_pdn = pci_get_pdn(npu_pdev); rid = npu_pdev->bus->number << 8 | npu_pdn->devfn; npu_pdn->pe_number = pe_num;
On 18/09/2020 at 03:57, Sasha Levin wrote:
From: Frederic Barrat fbarrat@linux.ibm.com
[ Upstream commit 05dd7da76986937fb288b4213b1fa10dbe0d1b33 ]
This patch is not desirable for stable, for 5.4 and 4.19 (it was already flagged by autosel back in April. Not sure why it's showing again now)
Fred
On Fri, Sep 18, 2020 at 08:35:06AM +0200, Frederic Barrat wrote:
On 18/09/2020 at 03:57, Sasha Levin wrote:
From: Frederic Barrat fbarrat@linux.ibm.com
[ Upstream commit 05dd7da76986937fb288b4213b1fa10dbe0d1b33 ]
This patch is not desirable for stable, for 5.4 and 4.19 (it was already flagged by autosel back in April. Not sure why it's showing again now)
Hey Fred,
This was a bit of a "lie": it wasn't a run of AUTOSEL, but rather an audit of patches that went into distro/vendor trees but not into the upstream stable trees.
I can see that this patch was pulled into Ubuntu's 5.4 tree, is it not needed in the upstream stable tree?
On 19/09/2020 at 20:10, Sasha Levin wrote:
On Fri, Sep 18, 2020 at 08:35:06AM +0200, Frederic Barrat wrote:
On 18/09/2020 at 03:57, Sasha Levin wrote:
From: Frederic Barrat fbarrat@linux.ibm.com
[ Upstream commit 05dd7da76986937fb288b4213b1fa10dbe0d1b33 ]
This patch is not desirable for stable, for 5.4 and 4.19 (it was already flagged by autosel back in April. Not sure why it's showing again now)
Hey Fred,
This was a bit of a "lie", it wasn't a run of AUTOSEL, but rather an audit of patches that went into distro/vendor trees but not into the upstream stable trees.
I can see that this patch was pulled into Ubuntu's 5.4 tree, is it not needed in the upstream stable tree?
That patch in itself is useless (it replaces one ref counter leak with another). It was part of a longer series that we backported to Ubuntu's 5.4 tree. So it's really not needed in the stable trees. It likely wouldn't hurt or break anything, but there's really no point.
Fred
From: Oliver O'Halloran oohall@gmail.com
[ Upstream commit 4e0942c0302b5ad76b228b1a7b8c09f658a1d58a ]
Many drivers don't check for errors when they get an all-0xFFs response from an MMIO load. As a result, after an EEH event occurs, a driver can get stuck in a polling loop unless it has some kind of internal timeout logic.
Currently EEH tries to detect and report stuck drivers by dumping a stack trace after eeh_dev_check_failure() is called EEH_MAX_FAILS times on an already frozen PE. The value of EEH_MAX_FAILS was chosen so that a dump would occur every few seconds if the driver was spinning in a loop. This results in a lot of spurious stack traces in the kernel log.
Fix this by limiting it to printing one stack trace for each PE freeze. If the driver is truly stuck, the kernel's hung task detector is better suited to reporting the problem anyway.
Signed-off-by: Oliver O'Halloran oohall@gmail.com Reviewed-by: Sam Bobroff sbobroff@linux.ibm.com Tested-by: Sam Bobroff sbobroff@linux.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Link: https://lore.kernel.org/r/20191016012536.22588-1-oohall@gmail.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/powerpc/kernel/eeh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index bc8a551013be9..c35069294ecfb 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -503,7 +503,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) rc = 1; if (pe->state & EEH_PE_ISOLATED) { pe->check_count++; - if (pe->check_count % EEH_MAX_FAILS == 0) { + if (pe->check_count == EEH_MAX_FAILS) { dn = pci_device_to_OF_node(dev); if (dn) location = of_get_property(dn, "ibm,loc-code",
From: Maxim Mikityanskiy maxtram95@gmail.com
[ Upstream commit 268d3636dfb22254324774de1f8875174b3be064 ]
Currently, kmemdup is applied to the firmware data, and it invokes kmalloc under the hood. The firmware size and patch_length are big (more than PAGE_SIZE), and on some low-end systems (like the ASUS E202SA) kmalloc may fail to allocate a contiguous chunk under high memory usage and fragmentation:
Bluetooth: hci0: RTL: examining hci_ver=06 hci_rev=000a lmp_ver=06 lmp_subver=8821 Bluetooth: hci0: RTL: rom_version status=0 version=1 Bluetooth: hci0: RTL: loading rtl_bt/rtl8821a_fw.bin kworker/u9:2: page allocation failure: order:4, mode:0x40cc0(GFP_KERNEL|__GFP_COMP), nodemask=(null),cpuset=/,mems_allowed=0 <stack trace follows>
As the firmware is loaded on each resume, Bluetooth will stop working after several iterations, once the kernel fails to allocate an order-4 page (2^4 = 16 contiguous pages, i.e. 64 KiB with 4 KiB pages).
This patch replaces kmemdup with kvmalloc+memcpy. A physically contiguous chunk isn't required here, because the buffer is not mapped to the device directly.
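A general allocator note for context (kernel-wide behaviour, not specific to this patch):

	/*
	 * kvmalloc() first attempts a physically contiguous kmalloc() and
	 * falls back to vmalloc() under fragmentation; such buffers must be
	 * freed with kvfree(), which handles both backings. The fallback is
	 * safe here because the firmware buffer is only read by the CPU and
	 * is never DMA-mapped to the device directly.
	 */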
Signed-off-by: Maxim Mikityanskiy maxtram95@gmail.com Signed-off-by: Marcel Holtmann marcel@holtmann.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/bluetooth/btrtl.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index bf3c02be69305..0dfaf90a31b06 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -370,11 +370,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, * the end. */ len = patch_length; - buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length, - GFP_KERNEL); + buf = kvmalloc(patch_length, GFP_KERNEL); if (!buf) return -ENOMEM;
+ memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4); memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
*_buf = buf; @@ -460,8 +460,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) if (ret < 0) return ret; ret = fw->size; - *buff = kmemdup(fw->data, ret, GFP_KERNEL); - if (!*buff) + *buff = kvmalloc(fw->size, GFP_KERNEL); + if (*buff) + memcpy(*buff, fw->data, ret); + else ret = -ENOMEM;
release_firmware(fw); @@ -499,14 +501,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, goto out;
if (btrtl_dev->cfg_len > 0) { - tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); + tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); if (!tbuff) { ret = -ENOMEM; goto out; }
memcpy(tbuff, fw_data, ret); - kfree(fw_data); + kvfree(fw_data);
memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len); ret += btrtl_dev->cfg_len; @@ -519,14 +521,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, ret = rtl_download_firmware(hdev, fw_data, ret);
out: - kfree(fw_data); + kvfree(fw_data); return ret; }
void btrtl_free(struct btrtl_device_info *btrtl_dev) { - kfree(btrtl_dev->fw_data); - kfree(btrtl_dev->cfg_data); + kvfree(btrtl_dev->fw_data); + kvfree(btrtl_dev->cfg_data); kfree(btrtl_dev); } EXPORT_SYMBOL_GPL(btrtl_free);
From: Josef Bacik jbacik@fb.com
[ Upstream commit cbc3b92ce037f5e7536f6db157d185cd8b8f615c ]
I noticed when trying to use the trace-cmd python interface that reading the raw buffer wasn't working for kernel_stack events. This is because they use a stubbed version of __dynamic_array that doesn't do the __data_loc trick and doesn't encode the length of the array into the field; instead, the array just shows up with a size of 0. So change this to __array and set the length to FTRACE_STACK_ENTRIES, since this is what we actually do in practice and it matches how user_stack_trace works.
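For background (general ftrace format behaviour, summarized from the tracing code rather than from this patch):

	/*
	 * A real __dynamic_array() field is recorded as a 4-byte __data_loc
	 * word packing the payload's offset into the event in the low 16
	 * bits and its length in the high 16 bits; that length is what lets
	 * tools like trace-cmd size the array. The stubbed F_STRUCT version
	 * recorded no length, so the caller array showed up with size 0.
	 */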
Link: http://lkml.kernel.org/r/1411589652-1318-1-git-send-email-jbacik@fb.com
Signed-off-by: Josef Bacik jbacik@fb.com [ Pulled from the archeological digging of my INBOX ] Signed-off-by: Steven Rostedt (VMware) rostedt@goodmis.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/trace/trace_entries.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index fc8e97328e540..78c146efb8623 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -174,7 +174,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
F_STRUCT( __field( int, size ) - __dynamic_array(unsigned long, caller ) + __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) ),
F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
From: Theodore Ts'o tytso@mit.edu
[ Upstream commit 244adf6426ee31a83f397b700d964cff12a247d3 ]
This fixes the direct I/O versus writeback race which can reveal stale data, and it improves the tail latency of commits on slow devices.
Link: https://lore.kernel.org/r/20200125022254.1101588-1-tytso@mit.edu Signed-off-by: Theodore Ts'o tytso@mit.edu Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ext4/super.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4aae7e3e89a12..c32b8161ad3e9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1546,6 +1546,7 @@ static const match_table_t tokens = { {Opt_auto_da_alloc, "auto_da_alloc"}, {Opt_noauto_da_alloc, "noauto_da_alloc"}, {Opt_dioread_nolock, "dioread_nolock"}, + {Opt_dioread_lock, "nodioread_nolock"}, {Opt_dioread_lock, "dioread_lock"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, @@ -3750,6 +3751,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) set_opt(sb, NO_UID32); /* xattr user namespace & acls are now defaulted on */ set_opt(sb, XATTR_USER); + set_opt(sb, DIOREAD_NOLOCK); #ifdef CONFIG_EXT4_FS_POSIX_ACL set_opt(sb, POSIX_ACL); #endif @@ -3927,9 +3929,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) #endif
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { - printk_once(KERN_WARNING "EXT4-fs: Warning: mounting " - "with data=journal disables delayed " - "allocation and O_DIRECT support!\n"); + printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n"); + clear_opt(sb, DIOREAD_NOLOCK); if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and delalloc");
Hello Sasha,
On 9/18/20 7:27 AM, Sasha Levin wrote:
From: Theodore Ts'o tytso@mit.edu
[ Upstream commit 244adf6426ee31a83f397b700d964cff12a247d3 ]
This fixes the direct I/O versus writeback race which can reveal stale data, and it improves the tail latency of commits on slow devices.
Link: https://lore.kernel.org/r/20200125022254.1101588-1-tytso@mit.edu Signed-off-by: Theodore Ts'o tytso@mit.edu Signed-off-by: Sasha Levin sashal@kernel.org
I see that the patch in question is being taken into many different stable trees. I remember there was a fix on top of this one[1]; the thread below[1] has more details. Just wanted to point this out. Please ignore if it has already been taken.
[1]: https://www.spinics.net/lists/linux-ext4/msg72219.html
-ritesh
From: Vincent Whitchurch vincent.whitchurch@axis.com
[ Upstream commit 40ff1ddb5570284e039e0ff14d7a859a73dc3673 ]
The stacktrace code can read beyond the stack size when it attempts to read pt_regs from exception frames.

This can happen on normal, non-corrupt stacks. Since the unwind information in the extable is not correct for function prologues, the unwinding code can return data from the stack which is not actually the caller's function address, and if in_entry_text() happens to succeed on this value, we can end up reading data from outside the task's stack when attempting to read pt_regs, since there is no bounds check.
Example:
[<8010e729>] (unwind_backtrace) from [<8010a9c9>] (show_stack+0x11/0x14) [<8010a9c9>] (show_stack) from [<8057d8d7>] (dump_stack+0x87/0xac) [<8057d8d7>] (dump_stack) from [<8012271d>] (tasklet_action_common.constprop.4+0xa5/0xa8) [<8012271d>] (tasklet_action_common.constprop.4) from [<80102333>] (__do_softirq+0x11b/0x31c) [<80102333>] (__do_softirq) from [<80122485>] (irq_exit+0xad/0xd8) [<80122485>] (irq_exit) from [<8015f3d7>] (__handle_domain_irq+0x47/0x84) [<8015f3d7>] (__handle_domain_irq) from [<8036a523>] (gic_handle_irq+0x43/0x78) [<8036a523>] (gic_handle_irq) from [<80101a49>] (__irq_svc+0x69/0xb4) Exception stack(0xeb491f58 to 0xeb491fa0) 1f40: 7eb14794 00000000 1f60: ffffffff 008dd32c 008dd324 ffffffff 008dd314 0000002a 801011e4 eb490000 1f80: 0000002a 7eb1478c 50c5387d eb491fa8 80101001 8023d09c 40080033 ffffffff [<80101a49>] (__irq_svc) from [<8023d09c>] (do_pipe2+0x0/0xac) [<8023d09c>] (do_pipe2) from [<ffffffff>] (0xffffffff) Exception stack(0xeb491fc8 to 0xeb492010) 1fc0: 008dd314 0000002a 00511ad8 008de4c8 7eb14790 7eb1478c 1fe0: 00511e34 7eb14774 004c8557 76f44098 60080030 7eb14794 00000000 00000000 2000: 00000001 00000000 ea846c00 ea847cc0
In this example, the stack limit is 0xeb492000, but 16 bytes outside the stack have been read.
Fix it by adding bounds checks.
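The bound relies on task stacks being THREAD_SIZE-aligned, so rounding an in-stack address up to THREAD_SIZE yields that stack's top. An illustrative sketch of the check added in both hunks (not verbatim kernel code):

	unsigned long top = ALIGN(frame->sp, THREAD_SIZE);
	struct pt_regs *regs = (struct pt_regs *)frame->sp;

	if ((unsigned long)(regs + 1) > top)
		return 0;	/* pt_regs would run past the stack top */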
Signed-off-by: Vincent Whitchurch vincent.whitchurch@axis.com Signed-off-by: Russell King rmk+kernel@armlinux.org.uk Signed-off-by: Sasha Levin sashal@kernel.org --- arch/arm/kernel/stacktrace.c | 2 ++ arch/arm/kernel/traps.c | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index a082f6e4f0f4a..76ea4178a55cb 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -116,6 +116,8 @@ static int save_trace(struct stackframe *frame, void *d) return 0;
regs = (struct pt_regs *)frame->sp; + if ((unsigned long)®s[1] > ALIGN(frame->sp, THREAD_SIZE)) + return 0;
trace->entries[trace->nr_entries++] = regs->ARM_pc;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index c053abd1fb539..97a512551b217 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) { + unsigned long end = frame + 4 + sizeof(struct pt_regs); + #ifdef CONFIG_KALLSYMS printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); #else printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); #endif
- if (in_entry_text(from)) - dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); + if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE)) + dump_mem("", "Exception stack", frame + 4, end); }
void dump_backtrace_stm(u32 *stack, u32 instruction)
From: Mert Dirik mertdirik@gmail.com
[ Upstream commit 5b362498a79631f283578b64bf6f4d15ed4cc19a ]
Add the required USB ID for running the SMCWUSBT-G2 wireless adapter (SMC "EZ Connect g").

This device uses the ar5523 chipset and requires firmware to be loaded. Even though the PID of the device is 4507, this patch adds it as 4506 so that the AR5523_DEVICE_UG macro can set the AR5523_FLAG_PRE_FIRMWARE flag for PID 4507.
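A sketch of how the ID-table macro is expected to expand, inferred from the commit message rather than copied from the driver (treat the exact definition as an assumption):

	/* Inferred shape (assumption): one UG entry matches two PIDs --
	 * 'device' once firmware is running, and 'device' + 1 for the bare
	 * device, which gets AR5523_FLAG_PRE_FIRMWARE so firmware is loaded. */
	#define AR5523_DEVICE_UG(vendor, device) \
		{ USB_DEVICE((vendor), (device)) }, \
		{ USB_DEVICE((vendor), (device) + 1), \
		  .driver_info = AR5523_FLAG_PRE_FIRMWARE }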
Signed-off-by: Mert Dirik mertdirik@gmail.com Signed-off-by: Kalle Valo kvalo@codeaurora.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/ath/ar5523/ar5523.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index da2d179430ca5..4c57e79e5779a 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = { AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */ + AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect + SMCWUSBT-G2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
From: Jeff Layton jlayton@kernel.org
[ Upstream commit 9a6bed4fe0c8bf57785cbc4db9f86086cb9b193d ]
If the caller passes in a NULL cap_reservation and we can't allocate one, then ensure that we fail gracefully.
Signed-off-by: Jeff Layton jlayton@kernel.org Signed-off-by: Ilya Dryomov idryomov@gmail.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ceph/inode.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index c07407586ce87..660a878e20ef2 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -754,8 +754,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page, info_caps = le32_to_cpu(info->cap.caps);
/* prealloc new cap struct */ - if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) + if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) { new_cap = ceph_get_cap(mdsc, caps_reservation); + if (!new_cap) + return -ENOMEM; + }
/* * prealloc xattr data, if it looks like we'll need it. only
From: Sven Schnelle svens@linux.ibm.com
[ Upstream commit af4ddd607dff7aabd466a4a878e01b9f592a75ab ]
test.d/ftrace/func-filter-glob.tc is failing on s390 because it has ARCH_INLINE_SPIN_LOCK and friends set to 'y', so the usual __raw_spin_lock symbol isn't in the ftrace function list. Change '*aw*lock' to '*pin*lock', which will hopefully match some of the locking functions on all platforms.
Reviewed-by: Steven Rostedt (VMware) rostedt@goodmis.org Signed-off-by: Sven Schnelle svens@linux.ibm.com Signed-off-by: Shuah Khan skhan@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- .../testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc index 27a54a17da65d..f4e92afab14b2 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc @@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$' ftrace_filter_check 'schedule*' '^schedule.*$'
# filter by *mid*end -ftrace_filter_check '*aw*lock' '.*aw.*lock$' +ftrace_filter_check '*pin*lock' '.*pin.*lock$'
# filter by start*mid* ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
From: Doug Smythies doug.smythies@gmail.com
[ Upstream commit e749e09db30c38f1a275945814b0109e530a07b0 ]
Some syntax needs to be more rigorous for Python 3: subprocess.check_output() returns bytes there, so the result must be decoded to str before string operations such as .replace() can be applied. Backwards compatibility was tested with Python 2.7.
Signed-off-by: Doug Smythies dsmythies@telus.net Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../intel_pstate_tracer.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py index 2d6d342b148f1..1351975d07699 100755 --- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py +++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py @@ -11,11 +11,11 @@ then this utility enables and collects trace data for a user specified interval and generates performance plots.
Prerequisites: - Python version 2.7.x + Python version 2.7.x or higher gnuplot 5.0 or higher - gnuplot-py 1.8 + gnuplot-py 1.8 or higher (Most of the distributions have these required packages. They may be called - gnuplot-py, phython-gnuplot. ) + gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... )
HWP (Hardware P-States are disabled) Kernel config for Linux trace is enabled @@ -181,7 +181,7 @@ def plot_pstate_cpu_with_sample(): g_plot('set xlabel "Samples"') g_plot('set ylabel "P-State"') g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -198,7 +198,7 @@ def plot_pstate_cpu(): # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file. # plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s' # - title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -212,7 +212,7 @@ def plot_load_cpu(): g_plot('set ylabel "CPU load (percent)"') g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -226,7 +226,7 @@ def plot_frequency_cpu(): g_plot('set ylabel "CPU Frequency (GHz)"') g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -241,7 +241,7 @@ def plot_duration_cpu(): g_plot('set ylabel "Timer Duration (MilliSeconds)"') g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -255,7 +255,7 @@ def plot_scaled_cpu(): g_plot('set ylabel "Scaled Busy (Unitless)"') g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -269,7 +269,7 @@ def plot_boost_cpu(): g_plot('set ylabel "CPU IO Boost (percent)"') g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -283,7 +283,7 @@ def plot_ghz_cpu(): g_plot('set ylabel "TSC Frequency (GHz)"') g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
- title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e 's/.csv//'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str)
From: Manish Mandlik mmandlik@google.com
[ Upstream commit 6c08fc896b60893c5d673764b0668015d76df462 ]
There is no lock preventing both l2cap_sock_release() and chan->ops->close() from running at the same time.
If we consider Thread A running l2cap_chan_timeout() and Thread B running l2cap_sock_release(), expected behavior is:
A::l2cap_chan_timeout()->l2cap_chan_close()->l2cap_sock_teardown_cb()
A::l2cap_chan_timeout()->l2cap_sock_close_cb()->l2cap_sock_kill()
B::l2cap_sock_release()->sock_orphan()
B::l2cap_sock_release()->l2cap_sock_kill()
where sock_orphan() clears "sk->sk_socket" and l2cap_sock_teardown_cb() marks the socket as SOCK_ZAPPED.
In l2cap_sock_kill(), there is an "if-statement" that checks whether both sock_orphan() and sock_teardown() have been run, i.e. sk->sk_socket is NULL and the socket is marked as SOCK_ZAPPED. The socket is killed only if the condition is satisfied.
In the race condition, the following occurs:
A::l2cap_chan_timeout()->l2cap_chan_close()->l2cap_sock_teardown_cb()
B::l2cap_sock_release()->sock_orphan()
B::l2cap_sock_release()->l2cap_sock_kill()
A::l2cap_chan_timeout()->l2cap_sock_close_cb()->l2cap_sock_kill()
In this scenario, the "if-statement" is true in both B::l2cap_sock_kill() and A::l2cap_sock_kill(), and we hit a "refcount: underflow; use-after-free" bug.
Similar conditions occur at other places where teardown/sock_kill happens:
l2cap_disconnect_rsp()->l2cap_chan_del()->l2cap_sock_teardown_cb()
l2cap_disconnect_rsp()->l2cap_sock_close_cb()->l2cap_sock_kill()
l2cap_conn_del()->l2cap_chan_del()->l2cap_sock_teardown_cb() l2cap_conn_del()->l2cap_sock_close_cb()->l2cap_sock_kill()
l2cap_disconnect_req()->l2cap_chan_del()->l2cap_sock_teardown_cb() l2cap_disconnect_req()->l2cap_sock_close_cb()->l2cap_sock_kill()
l2cap_sock_cleanup_listen()->l2cap_chan_close()->l2cap_sock_teardown_cb() l2cap_sock_cleanup_listen()->l2cap_sock_kill()
Protect teardown/sock_kill and orphan/sock_kill by taking a hold and the channel lock on the l2cap channel, ensuring that the socket is killed only after it has been marked zapped and orphaned.
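The resulting pattern, common to all the call sites touched below (a distilled sketch of the release path):

    l2cap_chan_hold(chan); /* pin the channel across the teardown */
    l2cap_chan_lock(chan);

    /* orphan and kill now run under one hold/lock pair, so the
     * zapped + orphan check in l2cap_sock_kill() cannot race */
    sock_orphan(sk);
    l2cap_sock_kill(sk);

    l2cap_chan_unlock(chan);
    l2cap_chan_put(chan);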
Signed-off-by: Manish Mandlik mmandlik@google.com Signed-off-by: Marcel Holtmann marcel@holtmann.org Signed-off-by: Sasha Levin sashal@kernel.org --- net/bluetooth/l2cap_core.c | 26 +++++++++++++++----------- net/bluetooth/l2cap_sock.c | 16 +++++++++++++--- 2 files changed, 28 insertions(+), 14 deletions(-)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index a845786258a0b..eb2804ac50756 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -419,6 +419,9 @@ static void l2cap_chan_timeout(struct work_struct *work) BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
mutex_lock(&conn->chan_lock); + /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling + * this work. No need to call l2cap_chan_hold(chan) here again. + */ l2cap_chan_lock(chan);
if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) @@ -431,12 +434,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_close(chan, reason);
- l2cap_chan_unlock(chan); - chan->ops->close(chan); - mutex_unlock(&conn->chan_lock);
+ l2cap_chan_unlock(chan); l2cap_chan_put(chan); + + mutex_unlock(&conn->chan_lock); }
struct l2cap_chan *l2cap_chan_create(void) @@ -1734,9 +1737,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_del(chan, err);
- l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); }
@@ -4355,6 +4358,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, return 0; }
+ l2cap_chan_hold(chan); l2cap_chan_lock(chan);
rsp.dcid = cpu_to_le16(chan->scid); @@ -4363,12 +4367,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
chan->ops->set_shutdown(chan);
- l2cap_chan_hold(chan); l2cap_chan_del(chan, ECONNRESET);
- l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock); @@ -4400,20 +4403,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, return 0; }
+ l2cap_chan_hold(chan); l2cap_chan_lock(chan);
if (chan->state != BT_DISCONN) { l2cap_chan_unlock(chan); + l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); return 0; }
- l2cap_chan_hold(chan); l2cap_chan_del(chan, 0);
- l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a7be8b59b3c28..ab65304f3f637 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1042,7 +1042,7 @@ done: }
/* Kill socket (only if zapped and orphan) - * Must be called on unlocked socket. + * Must be called on unlocked socket, with l2cap channel lock. */ static void l2cap_sock_kill(struct sock *sk) { @@ -1203,8 +1203,15 @@ static int l2cap_sock_release(struct socket *sock)
err = l2cap_sock_shutdown(sock, 2);
+ l2cap_chan_hold(l2cap_pi(sk)->chan); + l2cap_chan_lock(l2cap_pi(sk)->chan); + sock_orphan(sk); l2cap_sock_kill(sk); + + l2cap_chan_unlock(l2cap_pi(sk)->chan); + l2cap_chan_put(l2cap_pi(sk)->chan); + return err; }
@@ -1222,12 +1229,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) BT_DBG("child chan %p state %s", chan, state_to_string(chan->state));
+ l2cap_chan_hold(chan); l2cap_chan_lock(chan); + __clear_chan_timer(chan); l2cap_chan_close(chan, ECONNRESET); - l2cap_chan_unlock(chan); - l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } }
From: Vasily Averin vvs@virtuozzo.com
[ Upstream commit 10c8d69f314d557d94d74ec492575ae6a4f1eb1c ]
If a seq_file .next function does not change the position index, a read after some lseek can generate unexpected output.
In Aug 2018, NeilBrown noticed commit 1f4aace60b0e ("fs/seq_file.c: simplify seq_file iteration code and interface"): "Some ->next functions do not increment *pos when they return NULL... Note that such ->next functions are buggy and should be fixed. A simple demonstration is
dd if=/proc/swaps bs=1000 skip=1
Choose any block size larger than the size of /proc/swaps. This will always show the whole last line of /proc/swaps"
The problem described is still present. If you lseek into the middle of the last output line, the following read outputs the end of the last line and then the whole last line once again.
$ dd if=/proc/swaps bs=1 # usual output
Filename Type Size Used Priority
/dev/dm-0 partition 4194812 97536 -2
104+0 records in
104+0 records out
104 bytes copied
$ dd if=/proc/swaps bs=40 skip=1 # last line was generated twice
dd: /proc/swaps: cannot skip to specified offset
v/dm-0 partition 4194812 97536 -2
/dev/dm-0 partition 4194812 97536 -2
3+1 records in
3+1 records out
131 bytes copied
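The contract being violated: a seq_file .next handler must advance *pos even when it returns NULL. A minimal sketch of the corrected shape (example_next() and example_lookup() are hypothetical names, not kernel functions):

    static void *example_next(struct seq_file *m, void *v, loff_t *pos)
    {
            ++(*pos);                    /* advance unconditionally */
            return example_lookup(*pos); /* hypothetical: NULL at end */
    }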
https://bugzilla.kernel.org/show_bug.cgi?id=206283
Link: http://lkml.kernel.org/r/bd8cfd7b-ac95-9b91-f9e7-e8438bd5047d@virtuozzo.com Signed-off-by: Vasily Averin vvs@virtuozzo.com Reviewed-by: Andrew Morton akpm@linux-foundation.org Cc: Jann Horn jannh@google.com Cc: Alexander Viro viro@zeniv.linux.org.uk Cc: Kees Cook keescook@chromium.org Cc: Hugh Dickins hughd@google.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/swapfile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c index 891a3ef486511..646fd0a8e3202 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2737,10 +2737,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) else type = si->type + 1;
+ ++(*pos); for (; (si = swap_type_to_swap_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; - ++*pos; return si; }
From: Steven Price steven.price@arm.com
[ Upstream commit c02a98753e0a36ba65a05818626fa6adeb4e7c97 ]
If walk_pte_range() is called with an 'end' argument that is beyond the last page of memory (e.g. ~0UL), then the comparison between 'addr' and 'end' will always fail (addr stays page-aligned, so it can never equal ~0UL) and the loop will be infinite. Instead, change the comparison to >= while accounting for overflow.
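A sketch of the fixed loop shape (visit_pte() is a hypothetical stand-in for the per-page work): testing before the increment means the last page is detected without ever computing an address past ~0UL.

    for (;;) {
            visit_pte(addr);             /* hypothetical per-page callback */
            if (addr >= end - PAGE_SIZE) /* last page reached, no overflow */
                    break;
            addr += PAGE_SIZE;
    }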
Link: http://lkml.kernel.org/r/20191218162402.45610-15-steven.price@arm.com Signed-off-by: Steven Price steven.price@arm.com Cc: Albert Ou aou@eecs.berkeley.edu Cc: Alexandre Ghiti alex@ghiti.fr Cc: Andy Lutomirski luto@kernel.org Cc: Ard Biesheuvel ard.biesheuvel@linaro.org Cc: Arnd Bergmann arnd@arndb.de Cc: Benjamin Herrenschmidt benh@kernel.crashing.org Cc: Borislav Petkov bp@alien8.de Cc: Catalin Marinas catalin.marinas@arm.com Cc: Christian Borntraeger borntraeger@de.ibm.com Cc: Dave Hansen dave.hansen@linux.intel.com Cc: David S. Miller davem@davemloft.net Cc: Heiko Carstens heiko.carstens@de.ibm.com Cc: "H. Peter Anvin" hpa@zytor.com Cc: Ingo Molnar mingo@redhat.com Cc: James Hogan jhogan@kernel.org Cc: James Morse james.morse@arm.com Cc: Jerome Glisse jglisse@redhat.com Cc: "Liang, Kan" kan.liang@linux.intel.com Cc: Mark Rutland mark.rutland@arm.com Cc: Michael Ellerman mpe@ellerman.id.au Cc: Paul Burton paul.burton@mips.com Cc: Paul Mackerras paulus@samba.org Cc: Paul Walmsley paul.walmsley@sifive.com Cc: Peter Zijlstra peterz@infradead.org Cc: Ralf Baechle ralf@linux-mips.org Cc: Russell King linux@armlinux.org.uk Cc: Thomas Gleixner tglx@linutronix.de Cc: Vasily Gorbik gor@linux.ibm.com Cc: Vineet Gupta vgupta@synopsys.com Cc: Will Deacon will@kernel.org Cc: Zong Li zong.li@sifive.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/pagewalk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index d48c2a986ea3f..4eb09e0898817 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -16,9 +16,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk); if (err) break; - addr += PAGE_SIZE; - if (addr == end) + if (addr >= end - PAGE_SIZE) break; + addr += PAGE_SIZE; pte++; }
From: Hillf Danton hdanton@sina.com
[ Upstream commit 2a154903cec20fb64ff4d7d617ca53c16f8fd53a ]
Prefetch the channel pointer before killing the sock (l2cap_sock_kill() may free the socket, after which l2cap_pi(sk) must not be dereferenced) in order to fix a UAF like:
BUG: KASAN: use-after-free in l2cap_sock_release+0x24c/0x290 net/bluetooth/l2cap_sock.c:1212 Read of size 8 at addr ffff8880944904a0 by task syz-fuzzer/9751
Reported-by: syzbot+c3c5bdea7863886115dc@syzkaller.appspotmail.com Fixes: 6c08fc896b60 ("Bluetooth: Fix refcount use-after-free issue") Cc: Manish Mandlik mmandlik@google.com Signed-off-by: Hillf Danton hdanton@sina.com Signed-off-by: Marcel Holtmann marcel@holtmann.org Signed-off-by: Sasha Levin sashal@kernel.org --- net/bluetooth/l2cap_sock.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index ab65304f3f637..390a9afab6473 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1193,6 +1193,7 @@ static int l2cap_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; + struct l2cap_chan *chan;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -1202,15 +1203,16 @@ static int l2cap_sock_release(struct socket *sock) bt_sock_unlink(&l2cap_sk_list, sk);
err = l2cap_sock_shutdown(sock, 2); + chan = l2cap_pi(sk)->chan;
- l2cap_chan_hold(l2cap_pi(sk)->chan); - l2cap_chan_lock(l2cap_pi(sk)->chan); + l2cap_chan_hold(chan); + l2cap_chan_lock(chan);
sock_orphan(sk); l2cap_sock_kill(sk);
- l2cap_chan_unlock(l2cap_pi(sk)->chan); - l2cap_chan_put(l2cap_pi(sk)->chan); + l2cap_chan_unlock(chan); + l2cap_chan_put(chan);
return err; }
From: Zhuang Yanying ann.zhuangyanying@huawei.com
[ Upstream commit 7df003c85218b5f5b10a7f6418208f31e813f38f ]
We are testing virtual machines with KSM on a v5.4-rc2 kernel and found a zero_page refcount overflow. The refcount is increased in try_async_pf() (via get_user_pages) without being decreased in mmu_set_spte() while handling an EPT violation. In kvm_release_pfn_clean(), only unreserved pages get a put_page() call; however, the zero page is reserved. So, as VMs are created and destroyed, the refcount of the zero page keeps increasing until it overflows.
step1:
echo 10000 > /sys/kernel/mm/ksm/pages_to_scan
echo 1 > /sys/kernel/mm/ksm/run
echo 1 > /sys/kernel/mm/ksm/use_zero_pages
step2: just create several normal qemu kvm VMs, destroy them after 10s, and repeat this action continuously.
After a long period of time, all domains hang because the refcount of the zero page has overflowed.
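The leak path can be seen in the release helper (a paraphrased sketch of the kvm_main.c logic of this era, not a verbatim quote):

    void kvm_release_pfn_clean(kvm_pfn_t pfn)
    {
            /* reserved pages are never put, and the zero page is
             * reserved, so the reference taken via get_user_pages() in
             * try_async_pf() is leaked on every such fault */
            if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
                    put_page(pfn_to_page(pfn));
    }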
Qemu prints an error log as follows:
…
error: kvm run failed Bad address
EAX=00006cdc EBX=00000008 ECX=80202001 EDX=078bfbfd
ESI=ffffffff EDI=00000000 EBP=00000008 ESP=00006cc4
EIP=000efd75 EFL=00010002 [-------] CPL=0 II=0 A20=1 SMM=0 HLT=0
ES =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
CS =0008 00000000 ffffffff 00c09b00 DPL=0 CS32 [-RA]
SS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
DS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
FS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
GS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
LDT=0000 00000000 0000ffff 00008200 DPL=0 LDT
TR =0000 00000000 0000ffff 00008b00 DPL=0 TSS32-busy
GDT= 000f7070 00000037
IDT= 000f70ae 00000000
CR0=00000011 CR2=00000000 CR3=00000000 CR4=00000000
DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 DR3=0000000000000000
DR6=00000000ffff0ff0 DR7=0000000000000400
EFER=0000000000000000
Code=00 01 00 00 00 e9 e8 00 00 00 c7 05 4c 55 0f 00 01 00 00 00 <8b> 35 00 00 01 00 8b 3d 04 00 01 00 b8 d8 d3 00 00 c1 e0 08 0c ea a3 00 00 01 00 c7 05 04 …
Meanwhile, a kernel warning is reported:
[40914.836375] WARNING: CPU: 3 PID: 82067 at ./include/linux/mm.h:987 try_get_page+0x1f/0x30
[40914.836412] CPU: 3 PID: 82067 Comm: CPU 0/KVM Kdump: loaded Tainted: G OE 5.2.0-rc2 #5
[40914.836415] RIP: 0010:try_get_page+0x1f/0x30
[40914.836417] Code: 40 00 c3 0f 1f 84 00 00 00 00 00 48 8b 47 08 a8 01 75 11 8b 47 34 85 c0 7e 10 f0 ff 47 34 b8 01 00 00 00 c3 48 8d 78 ff eb e9 <0f> 0b 31 c0 c3 66 90 66 2e 0f 1f 84 00 0 0 00 00 00 48 8b 47 08 a8
[40914.836418] RSP: 0018:ffffb4144e523988 EFLAGS: 00010286
[40914.836419] RAX: 0000000080000000 RBX: 0000000000000326 RCX: 0000000000000000
[40914.836420] RDX: 0000000000000000 RSI: 00004ffdeba10000 RDI: ffffdf07093f6440
[40914.836421] RBP: ffffdf07093f6440 R08: 800000424fd91225 R09: 0000000000000000
[40914.836421] R10: ffff9eb41bfeebb8 R11: 0000000000000000 R12: ffffdf06bbd1e8a8
[40914.836422] R13: 0000000000000080 R14: 800000424fd91225 R15: ffffdf07093f6440
[40914.836423] FS: 00007fb60ffff700(0000) GS:ffff9eb4802c0000(0000) knlGS:0000000000000000
[40914.836425] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[40914.836426] CR2: 0000000000000000 CR3: 0000002f220e6002 CR4: 00000000003626e0
[40914.836427] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[40914.836427] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[40914.836428] Call Trace:
[40914.836433] follow_page_pte+0x302/0x47b
[40914.836437] __get_user_pages+0xf1/0x7d0
[40914.836441] ? irq_work_queue+0x9/0x70
[40914.836443] get_user_pages_unlocked+0x13f/0x1e0
[40914.836469] __gfn_to_pfn_memslot+0x10e/0x400 [kvm]
[40914.836486] try_async_pf+0x87/0x240 [kvm]
[40914.836503] tdp_page_fault+0x139/0x270 [kvm]
[40914.836523] kvm_mmu_page_fault+0x76/0x5e0 [kvm]
[40914.836588] vcpu_enter_guest+0xb45/0x1570 [kvm]
[40914.836632] kvm_arch_vcpu_ioctl_run+0x35d/0x580 [kvm]
[40914.836645] kvm_vcpu_ioctl+0x26e/0x5d0 [kvm]
[40914.836650] do_vfs_ioctl+0xa9/0x620
[40914.836653] ksys_ioctl+0x60/0x90
[40914.836654] __x64_sys_ioctl+0x16/0x20
[40914.836658] do_syscall_64+0x5b/0x180
[40914.836664] entry_SYSCALL_64_after_hwframe+0x44/0xa9
[40914.836666] RIP: 0033:0x7fb61cb6bfc7
Signed-off-by: LinFeng linfeng23@huawei.com Signed-off-by: Zhuang Yanying ann.zhuangyanying@huawei.com Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- virt/kvm/kvm_main.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 278bdc53047e8..6624fbf37f9b9 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -185,6 +185,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn) */ if (pfn_valid(pfn)) return PageReserved(pfn_to_page(pfn)) && + !is_zero_pfn(pfn) && !kvm_is_zone_device_pfn(pfn);
return true;
From: Mohan Kumar mkumard@nvidia.com
[ Upstream commit 6d011d5057ff88ee556c000ac6fe0be23bdfcd72 ]
If the RIRB interrupt status is cleared after the write pointer is read, a race condition can occur in which the last response(s) written into the RIRB remain unserviced by the IRQ handler, eventually causing azx_rirb_get_response to fall back to polling mode. Clearing the RIRB interrupt status ahead of the write pointer access ensures that this condition is avoided.
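The essential reordering, stripped of the surrounding retry loop (a sketch distilled from the hunk below):

    status = azx_readb(chip, RIRBSTS);
    if (status & RIRB_INT_MASK) {
            /* ack first: a response landing after this point re-raises
             * the interrupt instead of being cleared unseen */
            azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
            if (status & RIRB_INT_RESPONSE)
                    snd_hdac_bus_update_rirb(bus); /* then read the wp */
    }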
Signed-off-by: Mohan Kumar mkumard@nvidia.com Signed-off-by: Viswanath L viswanathl@nvidia.com Link: https://lore.kernel.org/r/1580983853-351-1-git-send-email-viswanathl@nvidia.... Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- sound/pci/hda/hda_controller.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 76b507058cb4d..5e6081750bd9b 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1159,16 +1159,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update)) active = true;
- /* clear rirb int */ status = azx_readb(chip, RIRBSTS); if (status & RIRB_INT_MASK) { + /* + * Clearing the interrupt status here ensures that no + * interrupt gets masked after the RIRB wp is read in + * snd_hdac_bus_update_rirb. This avoids a possible + * race condition where codec response in RIRB may + * remain unserviced by IRQ, eventually falling back + * to polling mode in azx_rirb_get_response. + */ + azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); active = true; if (status & RIRB_INT_RESPONSE) { if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) udelay(80); snd_hdac_bus_update_rirb(bus); } - azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); } } while (active && ++repeat < 10);
From: Qian Cai cai@lca.pw
[ Upstream commit 86b18aaa2b5b5bb48e609cd591b3d2d0fdbe0442 ]
sk_buff.qlen can be accessed concurrently as noticed by KCSAN,
BUG: KCSAN: data-race in __skb_try_recv_from_queue / unix_dgram_sendmsg
read to 0xffff8a1b1d8a81c0 of 4 bytes by task 5371 on cpu 96:
unix_dgram_sendmsg+0x9a9/0xb70 include/linux/skbuff.h:1821 net/unix/af_unix.c:1761
____sys_sendmsg+0x33e/0x370
___sys_sendmsg+0xa6/0xf0
__sys_sendmsg+0x69/0xf0
__x64_sys_sendmsg+0x51/0x70
do_syscall_64+0x91/0xb47
entry_SYSCALL_64_after_hwframe+0x49/0xbe
write to 0xffff8a1b1d8a81c0 of 4 bytes by task 1 on cpu 99:
__skb_try_recv_from_queue+0x327/0x410 include/linux/skbuff.h:2029
__skb_try_recv_datagram+0xbe/0x220
unix_dgram_recvmsg+0xee/0x850
____sys_recvmsg+0x1fb/0x210
___sys_recvmsg+0xa2/0xf0
__sys_recvmsg+0x66/0xf0
__x64_sys_recvmsg+0x51/0x70
do_syscall_64+0x91/0xb47
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Since only the read side operates locklessly, load tearing could introduce a logic bug in unix_recvq_full(). Fix it by adding a lockless variant of skb_queue_len() and unix_recvq_full() where READ_ONCE() is used on the read side while WRITE_ONCE() is used on the write side, similar to commit d7d16a89350a ("net: add skb_queue_empty_lockless()").
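The pairing in isolation (a sketch of the pattern the diff below applies): every lockless load of qlen goes through READ_ONCE() and every locked update through WRITE_ONCE(), so the compiler can neither tear nor refetch the 32-bit access.

    /* writer, still under the queue spinlock */
    WRITE_ONCE(list->qlen, list->qlen - 1);

    /* lockless reader, no queue lock held */
    if (READ_ONCE(list->qlen) > READ_ONCE(sk->sk_max_ack_backlog))
            return 1; /* queue full */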
Signed-off-by: Qian Cai cai@lca.pw Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- include/linux/skbuff.h | 14 +++++++++++++- net/unix/af_unix.c | 11 +++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 955e1370f033d..7afbbc7eaa4f4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1816,6 +1816,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; }
+/** + * skb_queue_len_lockless - get queue length + * @list_: list to measure + * + * Return the length of an &sk_buff queue. + * This variant can be used in lockless contexts. + */ +static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) +{ + return READ_ONCE(list_->qlen); +} + /** * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head * @list: queue to initialize @@ -2021,7 +2033,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next, *prev;
- list->qlen--; + WRITE_ONCE(list->qlen, list->qlen - 1); next = skb->next; prev = skb->prev; skb->next = skb->prev = NULL; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b3369d678f1af..ecadd9e482c46 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -189,11 +189,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk) return unix_peer(osk) == NULL || unix_our_peer(sk, osk); }
-static inline int unix_recvq_full(struct sock const *sk) +static inline int unix_recvq_full(const struct sock *sk) { return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; }
+static inline int unix_recvq_full_lockless(const struct sock *sk) +{ + return skb_queue_len_lockless(&sk->sk_receive_queue) > + READ_ONCE(sk->sk_max_ack_backlog); +} + struct sock *unix_peer_get(struct sock *s) { struct sock *peer; @@ -1724,7 +1730,8 @@ restart_locked: * - unix_peer(sk) == sk by time of get but disconnected before lock */ if (other != sk && - unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { + unlikely(unix_peer(other) != sk && + unix_recvq_full_lockless(other))) { if (timeo) { timeo = unix_wait_for_peer(other, timeo);
From: Trond Myklebust trondmy@gmail.com
[ Upstream commit a9ceb060b3cf37987b6162223575eaf4f4e0fc36 ]
perf does not know how to deal with a __builtin_bswap32() call and complains. All the other functions just store the xid etc. in host-endian form, so let's do that in the tracepoint for nfsd_file_acquire too.
Signed-off-by: Trond Myklebust trond.myklebust@hammerspace.com Signed-off-by: J. Bruce Fields bfields@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/nfsd/trace.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index ffc78a0e28b24..b073bdc2e6e89 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -228,7 +228,7 @@ TRACE_EVENT(nfsd_file_acquire, TP_ARGS(rqstp, hash, inode, may_flags, nf, status),
TP_STRUCT__entry( - __field(__be32, xid) + __field(u32, xid) __field(unsigned int, hash) __field(void *, inode) __field(unsigned int, may_flags) @@ -236,11 +236,11 @@ TRACE_EVENT(nfsd_file_acquire, __field(unsigned long, nf_flags) __field(unsigned char, nf_may) __field(struct file *, nf_file) - __field(__be32, status) + __field(u32, status) ),
TP_fast_assign( - __entry->xid = rqstp->rq_xid; + __entry->xid = be32_to_cpu(rqstp->rq_xid); __entry->hash = hash; __entry->inode = inode; __entry->may_flags = may_flags; @@ -248,15 +248,15 @@ TRACE_EVENT(nfsd_file_acquire, __entry->nf_flags = nf ? nf->nf_flags : 0; __entry->nf_may = nf ? nf->nf_may : 0; __entry->nf_file = nf ? nf->nf_file : NULL; - __entry->status = status; + __entry->status = be32_to_cpu(status); ),
TP_printk("xid=0x%x hash=0x%x inode=0x%p may_flags=%s ref=%d nf_flags=%s nf_may=%s nf_file=0x%p status=%u", - be32_to_cpu(__entry->xid), __entry->hash, __entry->inode, + __entry->xid, __entry->hash, __entry->inode, show_nf_may(__entry->may_flags), __entry->nf_ref, show_nf_flags(__entry->nf_flags), show_nf_may(__entry->nf_may), __entry->nf_file, - be32_to_cpu(__entry->status)) + __entry->status) );
DECLARE_EVENT_CLASS(nfsd_file_search_class,
From: Tony Cheng tony.cheng@amd.com
[ Upstream commit 85e148fb963d27152a14e6d399a47aed9bc99c15 ]
[Why] These registers should have been double buffered. As a SW workaround, we will have SW program the more aggressive (lower) values whenever we are updating this register, so we will not have underflow, at the expense of a less optimal request pattern.
[How] There is a driver bug where we don't check for 0, which is the uninitialized HW default. Since 0 is smaller than any value we need to program, the driver ends up not programming these registers.
Signed-off-by: Tony Cheng tony.cheng@amd.com Reviewed-by: Yongqiang Sun yongqiang.sun@amd.com Acked-by: Bhawanpreet Lakha Bhawanpreet.Lakha@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 35 +++++++++++++------ 1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index a00af513aa2b0..c8f77bd0ce8a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -73,32 +73,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline( struct _vcs_dpi_display_dlg_regs_st *dlg_attr) { struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); - uint32_t cur_value; + uint32_t refcyc_per_vm_group_vblank; + uint32_t refcyc_per_vm_req_vblank; + uint32_t refcyc_per_vm_group_flip; + uint32_t refcyc_per_vm_req_flip; + const uint32_t uninitialized_hw_default = 0;
- REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_group_vblank) + REG_GET(VBLANK_PARAMETERS_5, + REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank); + + if (refcyc_per_vm_group_vblank == uninitialized_hw_default || + refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank) REG_SET(VBLANK_PARAMETERS_5, 0, REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
REG_GET(VBLANK_PARAMETERS_6, - REFCYC_PER_VM_REQ_VBLANK, - &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_req_vblank) + REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank); + + if (refcyc_per_vm_req_vblank == uninitialized_hw_default || + refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank) REG_SET(VBLANK_PARAMETERS_6, 0, REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
- REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_group_flip) + REG_GET(FLIP_PARAMETERS_3, + REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip); + + if (refcyc_per_vm_group_flip == uninitialized_hw_default || + refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip) REG_SET(FLIP_PARAMETERS_3, 0, REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
- REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value); - if (cur_value > dlg_attr->refcyc_per_vm_req_flip) + REG_GET(FLIP_PARAMETERS_4, + REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip); + + if (refcyc_per_vm_req_flip == uninitialized_hw_default || + refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip) REG_SET(FLIP_PARAMETERS_4, 0, REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
REG_SET(FLIP_PARAMETERS_5, 0, REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c); + REG_SET(FLIP_PARAMETERS_6, 0, REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c); }
From: Steve Grubb sgrubb@redhat.com
[ Upstream commit 70b3eeed49e8190d97139806f6fbaf8964306cdb ]
Common Criteria calls out for any action that modifies the audit trail to be recorded. That usually is interpreted to mean insertion or removal of rules. It is not required to log modification of the inode information since the watch is still in effect. Additionally, if the rule is a never rule and the underlying file is one they do not want events for, they get an event for this bookkeeping update against their wishes.
Since no device/inode info is logged at insertion and no device/inode information is logged on update, there is nothing meaningful being communicated to the admin by the CONFIG_CHANGE updated_rules event. One can assume that the rule was not "modified" because it is still watching the intended target. If the device or inode cannot be resolved, then audit_panic is called which is sufficient.
The correct resolution is to drop logging config_update events since the watch is still in effect but just on another unknown inode.
Signed-off-by: Steve Grubb sgrubb@redhat.com Signed-off-by: Paul Moore paul@paul-moore.com Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/audit_watch.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 4508d5e0cf696..8a8fd732ff6d0 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -302,8 +302,6 @@ static void audit_update_watch(struct audit_parent *parent, if (oentry->rule.exe) audit_remove_mark(oentry->rule.exe);
- audit_watch_log_rule_change(r, owatch, "updated_rules"); - call_rcu(&oentry->rcu, audit_free_rule_rcu); }
From: Vasily Averin vvs@virtuozzo.com
[ Upstream commit 8d269a8e2a8f0bca89022f4ec98de460acb90365 ]
If a seq_file .next function does not change the position index, a read after some lseek can generate unexpected output.
$ dd if=/sys/fs/selinux/avc/cache_stats # usual output
lookups hits misses allocations reclaims frees
817223 810034 7189 7189 6992 7037
1934894 1926896 7998 7998 7632 7683
1322812 1317176 5636 5636 5456 5507
1560571 1551548 9023 9023 9056 9115
0+1 records in
0+1 records out
189 bytes copied, 5,1564e-05 s, 3,7 MB/s
$# read after lseek to middle of last line
$ dd if=/sys/fs/selinux/avc/cache_stats bs=180 skip=1
dd: /sys/fs/selinux/avc/cache_stats: cannot skip to specified offset
056 9115 <<<< end of last line
1560571 1551548 9023 9023 9056 9115 <<< whole last line once again
0+1 records in
0+1 records out
45 bytes copied, 8,7221e-05 s, 516 kB/s
$# read after lseek beyond end of file
$ dd if=/sys/fs/selinux/avc/cache_stats bs=1000 skip=1
dd: /sys/fs/selinux/avc/cache_stats: cannot skip to specified offset
1560571 1551548 9023 9023 9056 9115 <<<< generates whole last line
0+1 records in
0+1 records out
36 bytes copied, 9,0934e-05 s, 396 kB/s
https://bugzilla.kernel.org/show_bug.cgi?id=206283
Signed-off-by: Vasily Averin vvs@virtuozzo.com Acked-by: Stephen Smalley sds@tycho.nsa.gov Signed-off-by: Paul Moore paul@paul-moore.com Signed-off-by: Sasha Levin sashal@kernel.org --- security/selinux/selinuxfs.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index e6c7643c3fc08..e9eaff90cbccd 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -1508,6 +1508,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx) *idx = cpu + 1; return &per_cpu(avc_cache_stats, cpu); } + (*idx)++; return NULL; }
From: James Smart jsmart2021@gmail.com
[ Upstream commit 39c4f1a965a9244c3ba60695e8ff8da065ec6ac4 ]
The driver is occasionally seeing the following SLI Port error, requiring reset and reinit:
Port Status Event: ... error 1=0x52004a01, error 2=0x218
The failure means an RQ timeout. That is, the adapter had received asynchronous receive frames, ran out of buffer slots to place the frames, and the driver did not replenish the buffer slots before a timeout occurred. The driver should not be so slow in replenishing buffers that a timeout can occur.
When the driver has received all the frames of a sequence, it allocates an IOCB to put the frames in. In a situation where there was no IOCB available for the frames of a sequence, the RQ buffer corresponding to the first frame of the sequence was not returned to the FW. Eventually, with enough traffic encountering the situation, the timeout occurred.
Fix by releasing the buffer back to firmware whenever there is no IOCB for the first frame.
[mkp: typo]
Link: https://lore.kernel.org/r/20200128002312.16346-2-jsmart2021@gmail.com Signed-off-by: Dick Kennedy dick.kennedy@broadcom.com Signed-off-by: James Smart jsmart2021@gmail.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/lpfc/lpfc_sli.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a951e1c8165ed..e2877d2b3cc0d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -17866,6 +17866,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) list_add_tail(&iocbq->list, &first_iocbq->list); } } + /* Free the sequence's header buffer */ + if (!first_iocbq) + lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); + return first_iocbq; }
From: James Smart jsmart2021@gmail.com
[ Upstream commit 821bc882accaaaf1bbecf5c0ecef659443e3e8cb ]
When performing reset testing, the eq's list of related hwqs was getting corrupted. In cases where there is not a 1:1 eq to hwq mapping, the eq is shared. The eq maintains a list of the hwqs utilizing it, in case of cpu offlining and polling. During the reset, the hwqs are torn down so they can be recreated. The recreation was getting confused by seeing a stale non-null eq assignment left on the hwq, and the eq's list became corrupt.
Correct by clearing the hdwq eq assignment when the hwq is cleaned up.
Link: https://lore.kernel.org/r/20200128002312.16346-6-jsmart2021@gmail.com Signed-off-by: Dick Kennedy dick.kennedy@broadcom.com Signed-off-by: James Smart jsmart2021@gmail.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/lpfc/lpfc_init.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 95abffd9ad100..d4c83eca0ad2c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -9124,6 +9124,7 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba) /* Free the CQ/WQ corresponding to the Hardware Queue */ lpfc_sli4_queue_free(hdwq[idx].io_cq); lpfc_sli4_queue_free(hdwq[idx].io_wq); + hdwq[idx].hba_eq = NULL; hdwq[idx].io_cq = NULL; hdwq[idx].io_wq = NULL; if (phba->cfg_xpsgl && !phba->nvmet_support)
From: James Smart jsmart2021@gmail.com
[ Upstream commit 4cb9e1ddaa145be9ed67b6a7de98ca705a43f998 ]
Coverity reported a memory corruption error for the fdmi attributes routines:
CID 15768 [Memory Corruption] Out-of-bounds access on FDMI
Sloppy coding of the fdmi structures. In both the lpfc_fdmi_attr_def and lpfc_fdmi_reg_port_list structures, a field was placed at the start of a payload that may have variable content. The field was given an arbitrary type (uint32_t). The code then used the field name to derive an address, which it used in things such as memset and memcpy. The memset sizes or memcpy lengths were larger than the arbitrary type, thus coverity reported an error.
Fix by replacing the arbitrary fields with the real field structures describing the payload.
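A condensed before/after of the layout change (a sketch assembled from the lpfc_hw.h hunks below; the _old/_new suffixes are editorial, not driver names):

    /* _old: a 4-byte placeholder marks the start of a variable payload,
     * so memset(&ad->AttrValue, 0, 256) writes far past the field */
    struct lpfc_fdmi_attr_def_old {
            uint32_t AttrType:16;
            uint32_t AttrLen:16;
            uint32_t AttrValue;
    };

    /* _new: the payload carries its real type, so sizeof(*ae) and the
     * derived addresses match what memset()/memcpy() actually touch */
    struct lpfc_fdmi_attr_def_new {
            uint32_t AttrType:16;
            uint32_t AttrLen:16;
            struct lpfc_fdmi_attr_entry AttrValue;
    } __packed;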
Link: https://lore.kernel.org/r/20200128002312.16346-8-jsmart2021@gmail.com Signed-off-by: Dick Kennedy dick.kennedy@broadcom.com Signed-off-by: James Smart jsmart2021@gmail.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/lpfc/lpfc_ct.c | 137 ++++++++++++++++++------------------ drivers/scsi/lpfc/lpfc_hw.h | 36 +++++----- 2 files changed, 85 insertions(+), 88 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 4a09f21cb235f..e672fa9e842c9 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -2056,8 +2056,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2073,8 +2073,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
/* This string MUST be consistent with other FC platforms * supported by Broadcom. @@ -2098,8 +2098,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->SerialNumber, sizeof(ae->un.AttrString)); @@ -2120,8 +2120,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelName, sizeof(ae->un.AttrString)); @@ -2141,8 +2141,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelDesc, sizeof(ae->un.AttrString)); @@ -2164,8 +2164,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t i, j, incr, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
/* Convert JEDEC ID to ascii for hardware version */ incr = vp->rev.biuRev; @@ -2194,8 +2194,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, lpfc_release_version, sizeof(ae->un.AttrString)); @@ -2216,8 +2216,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
if (phba->sli_rev == LPFC_SLI_REV4) lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); @@ -2241,8 +2241,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); len = strnlen(ae->un.AttrString, @@ -2261,8 +2261,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s", init_utsname()->sysname, @@ -2284,7 +2284,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue;
ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE); size = FOURBYTES + sizeof(uint32_t); @@ -2300,8 +2300,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
len = lpfc_vport_symbolic_node_name(vport, ae->un.AttrString, 256); @@ -2319,7 +2319,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue;
/* Nothing is defined for this currently */ ae->un.AttrInt = cpu_to_be32(0); @@ -2336,7 +2336,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue;
/* Each driver instance corresponds to a single port */ ae->un.AttrInt = cpu_to_be32(1); @@ -2353,8 +2353,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fabric_nodename, sizeof(struct lpfc_name)); @@ -2372,8 +2372,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strlcat(ae->un.AttrString, phba->BIOSVersion, sizeof(ae->un.AttrString)); @@ -2393,7 +2393,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue;
/* Driver doesn't have access to this information */ ae->un.AttrInt = cpu_to_be32(0); @@ -2410,8 +2410,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "EMULEX", sizeof(ae->un.AttrString)); @@ -2433,8 +2433,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 32); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ @@ -2459,7 +2459,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue;
ae->un.AttrInt = 0; if (!(phba->hba_flag & HBA_FCOE_MODE)) { @@ -2513,7 +2513,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue;
if (!(phba->hba_flag & HBA_FCOE_MODE)) { switch (phba->fc_linkspeed) { @@ -2583,7 +2583,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue;
hsp = (struct serv_parm *)&vport->fc_sparam; ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) | @@ -2603,8 +2603,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "/sys/class/scsi_host/host%d", shost->host_no); @@ -2624,8 +2624,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", vport->phba->os_host_name); @@ -2645,8 +2645,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2663,8 +2663,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); @@ -2681,8 +2681,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256); len += (len & 3) ? (4 - (len & 3)) : 4; @@ -2700,7 +2700,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT); else @@ -2718,7 +2718,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2733,8 +2733,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrWWN, &vport->fabric_portname, sizeof(struct lpfc_name)); @@ -2751,8 +2751,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 32); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ @@ -2775,7 +2775,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Link Up - operational */ ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE); size = FOURBYTES + sizeof(uint32_t); @@ -2791,7 +2791,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; vport->fdmi_num_disc = lpfc_find_map_node(vport); ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc); size = FOURBYTES + sizeof(uint32_t); @@ -2807,7 +2807,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(vport->fc_myDID); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2822,8 +2822,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "Smart SAN Initiator", sizeof(ae->un.AttrString)); @@ -2843,8 +2843,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2864,8 +2864,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, "Smart SAN Version 2.0", sizeof(ae->un.AttrString)); @@ -2886,8 +2886,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae));
strncpy(ae->un.AttrString, phba->ModelName, sizeof(ae->un.AttrString)); @@ -2906,7 +2906,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue;
/* SRIOV (type 3) is not supported */ if (vport->vpi) @@ -2926,7 +2926,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(0); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2941,7 +2941,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size;
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(1); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -3089,7 +3089,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Registered Port List */ /* One entry (port) per adapter */ rh->rpl.EntryCnt = cpu_to_be32(1); - memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName, + memcpy(&rh->rpl.pe.PortName, + &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name));
/* point to the HBA attribute block */ diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 436cdc8c5ef46..b5642c8725938 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1340,25 +1340,8 @@ struct fc_rdp_res_frame { /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
-/* - * Registered Port List Format - */ -struct lpfc_fdmi_reg_port_list { - uint32_t EntryCnt; - uint32_t pe; /* Variable-length array */ -}; - - /* Definitions for HBA / Port attribute entries */
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */ - /* Structure is in Big Endian format */ - uint32_t AttrType:16; - uint32_t AttrLen:16; - uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */ -}; - - /* Attribute Entry */ struct lpfc_fdmi_attr_entry { union { @@ -1369,7 +1352,13 @@ struct lpfc_fdmi_attr_entry { } un; };
-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry) +struct lpfc_fdmi_attr_def { /* Defined in TLV format */ + /* Structure is in Big Endian format */ + uint32_t AttrType:16; + uint32_t AttrLen:16; + /* Marks start of Value (ATTRIBUTE_ENTRY) */ + struct lpfc_fdmi_attr_entry AttrValue; +} __packed;
/* * HBA Attribute Block @@ -1393,13 +1382,20 @@ struct lpfc_fdmi_hba_ident { struct lpfc_name PortName; };
+/* + * Registered Port List Format + */ +struct lpfc_fdmi_reg_port_list { + uint32_t EntryCnt; + struct lpfc_fdmi_port_entry pe; +} __packed; + /* * Register HBA(RHBA) */ struct lpfc_fdmi_reg_hba { struct lpfc_fdmi_hba_ident hi; - struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */ -/* struct lpfc_fdmi_attr_block ab; */ + struct lpfc_fdmi_reg_port_list rpl; };
/*
From: Wen Yang wen.yang99@zte.com.cn
[ Upstream commit 47340e46f34a3b1d80e40b43ae3d7a8da34a3541 ]
The call to of_find_matching_node() returns a node pointer with its refcount incremented, so it must be explicitly decremented after the last use.
Detected by coccinelle with the following warnings: drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:212:2-8: ERROR: missing of_node_put; acquired a node pointer with refcount incremented on line 209, but without a corresponding object release within this function. drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:237:1-7: ERROR: missing of_node_put; acquired a node pointer with refcount incremented on line 209, but without a corresponding object release within this function.
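For reference, a minimal sketch of the acquire/release pattern the fix applies (the init function and match table names are illustrative, not taken from the driver):

    static int __init example_boot_init(void)
    {
        struct device_node *dss;

        /* of_find_matching_node() returns the node with its refcount held */
        dss = of_find_matching_node(NULL, example_of_match);
        if (dss == NULL || !of_device_is_available(dss))
            goto put_node;  /* the reference must still be dropped */

        /* ... walk the device node ... */

    put_node:
        of_node_put(dss);   /* of_node_put(NULL) is a safe no-op */
        return 0;
    }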
Signed-off-by: Wen Yang wen.yang99@zte.com.cn Reviewed-by: Laurent Pinchart laurent.pinchart@ideasonboard.com Reviewed-by: Mukesh Ojha mojha@codeaurora.org Cc: Tomi Valkeinen tomi.valkeinen@ti.com Cc: David Airlie airlied@linux.ie Cc: Daniel Vetter daniel@ffwll.ch Cc: Sebastian Reichel sebastian.reichel@collabora.com Cc: Laurent Pinchart laurent.pinchart@ideasonboard.com Cc: dri-devel@lists.freedesktop.org Cc: linux-kernel@vger.kernel.org Cc: Markus Elfring Markus.Elfring@web.de Signed-off-by: Tomi Valkeinen tomi.valkeinen@ti.com Link: https://patchwork.freedesktop.org/patch/msgid/1554692313-28882-2-git-send-em... Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 31502857f013d..ce67891eedd46 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -192,7 +192,7 @@ static int __init omapdss_boot_init(void) dss = of_find_matching_node(NULL, omapdss_of_match);
if (dss == NULL || !of_device_is_available(dss)) - return 0; + goto put_node;
omapdss_walk_device(dss, true);
@@ -217,6 +217,8 @@ static int __init omapdss_boot_init(void) kfree(n); }
+put_node: + of_node_put(dss); return 0; }
From: Waiman Long longman@redhat.com
[ Upstream commit b3b9c187dc2544923a601733a85352b9ddaba9b3 ]
There are currently three counters to track the IRQ context of a lock chain - nr_hardirq_chains, nr_softirq_chains and nr_process_chains. They are incremented when a new lock chain is added, but they are not decremented when a lock chain is removed. That causes some of the statistic counts reported by /proc/lockdep_stats to be incorrect. Fix that by decrementing the right counter when a lock chain is removed.
Since inc_chains() no longer accesses hardirq_context and softirq_context directly, it is moved out from the CONFIG_TRACE_IRQFLAGS conditional compilation block.
Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use") Signed-off-by: Waiman Long longman@redhat.com Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Signed-off-by: Ingo Molnar mingo@kernel.org Link: https://lkml.kernel.org/r/20200206152408.24165-2-longman@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/locking/lockdep.c | 40 +++++++++++++++++------------- kernel/locking/lockdep_internals.h | 6 +++++ 2 files changed, 29 insertions(+), 17 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 9ab1a965c3b92..bca0f7f71cde4 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2302,18 +2302,6 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, return 0; }
-static void inc_chains(void) -{ - if (current->hardirq_context) - nr_hardirq_chains++; - else { - if (current->softirq_context) - nr_softirq_chains++; - else - nr_process_chains++; - } -} - #else
static inline int check_irq_usage(struct task_struct *curr, @@ -2321,13 +2309,27 @@ static inline int check_irq_usage(struct task_struct *curr, { return 1; } +#endif /* CONFIG_TRACE_IRQFLAGS */
-static inline void inc_chains(void) +static void inc_chains(int irq_context) { - nr_process_chains++; + if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) + nr_hardirq_chains++; + else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) + nr_softirq_chains++; + else + nr_process_chains++; }
-#endif /* CONFIG_TRACE_IRQFLAGS */ +static void dec_chains(int irq_context) +{ + if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT) + nr_hardirq_chains--; + else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT) + nr_softirq_chains--; + else + nr_process_chains--; +}
static void print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv) @@ -2847,7 +2849,7 @@ static inline int add_chain_cache(struct task_struct *curr,
hlist_add_head_rcu(&chain->entry, hash_head); debug_atomic_inc(chain_lookup_misses); - inc_chains(); + inc_chains(chain->irq_context);
return 1; } @@ -3600,7 +3602,8 @@ lock_used:
static inline unsigned int task_irq_context(struct task_struct *task) { - return 2 * !!task->hardirq_context + !!task->softirq_context; + return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context + + LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; }
static int separate_irq_context(struct task_struct *curr, @@ -4805,6 +4808,8 @@ recalc: return; /* Overwrite the chain key for concurrent RCU readers. */ WRITE_ONCE(chain->chain_key, chain_key); + dec_chains(chain->irq_context); + /* * Note: calling hlist_del_rcu() from inside a * hlist_for_each_entry_rcu() loop is safe. @@ -4826,6 +4831,7 @@ recalc: } *new_chain = *chain; hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key)); + inc_chains(new_chain->irq_context); #endif }
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index 18d85aebbb57f..a525368b8cf61 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -106,6 +106,12 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ = #define STACK_TRACE_HASH_SIZE 16384 #endif
+/* + * Bit definitions for lock_chain.irq_context + */ +#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0) +#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1) + #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
From: Dinh Nguyen dinguyen@kernel.org
[ Upstream commit cc26ed7be46c5f5fa45f3df8161ed7ca3c4d318c ]
Use the do_div() macro to perform the u64 division and guard against overflow if the result is too large for the unsigned long return type.
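A minimal sketch of the replacement, assuming a u64 dividend and a 32-bit divisor as in the driver (variable names follow the hunk below):

    #include <asm/div64.h>

    u64 vco_freq = parent_rate; /* dividend; do_div() modifies it in place */

    /*
     * do_div(n, base): n becomes n / base and the macro returns n % base.
     * base must fit in 32 bits, which refdiv does here.
     */
    do_div(vco_freq, refdiv);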
Signed-off-by: Dinh Nguyen dinguyen@kernel.org Link: https://lkml.kernel.org/r/20200114160726.19771-1-dinguyen@kernel.org Signed-off-by: Stephen Boyd sboyd@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/socfpga/clk-pll-s10.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 4705eb544f01b..8d7b1d0c46643 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -39,7 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, /* read VCO1 reg for numerator and denominator */ reg = readl(socfpgaclk->hw.reg); refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT; - vco_freq = (unsigned long long)parent_rate / refdiv; + + vco_freq = parent_rate; + do_div(vco_freq, refdiv);
/* Read mdiv and fdiv from the fdbck register */ reg = readl(socfpgaclk->hw.reg + 0x4);
From: Ayush Sawal ayush.sawal@chelsio.com
[ Upstream commit 9195189e00a7db55e7d448cee973cae87c5a3c71 ]
The libkcapi test that causes the kernel panic is the aead asynchronous vmsplice multiple test:
./bin/kcapi -v -d 4 -x 10 -c "ccm(aes)" -q 4edb58e8d5eb6bc711c43a6f3693daebde2e5524f1b55297abb29f003236e43d -t a7877c99 -n 674742abd0f5ba -k 2861fd0253705d7875c95ba8a53171b4 -a fb7bc304a3909e66e2e0c5ef952712dd884ce3e7324171369f2c5db1adc48c7d
This patch avoids DMA-mapping a zero-length sg, which causes the panic, by using sg_nents_for_len(), which maps only up to a specific length.
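A sketch of the mapping pattern, with a negative-return check added for illustration (the names follow the hunk below; the extra check is an assumption, not part of this patch):

    int nents = sg_nents_for_len(req->src, dst_size);

    /*
     * sg_nents_for_len() returns the number of entries needed to cover
     * dst_size bytes, or -EINVAL if the list is too short, so a trailing
     * zero-length entry is never handed to dma_map_sg().
     */
    if (nents < 0)
        return nents;

    if (!dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL))
        return -ENOMEM;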
Signed-off-by: Ayush Sawal ayush.sawal@chelsio.com Signed-off-by: Herbert Xu herbert@gondor.apana.org.au Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/crypto/chelsio/chcr_algo.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index fe2eadc0ce83d..2d30ed5a2674b 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -2480,8 +2480,9 @@ int chcr_aead_dma_map(struct device *dev, else reqctx->b0_dma = 0; if (req->src == req->dst) { - error = dma_map_sg(dev, req->src, sg_nents(req->src), - DMA_BIDIRECTIONAL); + error = dma_map_sg(dev, req->src, + sg_nents_for_len(req->src, dst_size), + DMA_BIDIRECTIONAL); if (!error) goto err; } else {
From: Felix Fietkau nbd@nbd.name
[ Upstream commit 9379df2fd9234e3b67a23101c2370c99f6af6d77 ]
During the cleanup of the aggregation session, a rx handler (or release timer) on another CPU might still hold a pointer to the reorder buffer and could attempt to release some packets. Clearing pointers during cleanup avoids a theoretical use-after-free bug here.
Signed-off-by: Felix Fietkau nbd@nbd.name Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index cbff0dfc96311..f8441fd65400c 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -268,6 +268,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) if (!skb) continue;
+ tid->reorder_buf[i] = NULL; tid->nframes--; dev_kfree_skb(skb); }
From: Felix Fietkau nbd@nbd.name
[ Upstream commit 93eaec7625f13cffb593b471405b017c7e64d4ee ]
Fixes a theoretical issue where it could overwrite an existing descriptor entry (and leak its skb).
Signed-off-by: Felix Fietkau nbd@nbd.name Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/mediatek/mt76/dma.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 6249a46c19762..026d996612fbe 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -261,10 +261,13 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, struct mt76_queue_buf buf; dma_addr_t addr;
+ if (q->queued + 1 >= q->ndesc - 1) + goto error; + addr = dma_map_single(dev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev->dev, addr))) - return -ENOMEM; + goto error;
buf.addr = addr; buf.len = skb->len; @@ -275,6 +278,10 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, spin_unlock_bh(&q->lock);
return 0; + +error: + dev_kfree_skb(skb); + return -ENOMEM; }
static int
From: Takashi Iwai tiwai@suse.de
[ Upstream commit e9a0ef0b5ddcbc0d56c65aefc0f18d16e6f71207 ]
Some USB-audio descriptors provide a bogus volume range (e.g. volume min and max are identical), which confuses user-space. This patch makes the driver skip such a control element.
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=206221 Link: https://lore.kernel.org/r/20200214144928.23628-1-tiwai@suse.de Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- sound/usb/mixer.c | 10 ++++++++++ 1 file changed, 10 insertions(+)
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 9079c380228fc..8aa96ed0b1b56 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1684,6 +1684,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer, /* get min/max values */ get_min_max_with_quirks(cval, 0, kctl);
+ /* skip a bogus volume range */ + if (cval->max <= cval->min) { + usb_audio_dbg(mixer->chip, + "[%d] FU [%s] skipped due to invalid volume\n", + cval->head.id, kctl->id.name); + snd_ctl_free_one(kctl); + return; + } + + if (control == UAC_FU_VOLUME) { check_mapped_dB(map, cval); if (cval->dBmin < cval->dBmax || !cval->initialized) {
From: Thomas Richter tmricht@linux.ibm.com
[ Upstream commit 2bbc83537614517730e9f2811195004b712de207 ]
This test places a kprobe to function getname_flags() in the kernel which has the following prototype:
struct filename *getname_flags(const char __user *filename, int flags, int *empty)
The 'filename' argument points to a filename located in user space memory.
Looking at commit 88903c464321c ("tracing/probe: Add ustring type for user-space string") the kprobe should indicate that user space memory is accessed.
Output before:
[root@m35lp76 perf]# ./perf test 66 67 66: Use vfs_getname probe to get syscall args filenames : FAILED! 67: Check open filename arg using perf trace + vfs_getname: FAILED! [root@m35lp76 perf]#
Output after:
[root@m35lp76 perf]# ./perf test 66 67 66: Use vfs_getname probe to get syscall args filenames : Ok 67: Check open filename arg using perf trace + vfs_getname: Ok [root@m35lp76 perf]#
Comments from Masami Hiramatsu:
This bug doesn't happen on x86 or other archs on which user address space and kernel address space is the same. On some arches (ppc64 in this case?) user address space is partially or completely the same as kernel address space.
(Yes, they switch the world when running into the kernel.) In this case, we need to use different data access functions for each space.
That is why I introduced the "ustring" type for kprobe events.
As far as I can see, Thomas's patch is sane. Thomas, could you show us your result on your test environment?
Comments from Thomas Richter:
Test results for s/390 included above.
Signed-off-by: Thomas Richter tmricht@linux.ibm.com Acked-by: Masami Hiramatsu mhiramat@kernel.org Tested-by: Arnaldo Carvalho de Melo acme@redhat.com Cc: Heiko Carstens heiko.carstens@de.ibm.com Cc: Sumanth Korikkar sumanthk@linux.ibm.com Cc: Vasily Gorbik gor@linux.ibm.com Link: http://lore.kernel.org/lkml/20200217102111.61137-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/tests/shell/lib/probe_vfs_getname.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh index 7cb99b433888b..c2cc42daf9242 100644 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh @@ -14,7 +14,7 @@ add_probe_vfs_getname() { if [ $had_vfs_getname -eq 1 ] ; then line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \ - perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string" + perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring" fi }
From: Bart Van Assche bvanassche@acm.org
[ Upstream commit fb3063d31995cc4cf1d47a406bb61d6fb1b1d58d ]
From the comment above the definition of the roundup_pow_of_two() macro:
The result is undefined when n == 0.
Hence only pass positive values to roundup_pow_of_two(). This patch fixes the following UBSAN complaint:
UBSAN: Undefined behaviour in ./include/linux/log2.h:57:13 shift exponent 64 is too large for 64-bit type 'long unsigned int' Call Trace: dump_stack+0xa5/0xe6 ubsan_epilogue+0x9/0x26 __ubsan_handle_shift_out_of_bounds.cold+0x4c/0xf9 rxe_qp_from_attr.cold+0x37/0x5d [rdma_rxe] rxe_modify_qp+0x59/0x70 [rdma_rxe] _ib_modify_qp+0x5aa/0x7c0 [ib_core] ib_modify_qp+0x3b/0x50 [ib_core] cma_modify_qp_rtr+0x234/0x260 [rdma_cm] __rdma_accept+0x1a7/0x650 [rdma_cm] nvmet_rdma_cm_handler+0x1286/0x14cd [nvmet_rdma] cma_cm_event_handler+0x6b/0x330 [rdma_cm] cma_ib_req_handler+0xe60/0x22d0 [rdma_cm] cm_process_work+0x30/0x140 [ib_cm] cm_req_handler+0x11f4/0x1cd0 [ib_cm] cm_work_handler+0xb8/0x344e [ib_cm] process_one_work+0x569/0xb60 worker_thread+0x7a/0x5d0 kthread+0x1e6/0x210 ret_from_fork+0x24/0x30
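To see where the shift exponent of 64 comes from, __roundup_pow_of_two() effectively reduces to the following (an illustrative reimplementation of the include/linux/log2.h helper, not code from this patch):

    static inline unsigned long demo_roundup_pow_of_two(unsigned long n)
    {
        /* for n == 0 this is 1UL << fls_long(ULONG_MAX) == 1UL << 64: UB */
        return 1UL << fls_long(n - 1);
    }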
Link: https://lore.kernel.org/r/20200217205714.26937-1-bvanassche@acm.org Fixes: 8700e3e7c485 ("Soft RoCE driver") Signed-off-by: Bart Van Assche bvanassche@acm.org Reviewed-by: Leon Romanovsky leonro@mellanox.com Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/sw/rxe/rxe_qp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index e2c6d1cedf416..f85273883794b 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -592,15 +592,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, int err;
if (mask & IB_QP_MAX_QP_RD_ATOMIC) { - int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); + int max_rd_atomic = attr->max_rd_atomic ? + roundup_pow_of_two(attr->max_rd_atomic) : 0;
qp->attr.max_rd_atomic = max_rd_atomic; atomic_set(&qp->req.rd_atomic, max_rd_atomic); }
if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { - int max_dest_rd_atomic = - __roundup_pow_of_two(attr->max_dest_rd_atomic); + int max_dest_rd_atomic = attr->max_dest_rd_atomic ? + roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
From: Paolo Bonzini pbonzini@redhat.com
[ Upstream commit 147f1a1fe5d7e6b01b8df4d0cbd6f9eaf6b6c73b ]
The "u" field in the event has three states, -1/0/1. Using u8 however means that comparison with -1 will always fail, so change to signed char.
Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/kvm/mmutrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 3c6522b84ff11..ffcd96fc02d0a 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -339,7 +339,7 @@ TRACE_EVENT( /* These depend on page entry type, so compute them now. */ __field(bool, r) __field(bool, x) - __field(u8, u) + __field(signed char, u) ),
TP_fast_assign(
From: wanpeng li wanpengli@tencent.com
[ Upstream commit c9dfd3fb08352d439f0399b6fabe697681d2638c ]
For the duration of mapping the eVMCS, it dereferences ->memslots without holding ->srcu or ->slots_lock when accessing the hv assist page. This patch fixes it by moving nested_sync_vmcs12_to_shadow to prepare_guest_switch, where the SRCU is already taken.
It can be reproduced by running kvm's evmcs_test selftest.
============================= WARNING: suspicious RCU usage 5.6.0-rc1+ #53 Tainted: G W IOE ----------------------------- ./include/linux/kvm_host.h:623 suspicious rcu_dereference_check() usage!
other info that might help us debug this:
rcu_scheduler_active = 2, debug_locks = 1 1 lock held by evmcs_test/8507: #0: ffff9ddd156d00d0 (&vcpu->mutex){+.+.}, at: kvm_vcpu_ioctl+0x85/0x680 [kvm]
stack backtrace: CPU: 6 PID: 8507 Comm: evmcs_test Tainted: G W IOE 5.6.0-rc1+ #53 Hardware name: Dell Inc. OptiPlex 7040/0JCTF8, BIOS 1.4.9 09/12/2016 Call Trace: dump_stack+0x68/0x9b kvm_read_guest_cached+0x11d/0x150 [kvm] kvm_hv_get_assist_page+0x33/0x40 [kvm] nested_enlightened_vmentry+0x2c/0x60 [kvm_intel] nested_vmx_handle_enlightened_vmptrld.part.52+0x32/0x1c0 [kvm_intel] nested_sync_vmcs12_to_shadow+0x439/0x680 [kvm_intel] vmx_vcpu_run+0x67a/0xe60 [kvm_intel] vcpu_enter_guest+0x35e/0x1bc0 [kvm] kvm_arch_vcpu_ioctl_run+0x40b/0x670 [kvm] kvm_vcpu_ioctl+0x370/0x680 [kvm] ksys_ioctl+0x235/0x850 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x77/0x780 entry_SYSCALL_64_after_hwframe+0x49/0xbe
Signed-off-by: Wanpeng Li wanpengli@tencent.com Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/kvm/vmx/vmx.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 3be65495aeb8a..a071eab3bab74 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1130,6 +1130,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) vmx->guest_msrs[i].mask);
} + + if (vmx->nested.need_vmcs12_to_shadow_sync) + nested_sync_vmcs12_to_shadow(vcpu); + if (vmx->guest_state_loaded) return;
@@ -6485,8 +6489,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_write32(PLE_WINDOW, vmx->ple_window); }
- if (vmx->nested.need_vmcs12_to_shadow_sync) - nested_sync_vmcs12_to_shadow(vcpu); + /* + * We did this in prepare_switch_to_guest, because it needs to + * be within srcu_read_lock. + */ + WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
From: Amelie Delaunay amelie.delaunay@st.com
[ Upstream commit dfc708812a2acfc0ca56f56233b3c3e7b0d4ffe7 ]
To avoid race with vchan_complete, use the race free way to terminate running transfer.
Move vdesc->node list_del in stm32_mdma_start_transfer instead of in stm32_mdma_xfer_end to avoid another race in vchan_dma_desc_free_list.
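Annotated, the race-free sequence follows the virt-dma convention roughly like this (a sketch mirroring the terminate_all hunk in the diff below):

    spin_lock_irqsave(&chan->vchan.lock, flags);
    if (chan->desc) {
        /*
         * Hand the in-flight descriptor back to virt-dma instead of
         * freeing it here, so vchan_complete() cannot race with
         * terminate_all over the same descriptor.
         */
        vchan_terminate_vdesc(&chan->desc->vdesc);
        if (chan->busy)
            stm32_mdma_stop(chan);
        chan->desc = NULL;
    }
    vchan_get_all_descriptors(&chan->vchan, &head);
    spin_unlock_irqrestore(&chan->vchan.lock, flags);
    vchan_dma_desc_free_list(&chan->vchan, &head);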
Signed-off-by: Amelie Delaunay amelie.delaunay@st.com Link: https://lore.kernel.org/r/20200127085334.13163-7-amelie.delaunay@st.com Signed-off-by: Vinod Koul vkoul@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/dma/stm32-mdma.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 5838311cf9900..ee1cbf3be75d5 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1127,6 +1127,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan) return; }
+ list_del(&vdesc->node); + chan->desc = to_stm32_mdma_desc(vdesc); hwdesc = chan->desc->node[0].hwdesc; chan->curr_hwdesc = 0; @@ -1242,8 +1244,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c) LIST_HEAD(head);
spin_lock_irqsave(&chan->vchan.lock, flags); - if (chan->busy) { - stm32_mdma_stop(chan); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vdesc); + if (chan->busy) + stm32_mdma_stop(chan); chan->desc = NULL; } vchan_get_all_descriptors(&chan->vchan, &head); @@ -1331,7 +1335,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) { - list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; chan->busy = false;
From: Dan Carpenter dan.carpenter@oracle.com
[ Upstream commit ef0ed05dcef8a74178a8b480cce23a377b1de2b8 ]
There was supposed to be a "ret = " assignment here, otherwise the error handling on the next line won't work.
Fixes: 64b5a49df486 ("[media] media: imx: Add Capture Device Interface") Signed-off-by: Dan Carpenter dan.carpenter@oracle.com Reviewed-by: Steve Longerbeam slongerbeam@gmail.com Signed-off-by: Hans Verkuil hverkuil-cisco@xs4all.nl Signed-off-by: Mauro Carvalho Chehab mchehab+huawei@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/staging/media/imx/imx-media-capture.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c index 46576e32581f0..d151cd6d31884 100644 --- a/drivers/staging/media/imx/imx-media-capture.c +++ b/drivers/staging/media/imx/imx-media-capture.c @@ -785,7 +785,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev) /* setup default format */ fmt_src.pad = priv->src_sd_pad; fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; - v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); + ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); if (ret) { v4l2_err(sd, "failed to get src_sd format\n"); goto unreg;
From: Dave Hansen dave.hansen@linux.intel.com
[ Upstream commit 16171bffc829272d5e6014bad48f680cb50943d9 ]
Alex Shi reported the pkey macros above arch_set_user_pkey_access() to be unused. They are unused, and even refer to a nonexistent CONFIG option.
But, they might have served a good use, which was to ensure that the code does not try to set values that would not fit in the PKRU register. As it stands, a too-large 'pkey' value would be likely to silently overflow the u32 new_pkru_bits.
Add a check to look for overflows. Also add a comment to remind any future developer to closely examine the types used to store pkey values if arch_max_pkey() ever changes.
This boots and passes the x86 pkey selftests.
Reported-by: Alex Shi alex.shi@linux.alibaba.com Signed-off-by: Dave Hansen dave.hansen@intel.com Signed-off-by: Borislav Petkov bp@suse.de Link: https://lkml.kernel.org/r/20200122165346.AD4DA150@viggo.jf.intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/include/asm/pkeys.h | 5 +++++ arch/x86/kernel/fpu/xstate.c | 9 +++++++-- 2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 19b137f1b3beb..2ff9b98812b76 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -4,6 +4,11 @@
#define ARCH_DEFAULT_PKEY 0
+/* + * If more than 16 keys are ever supported, a thorough audit + * will be necessary to ensure that the types that store key + * numbers and masks have sufficient capacity. + */ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 755eb26cbec04..735d1f1bbabc7 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -895,8 +895,6 @@ const void *get_xsave_field_ptr(int xfeature_nr)
#ifdef CONFIG_ARCH_HAS_PKEYS
-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) -#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) /* * This will go out and modify PKRU register to set the access * rights for @pkey to @init_val. @@ -915,6 +913,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, if (!boot_cpu_has(X86_FEATURE_OSPKE)) return -EINVAL;
+ /* + * This code should only be called with valid 'pkey' + * values originating from in-kernel users. Complain + * if a bad value is observed. + */ + WARN_ON_ONCE(pkey >= arch_max_pkey()); + /* Set the bits we need in PKRU: */ if (init_val & PKEY_DISABLE_ACCESS) new_pkru_bits |= PKRU_AD_BIT;
From: Thomas Gleixner tglx@linutronix.de
[ Upstream commit 8a37963c7ac9ecb7f86f8ebda020e3f8d6d7b8a0 ]
If an element is freed via RCU then recursion into BPF instrumentation functions is not a concern. The element is already detached from the map and the RCU callback does not hold any locks on which a kprobe, perf event or tracepoint attached BPF program could deadlock.
Signed-off-by: Thomas Gleixner tglx@linutronix.de Signed-off-by: Alexei Starovoitov ast@kernel.org Link: https://lore.kernel.org/bpf/20200224145643.259118710@linutronix.de Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/bpf/hashtab.c | 8 -------- 1 file changed, 8 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 039d64b1bfb7d..728ffec52cf36 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -664,15 +664,7 @@ static void htab_elem_free_rcu(struct rcu_head *head) struct htab_elem *l = container_of(head, struct htab_elem, rcu); struct bpf_htab *htab = l->htab;
- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while - * we're calling kfree, otherwise deadlock is possible if kprobes - * are placed somewhere inside of slub - */ - preempt_disable(); - __this_cpu_inc(bpf_prog_active); htab_elem_free(htab, l); - __this_cpu_dec(bpf_prog_active); - preempt_enable(); }
static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
From: Amelie Delaunay amelie.delaunay@st.com
[ Upstream commit d80cbef35bf89b763f06e03bb4ff8f933bf012c5 ]
To avoid race with vchan_complete, use the race free way to terminate running transfer.
Move the vdesc->node list_del into stm32_dma_start_transfer instead of stm32_dma_handle_chan_done to avoid another race in vchan_dma_desc_free_list.
Signed-off-by: Amelie Delaunay amelie.delaunay@st.com Link: https://lore.kernel.org/r/20200129153628.29329-9-amelie.delaunay@st.com Signed-off-by: Vinod Koul vkoul@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/dma/stm32-dma.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 5989b08935211..6c5771de32c67 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -488,8 +488,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
- if (chan->busy) { - stm32_dma_stop(chan); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vdesc); + if (chan->busy) + stm32_dma_stop(chan); chan->desc = NULL; }
@@ -545,6 +547,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) if (!vdesc) return;
+ list_del(&vdesc->node); + chan->desc = to_stm32_dma_desc(vdesc); chan->next_sg = 0; } @@ -622,7 +626,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) } else { chan->busy = false; if (chan->next_sg == chan->desc->num_sgs) { - list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; }
From: Dmitry Osipenko digetx@gmail.com
[ Upstream commit 8e84172e372bdca20c305d92d51d33640d2da431 ]
It's incorrect to check the channel's "busy" state without taking a lock. That shouldn't cause any real trouble, but it's always better not to have race conditions in the code.
Signed-off-by: Dmitry Osipenko digetx@gmail.com Acked-by: Jon Hunter jonathanh@nvidia.com Link: https://lore.kernel.org/r/20200209163356.6439-5-digetx@gmail.com Signed-off-by: Vinod Koul vkoul@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/dma/tegra20-apb-dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 4a750e29bfb53..3fe27dbde5b2b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1287,8 +1287,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
- if (tdc->busy) - tegra_dma_terminate_all(dc); + tegra_dma_terminate_all(dc);
spin_lock_irqsave(&tdc->lock, flags); list_splice_init(&tdc->pending_sg_req, &sg_req_list);
From: Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com
[ Upstream commit dff70572e9a3a1a01d9dbc2279faa784d95f41b6 ]
Before removing the slave device, disable pm_runtime to prevent any race condition with the resume being executed after the bus and slave devices are removed.
Since this pm_runtime_disable() is handled in common routines, implementations of Slave drivers do not need to call it in their .remove() routine.
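A minimal sketch of the ordering, assuming a per-child removal callback of the usual device_for_each_child() shape (the function name is illustrative):

    static int example_delete_slave(struct device *dev, void *data)
    {
        /*
         * Stop runtime PM first so a pending runtime resume cannot
         * execute against a device that is already being torn down.
         */
        pm_runtime_disable(dev);

        /* ... debugfs teardown, unregister, etc. ... */
        return 0;
    }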
Signed-off-by: Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com Link: https://lore.kernel.org/r/20200115000844.14695-8-pierre-louis.bossart@linux.... Signed-off-by: Vinod Koul vkoul@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/soundwire/bus.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index fc53dbe57f854..a90963812357c 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -113,6 +113,8 @@ static int sdw_delete_slave(struct device *dev, void *data) struct sdw_slave *slave = dev_to_sdw_dev(dev); struct sdw_bus *bus = slave->bus;
+ pm_runtime_disable(dev); + sdw_slave_debugfs_exit(slave);
mutex_lock(&bus->bus_lock);
From: Aric Cyr aric.cyr@amd.com
[ Upstream commit 6a6c4a4d459ecacc9013c45dcbf2bc9747fdbdbd ]
[Why] Since the i2c payload allocation can fail, we need to check the return codes.
[How] Clean up the i2c payload allocations and check for errors.
Signed-off-by: Aric Cyr aric.cyr@amd.com Reviewed-by: Joshua Aberback Joshua.Aberback@amd.com Acked-by: Rodrigo Siqueira Rodrigo.Siqueira@amd.com Acked-by: Harry Wentland harry.wentland@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../gpu/drm/amd/display/dc/core/dc_link_ddc.c | 52 +++++++++---------- 1 file changed, 25 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 51991bf26a93c..4c90d68db2307 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -126,22 +126,16 @@ struct aux_payloads { struct vector payloads; };
-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count) +static bool dal_ddc_i2c_payloads_create( + struct dc_context *ctx, + struct i2c_payloads *payloads, + uint32_t count) { - struct i2c_payloads *payloads; - - payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL); - - if (!payloads) - return NULL; - if (dal_vector_construct( &payloads->payloads, ctx, count, sizeof(struct i2c_payload))) - return payloads; - - kfree(payloads); - return NULL; + return true;
+ return false; }
static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p) @@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p) return p->payloads.count; }
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p) +static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p) { - if (!p || !*p) + if (!p) return; - dal_vector_destruct(&(*p)->payloads); - kfree(*p); - *p = NULL;
+ dal_vector_destruct(&p->payloads); }
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b)) @@ -521,9 +513,13 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads;
+ if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE) return false;
+ if (!payloads_num) + return false; + /*TODO: len of payload data for i2c and aux is uint8!!!!, * but we want to read 256 over i2c!!!!*/ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { @@ -556,23 +552,25 @@ bool dal_ddc_service_query_ddc_data(
ret = dc_link_aux_transfer_with_retries(ddc, &read_payload); } else { - struct i2c_payloads *payloads = - dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); + struct i2c_command command = {0}; + struct i2c_payloads payloads; + + if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num)) + return false;
- struct i2c_command command = { - .payloads = dal_ddc_i2c_payloads_get(payloads), - .number_of_payloads = 0, - .engine = DDC_I2C_COMMAND_ENGINE, - .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; + command.payloads = dal_ddc_i2c_payloads_get(&payloads); + command.number_of_payloads = 0; + command.engine = DDC_I2C_COMMAND_ENGINE; + command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
dal_ddc_i2c_payloads_add( - payloads, address, write_size, write_buf, true); + &payloads, address, write_size, write_buf, true);
dal_ddc_i2c_payloads_add( - payloads, address, read_size, read_buf, false); + &payloads, address, read_size, read_buf, false);
command.number_of_payloads = - dal_ddc_i2c_payloads_get_count(payloads); + dal_ddc_i2c_payloads_get_count(&payloads);
ret = dm_helpers_submit_i2c( ddc->ctx,
From: Laurent Pinchart laurent.pinchart@ideasonboard.com
[ Upstream commit 2a0a3ae17d36fa86dcf7c8e8d7b7f056ebd6c064 ]
When the DSS initialises its output DPI and SDI ports, failures don't clean up previously initialised ports. This can lead to a resource leak or memory corruption. Fix it.
Reported-by: Hans Verkuil hverkuil@xs4all.nl Signed-off-by: Laurent Pinchart laurent.pinchart@ideasonboard.com Reviewed-by: Tomi Valkeinen tomi.valkeinen@ti.com Acked-by: Sam Ravnborg sam@ravnborg.org Tested-by: Sebastian Reichel sebastian.reichel@collabora.com Reviewed-by: Sebastian Reichel sebastian.reichel@collabora.com Signed-off-by: Tomi Valkeinen tomi.valkeinen@ti.com Link: https://patchwork.freedesktop.org/patch/msgid/20200226112514.12455-22-lauren... Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/omapdrm/dss/dss.c | 43 +++++++++++++++++++------------ 1 file changed, 26 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 4bdd63b571002..ac93dae2a9c84 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1151,46 +1151,38 @@ static const struct dss_features dra7xx_dss_feats = { .has_lcd_clk_src = true, };
-static int dss_init_ports(struct dss_device *dss) +static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports) { struct platform_device *pdev = dss->pdev; struct device_node *parent = pdev->dev.of_node; struct device_node *port; unsigned int i; - int r;
- for (i = 0; i < dss->feat->num_ports; i++) { + for (i = 0; i < num_ports; i++) { port = of_graph_get_port_by_id(parent, i); if (!port) continue;
switch (dss->feat->ports[i]) { case OMAP_DISPLAY_TYPE_DPI: - r = dpi_init_port(dss, pdev, port, dss->feat->model); - if (r) - return r; + dpi_uninit_port(port); break; - case OMAP_DISPLAY_TYPE_SDI: - r = sdi_init_port(dss, pdev, port); - if (r) - return r; + sdi_uninit_port(port); break; - default: break; } } - - return 0; }
-static void dss_uninit_ports(struct dss_device *dss) +static int dss_init_ports(struct dss_device *dss) { struct platform_device *pdev = dss->pdev; struct device_node *parent = pdev->dev.of_node; struct device_node *port; - int i; + unsigned int i; + int r;
for (i = 0; i < dss->feat->num_ports; i++) { port = of_graph_get_port_by_id(parent, i); @@ -1199,15 +1191,32 @@ static void dss_uninit_ports(struct dss_device *dss)
switch (dss->feat->ports[i]) { case OMAP_DISPLAY_TYPE_DPI: - dpi_uninit_port(port); + r = dpi_init_port(dss, pdev, port, dss->feat->model); + if (r) + goto error; break; + case OMAP_DISPLAY_TYPE_SDI: - sdi_uninit_port(port); + r = sdi_init_port(dss, pdev, port); + if (r) + goto error; break; + default: break; } } + + return 0; + +error: + __dss_uninit_ports(dss, i); + return r; +} + +static void dss_uninit_ports(struct dss_device *dss) +{ + __dss_uninit_ports(dss, dss->feat->num_ports); }
static int dss_video_pll_probe(struct dss_device *dss)
From: Jiri Pirko jiri@mellanox.com
[ Upstream commit bb0858d8bc828ebc3eaa90be02a0f32bca3c2351 ]
It looks like the iavf code experienced a race condition: a developer copied the code before the check for chain 0 was moved into a helper. So use the tc_cls_can_offload_and_chain0() helper instead of the direct check, and move the check into the _cb() callback so this is similar to the i40e code.
Signed-off-by: Jiri Pirko jiri@mellanox.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/intel/iavf/iavf_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 34124c213d27c..222ae76809aa1 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3077,9 +3077,6 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter, static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, struct flow_cls_offload *cls_flower) { - if (cls_flower->common.chain_index) - return -EOPNOTSUPP; - switch (cls_flower->command) { case FLOW_CLS_REPLACE: return iavf_configure_clsflower(adapter, cls_flower); @@ -3103,6 +3100,11 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter, static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { + struct iavf_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return iavf_setup_tc_cls_flower(cb_priv, type_data);
From: James Morse james.morse@arm.com
[ Upstream commit 54f529a6806c9710947a4f2cdc15d6ea54121ccd ]
SDEI has private events that need registering and enabling on each CPU. CPUs can come and go while we are trying to do this. SDEI tries to avoid these problems by setting the reregister flag before the register call, so any CPUs that come online register the event too. Sticking plaster like this doesn't work, as if the register call fails, a CPU that subsequently comes online will register the event before reregister is cleared.
Take cpus_read_lock() around the register and enable calls. We don't want surprise CPUs to do the wrong thing if they race with these calls failing.
Signed-off-by: James Morse james.morse@arm.com Signed-off-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/firmware/arm_sdei.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index eb2df89d4924f..e497785cd99fe 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -412,14 +412,19 @@ int sdei_event_enable(u32 event_num) return -ENOENT; }
- spin_lock(&sdei_list_lock); - event->reenable = true; - spin_unlock(&sdei_list_lock);
+ cpus_read_lock(); if (event->type == SDEI_EVENT_TYPE_SHARED) err = sdei_api_event_enable(event->event_num); else err = sdei_do_cross_call(_local_event_enable, event); + + if (!err) { + spin_lock(&sdei_list_lock); + event->reenable = true; + spin_unlock(&sdei_list_lock); + } + cpus_read_unlock(); mutex_unlock(&sdei_events_lock);
return err; @@ -621,21 +626,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) break; }
- spin_lock(&sdei_list_lock); - event->reregister = true; - spin_unlock(&sdei_list_lock); - + cpus_read_lock(); err = _sdei_event_register(event); if (err) { - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - sdei_event_destroy(event); pr_warn("Failed to register event %u: %d\n", event_num, err); + } else { + spin_lock(&sdei_list_lock); + event->reregister = true; + spin_unlock(&sdei_list_lock); } + cpus_read_unlock(); } while (0); mutex_unlock(&sdei_events_lock);
From: Qian Cai cai@lca.pw
[ Upstream commit e00d996a4317aff5351c4338dd97d390225412c2 ]
Fields in "struct timer_rand_state" could be accessed concurrently. Lockless plain reads and writes result in data races. Fix them by adding pairs of READ|WRITE_ONCE(). The data races were reported by KCSAN,
BUG: KCSAN: data-race in add_timer_randomness / add_timer_randomness
write to 0xffff9f320a0a01d0 of 8 bytes by interrupt on cpu 22: add_timer_randomness+0x100/0x190 add_timer_randomness at drivers/char/random.c:1152 add_disk_randomness+0x85/0x280 scsi_end_request+0x43a/0x4a0 scsi_io_completion+0xb7/0x7e0 scsi_finish_command+0x1ed/0x2a0 scsi_softirq_done+0x1c9/0x1d0 blk_done_softirq+0x181/0x1d0 __do_softirq+0xd9/0x57c irq_exit+0xa2/0xc0 do_IRQ+0x8b/0x190 ret_from_intr+0x0/0x42 cpuidle_enter_state+0x15e/0x980 cpuidle_enter+0x69/0xc0 call_cpuidle+0x23/0x40 do_idle+0x248/0x280 cpu_startup_entry+0x1d/0x1f start_secondary+0x1b2/0x230 secondary_startup_64+0xb6/0xc0
no locks held by swapper/22/0. irq event stamp: 32871382 _raw_spin_unlock_irqrestore+0x53/0x60 _raw_spin_lock_irqsave+0x21/0x60 _local_bh_enable+0x21/0x30 irq_exit+0xa2/0xc0
read to 0xffff9f320a0a01d0 of 8 bytes by interrupt on cpu 2: add_timer_randomness+0xe8/0x190 add_disk_randomness+0x85/0x280 scsi_end_request+0x43a/0x4a0 scsi_io_completion+0xb7/0x7e0 scsi_finish_command+0x1ed/0x2a0 scsi_softirq_done+0x1c9/0x1d0 blk_done_softirq+0x181/0x1d0 __do_softirq+0xd9/0x57c irq_exit+0xa2/0xc0 do_IRQ+0x8b/0x190 ret_from_intr+0x0/0x42 cpuidle_enter_state+0x15e/0x980 cpuidle_enter+0x69/0xc0 call_cpuidle+0x23/0x40 do_idle+0x248/0x280 cpu_startup_entry+0x1d/0x1f start_secondary+0x1b2/0x230 secondary_startup_64+0xb6/0xc0
no locks held by swapper/2/0. irq event stamp: 37846304 _raw_spin_unlock_irqrestore+0x53/0x60 _raw_spin_lock_irqsave+0x21/0x60 _local_bh_enable+0x21/0x30 irq_exit+0xa2/0xc0
Reported by Kernel Concurrency Sanitizer on: Hardware name: HP ProLiant BL660c Gen9, BIOS I38 10/17/2018
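A minimal sketch of the marked-access pattern applied below (struct and function names simplified for illustration):

    struct timer_rand_state {
        long last_time;
        /* ... */
    };

    /*
     * May run concurrently on several CPUs without a lock: READ_ONCE()/
     * WRITE_ONCE() keep each access a single, untorn load or store and
     * tell KCSAN that the lockless access is intentional.
     */
    static long update_last_time(struct timer_rand_state *state, long now)
    {
        long delta = now - READ_ONCE(state->last_time);

        WRITE_ONCE(state->last_time, now);
        return delta;
    }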
Link: https://lore.kernel.org/r/1582648024-13111-1-git-send-email-cai@lca.pw Signed-off-by: Qian Cai cai@lca.pw Signed-off-by: Theodore Ts'o tytso@mit.edu Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/char/random.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c index e877c20e0ee02..75a8f7f572697 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1223,14 +1223,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * We take into account the first, second and third-order deltas * in order to make our estimate. */ - delta = sample.jiffies - state->last_time; - state->last_time = sample.jiffies; + delta = sample.jiffies - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, sample.jiffies);
- delta2 = delta - state->last_delta; - state->last_delta = delta; + delta2 = delta - READ_ONCE(state->last_delta); + WRITE_ONCE(state->last_delta, delta);
- delta3 = delta2 - state->last_delta2; - state->last_delta2 = delta2; + delta3 = delta2 - READ_ONCE(state->last_delta2); + WRITE_ONCE(state->last_delta2, delta2);
if (delta < 0) delta = -delta;
From: John Garry john.garry@huawei.com
[ Upstream commit a6dd255bdd7d00bbdbf78ba00bde9fc64f86c3a7 ]
Some released ACPI FW for Huawei boards incorrectly describes the port IO address range for child devices: it tells us the IO port max range is 0x3fff for each child device, which is not correct. The address range should be [e4:e8) or similar. With this incorrect upper range, the child device IO port resources overlap.
As such, the kernel thinks that the LPC host serial device is a child of the IPMI device:
root@(none)$ more /proc/ioports [...] 00ffc0e3-00ffffff : hisi-lpc-ipmi.0.auto 00ffc0e3-00ffc0e3 : ipmi_si 00ffc0e4-00ffc0e4 : ipmi_si 00ffc0e5-00ffc0e5 : ipmi_si 00ffc2f7-00ffffff : serial8250.1.auto 00ffc2f7-00ffc2fe : serial root@(none)$
They should both be siblings. Note that these are logical PIO addresses, which have a direct mapping from the FW IO port ranges.
This shows up as a real issue when we enable CONFIG_KASAN and CONFIG_DEBUG_TEST_DRIVER_REMOVE - we see use-after-free warnings in the host removal path:
================================================================== BUG: KASAN: use-after-free in release_resource+0x38/0xc8 Read of size 8 at addr ffff0026accdbc38 by task swapper/0/1
CPU: 2 PID: 1 Comm: swapper/0 Not tainted 5.5.0-rc6-00001-g68e186e77b5c-dirty #1593 Hardware name: Huawei Taishan 2180 /D03, BIOS Hisilicon D03 IT20 Nemo 2.0 RC0 03/30/2018 Call trace: dump_backtrace+0x0/0x290 show_stack+0x14/0x20 dump_stack+0xf0/0x14c print_address_description.isra.9+0x6c/0x3b8 __kasan_report+0x12c/0x23c kasan_report+0xc/0x18 __asan_load8+0x94/0xb8 release_resource+0x38/0xc8 platform_device_del.part.10+0x80/0xe0 platform_device_unregister+0x20/0x38 hisi_lpc_acpi_remove_subdev+0x10/0x20 device_for_each_child+0xc8/0x128 hisi_lpc_acpi_remove+0x4c/0xa8 hisi_lpc_remove+0xbc/0xc0 platform_drv_remove+0x3c/0x68 really_probe+0x174/0x548 driver_probe_device+0x7c/0x148 device_driver_attach+0x94/0xa0 __driver_attach+0xa4/0x110 bus_for_each_dev+0xe8/0x158 driver_attach+0x30/0x40 bus_add_driver+0x234/0x2f0 driver_register+0xbc/0x1d0 __platform_driver_register+0x7c/0x88 hisi_lpc_driver_init+0x18/0x20 do_one_initcall+0xb4/0x258 kernel_init_freeable+0x248/0x2c0 kernel_init+0x10/0x118 ret_from_fork+0x10/0x1c
...
The issue here is that the kernel created an incorrect parent-child resource dependency between two devices, and references the false parent node when deleting the second child device, when it had been deleted already.
Fix up the child device resources from FW to create proper IO port resource relationships for broken FW.
With this, the IO port layout looks more healthy:
root@(none)$ more /proc/ioports [...] 00ffc0e3-00ffc0e7 : hisi-lpc-ipmi.0.auto 00ffc0e3-00ffc0e3 : ipmi_si 00ffc0e4-00ffc0e4 : ipmi_si 00ffc0e5-00ffc0e5 : ipmi_si 00ffc2f7-00ffc2ff : serial8250.1.auto 00ffc2f7-00ffc2fe : serial
Signed-off-by: John Garry john.garry@huawei.com Signed-off-by: Wei Xu xuwei5@hisilicon.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/bus/hisi_lpc.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index 20c957185af20..2e9252d37a18f 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev, return 0; }
+/* + * Released firmware describes the IO port max address as 0x3fff, which is + * the max host bus address. Fixup to a proper range. This will probably + * never be fixed in firmware. + */ +static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, + struct resource *r) +{ + if (r->end != 0x3fff) + return; + + if (r->start == 0xe4) + r->end = 0xe4 + 0x04 - 1; + else if (r->start == 0x2f8) + r->end = 0x2f8 + 0x08 - 1; + else + dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n", + r); +} + /* * hisi_lpc_acpi_set_io_res - set the resources for a child * @child: the device node to be updated the I/O resource @@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child, return -ENOMEM; } count = 0; - list_for_each_entry(rentry, &resource_list, node) - resources[count++] = *rentry->res; + list_for_each_entry(rentry, &resource_list, node) { + resources[count] = *rentry->res; + hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]); + count++; + }
acpi_dev_free_resource_list(&resource_list);
From: Jaska Uimonen jaska.uimonen@linux.intel.com
[ Upstream commit 1919b42ca4ad75a2397081164661af3ce5a7b8f4 ]
In tx_wait_done the ipc payload is copied before the DSP transaction error code is checked. This might lead to corrupted data on the kernel side even though the error would be handled later. It is also pointless to copy the data in case of an error, so change the order of the error check and the copy.
Signed-off-by: Pierre-Louis Bossart pierre-louis.bossart@linux.intel.com Signed-off-by: Jaska Uimonen jaska.uimonen@linux.intel.com Link: https://lore.kernel.org/r/20200228231850.9226-3-pierre-louis.bossart@linux.i... Signed-off-by: Mark Brown broonie@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- sound/soc/sof/ipc.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c index e7b1a80e2a14c..f38f651da2246 100644 --- a/sound/soc/sof/ipc.c +++ b/sound/soc/sof/ipc.c @@ -215,15 +215,17 @@ static int tx_wait_done(struct snd_sof_ipc *ipc, struct snd_sof_ipc_msg *msg, snd_sof_trace_notify_for_error(ipc->sdev); ret = -ETIMEDOUT; } else { - /* copy the data returned from DSP */ ret = msg->reply_error; - if (msg->reply_size) - memcpy(reply_data, msg->reply_data, msg->reply_size); - if (ret < 0) + if (ret < 0) { dev_err(sdev->dev, "error: ipc error for 0x%x size %zu\n", hdr->cmd, msg->reply_size); - else + } else { ipc_log_header(sdev->dev, "ipc tx succeeded", hdr->cmd); + if (msg->reply_size) + /* copy the data returned from DSP */ + memcpy(reply_data, msg->reply_data, + msg->reply_size); + } }
return ret;
From: Takashi Iwai tiwai@suse.de
[ Upstream commit a3ea410cac41b19a5490aad7fe6d9a9a772e646e ]
Josef reported that his old-and-good Plextor ConvertX M402U video converter spews lots of WARNINGs on recent kernels, and it turned out that the device uses a bulk endpoint for interrupt handling, just like the 2250 board.
For fixing it, generalize the check with the proper verification of the endpoint instead of hard-coded board type check.
Fixes: 7e5219d18e93 ("[media] go7007: Fix 2250 urb type") Reported-and-tested-by: Josef Möllers josef.moellers@suse.com BugLink: https://bugzilla.suse.com/show_bug.cgi?id=1162583 BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=206427
Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Hans Verkuil hverkuil-cisco@xs4all.nl Signed-off-by: Mauro Carvalho Chehab mchehab+huawei@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/media/usb/go7007/go7007-usb.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c index ff2aa057c1fbc..f889c9d740cd1 100644 --- a/drivers/media/usb/go7007/go7007-usb.c +++ b/drivers/media/usb/go7007/go7007-usb.c @@ -1044,6 +1044,7 @@ static int go7007_usb_probe(struct usb_interface *intf, struct go7007_usb *usb; const struct go7007_usb_board *board; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_host_endpoint *ep; unsigned num_i2c_devs; char *name; int video_pipe, i, v_urb_len; @@ -1140,7 +1141,8 @@ static int go7007_usb_probe(struct usb_interface *intf, if (usb->intr_urb->transfer_buffer == NULL) goto allocfail;
- if (go->board_id == GO7007_BOARDID_SENSORAY_2250) + ep = usb->usbdev->ep_in[4]; + if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) usb_fill_bulk_urb(usb->intr_urb, usb->usbdev, usb_rcvbulkpipe(usb->usbdev, 4), usb->intr_urb->transfer_buffer, 2*sizeof(u16),
From: Alain Michaud alainm@chromium.org
[ Upstream commit 08bb4da90150e2a225f35e0f642cdc463958d696 ]
Some controllers have been observed to send zero'd events under some conditions. This change guards against that case and adds a warning to make the condition easier to diagnose.
Signed-off-by: Alain Michaud alainm@chromium.org Signed-off-by: Marcel Holtmann marcel@holtmann.org Signed-off-by: Sasha Levin sashal@kernel.org --- net/bluetooth/hci_event.c | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 7bf6860fed783..1bbeb14b8b64e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5853,6 +5853,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) u8 status = 0, event = hdr->evt, req_evt = 0; u16 opcode = HCI_OP_NOP;
+ if (!event) { + bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); + goto done; + } + if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; opcode = __le16_to_cpu(cmd_hdr->opcode); @@ -6064,6 +6069,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) req_complete_skb(hdev, status, opcode, orig_skb); }
+done: kfree_skb(orig_skb); kfree_skb(skb); hdev->stat.evt_rx++;
From: Wen Yang wenyang@linux.alibaba.com
[ Upstream commit 4cbbc3a0eeed675449b1a4d080008927121f3da3 ]
While unlikely, the divisor in scale64_check_overflow() could be >= 32 bits. do_div() truncates the divisor to 32 bits, at least on 32-bit platforms.
Use div64_u64() instead to avoid the truncation to 32-bit.
[ tglx: Massaged changelog ]
Signed-off-by: Wen Yang wenyang@linux.alibaba.com Signed-off-by: Thomas Gleixner tglx@linutronix.de Link: https://lkml.kernel.org/r/20200120100523.45656-1-wenyang@linux.alibaba.com Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/time/timekeeping.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index ca69290bee2a3..4fc2af4367a7b 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1005,9 +1005,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base) ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) return -EOVERFLOW; tmp *= mult; - rem *= mult;
- do_div(rem, div); + rem = div64_u64(rem * mult, div); *base = tmp + rem; return 0; }
From: Alexey Kardashevskiy aik@ozlabs.ru
[ Upstream commit c4b78169e3667413184c9a20e11b5832288a109f ]
The last jump to free_exit in mm_iommu_do_alloc() happens after page pointers in struct mm_iommu_table_group_mem_t were already converted to physical addresses. Thus calling put_page() on these physical addresses will likely crash.
This moves the loop which calculates the pageshift and converts page struct pointers to physical addresses to after the point where we can no longer fail, thus eliminating the need to convert the pointers back on error.
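The general rule the fix applies is worth spelling out: an in-place, irreversible transformation must happen after the last point that can fail, so the error path never sees converted entries. A schematic sketch of that ordering, not the driver code (names, the final_check() step, and the pointer-to-"physical" cast are all illustrative):

    #include <stdlib.h>

    struct entry {
            void *page;             /* reference the error path must release */
            unsigned long phys;
    };

    /* Stands in for the last step that can fail before the conversion. */
    static int final_check(int fail)
    {
            return fail ? -1 : 0;
    }

    static int do_alloc(struct entry *e, int n, int fail)
    {
            int i;

            if (final_check(fail))
                    goto free_exit;         /* e[i].page is still intact here */

            /* Past the last failure point: safe to convert in place. */
            for (i = 0; i < n; i++)
                    e[i].phys = (unsigned long)e[i].page;   /* page_to_pfn()-like */

            return 0;

    free_exit:
            for (i = 0; i < n; i++)
                    free(e[i].page);        /* valid because nothing was converted */
            return -1;
    }

    int main(void)
    {
            struct entry e[2] = { { malloc(8), 0 }, { malloc(8), 0 } };
            int ret = do_alloc(e, 2, 0);

            if (ret == 0) {                 /* sketch keeps pointers valid */
                    free(e[0].page);
                    free(e[1].page);
            }
            return ret ? 1 : 0;
    }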
Fixes: eb9d7a62c386 ("powerpc/mm_iommu: Fix potential deadlock") Reported-by: Jan Kara jack@suse.cz Signed-off-by: Alexey Kardashevskiy aik@ozlabs.ru Signed-off-by: Michael Ellerman mpe@ellerman.id.au Link: https://lore.kernel.org/r/20191223060351.26359-1-aik@ozlabs.ru Signed-off-by: Sasha Levin sashal@kernel.org --- arch/powerpc/mm/book3s64/iommu_api.c | 39 +++++++++++++++------------- 1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index 56cc845205779..ef164851738b8 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c @@ -121,24 +121,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, goto free_exit; }
- pageshift = PAGE_SHIFT; - for (i = 0; i < entries; ++i) { - struct page *page = mem->hpages[i]; - - /* - * Allow to use larger than 64k IOMMU pages. Only do that - * if we are backed by hugetlb. - */ - if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) - pageshift = page_shift(compound_head(page)); - mem->pageshift = min(mem->pageshift, pageshift); - /* - * We don't need struct page reference any more, switch - * to physical address. - */ - mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; - } - good_exit: atomic64_set(&mem->mapped, 1); mem->used = 1; @@ -158,6 +140,27 @@ good_exit: } }
+ if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) { + /* + * Allow to use larger than 64k IOMMU pages. Only do that + * if we are backed by hugetlb. Skip device memory as it is not + * backed with page structs. + */ + pageshift = PAGE_SHIFT; + for (i = 0; i < entries; ++i) { + struct page *page = mem->hpages[i]; + + if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) + pageshift = page_shift(compound_head(page)); + mem->pageshift = min(mem->pageshift, pageshift); + /* + * We don't need struct page reference any more, switch + * to physical address. + */ + mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; + } + } + list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
mutex_unlock(&mem_list_mutex);
From: Wenjing Liu Wenjing.Liu@amd.com
[ Upstream commit df8e34ac27e8a0d8dce364628226c5619693c3fd ]
[why] When combining two or more pipes in DSC mode, there will always be more than 1 slice per line. In this case, as per DSC rules, the sink device expects the ICH to be reset at the end of each slice line (i.e. ICH_RESET_AT_END_OF_LINE must be configured based on the number of slices at the output of ODM). It is recommended that software set ICH_RESET_AT_END_OF_LINE = 0xF for each DSC in the ODM combine. However, the current code only sets ICH_RESET_AT_END_OF_LINE = 0xF when the number of slices per DSC engine is greater than 1, instead of basing it on the number of slices at the output after ODM combine.
[how] Add is_odm to the DSC config. Set ICH_RESET_AT_END_OF_LINE = 0xF if either is_odm is set or the number of slices per DSC engine is greater than 1.
Signed-off-by: Wenjing Liu Wenjing.Liu@amd.com Reviewed-by: Nikola Cornij Nikola.Cornij@amd.com Acked-by: Rodrigo Siqueira Rodrigo.Siqueira@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 2 ++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 1 + 4 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 5d6cbaebebc03..5641a9477d291 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -400,6 +400,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -504,6 +505,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
DC_LOG_DSC(" "); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 01040501d40e3..5c45c39662fbb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -351,6 +351,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_ dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable; dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth; dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; + dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
// TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. @@ -513,7 +514,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size; - reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf; }
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 05b98eadc2899..bfa01137f8e09 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2275,6 +2275,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index 1ddb1c6fa1493..75ecfdc5d5cd2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -36,6 +36,7 @@ struct dsc_config { uint32_t pic_height; enum dc_pixel_encoding pixel_encoding; enum dc_color_depth color_depth; /* Bits per component */ + bool is_odm; struct dc_dsc_config dc_dsc_cfg; };
From: Qiujun Huang hqjagain@gmail.com
[ Upstream commit dce8e237100f60c28cc66effb526ba65a01d8cb3 ]
KCSAN found that inode->i_disksize can be accessed concurrently.
BUG: KCSAN: data-race in ext4_mark_iloc_dirty / ext4_write_end
write (marked) to 0xffff8b8932f40090 of 8 bytes by task 66792 on cpu 0: ext4_write_end+0x53f/0x5b0 ext4_da_write_end+0x237/0x510 generic_perform_write+0x1c4/0x2a0 ext4_buffered_write_iter+0x13a/0x210 ext4_file_write_iter+0xe2/0x9b0 new_sync_write+0x29c/0x3a0 __vfs_write+0x92/0xa0 vfs_write+0xfc/0x2a0 ksys_write+0xe8/0x140 __x64_sys_write+0x4c/0x60 do_syscall_64+0x8a/0x2a0 entry_SYSCALL_64_after_hwframe+0x44/0xa9
read to 0xffff8b8932f40090 of 8 bytes by task 14414 on cpu 1: ext4_mark_iloc_dirty+0x716/0x1190 ext4_mark_inode_dirty+0xc9/0x360 ext4_convert_unwritten_extents+0x1bc/0x2a0 ext4_convert_unwritten_io_end_vec+0xc5/0x150 ext4_put_io_end+0x82/0x130 ext4_writepages+0xae7/0x16f0 do_writepages+0x64/0x120 __writeback_single_inode+0x7d/0x650 writeback_sb_inodes+0x3a4/0x860 __writeback_inodes_wb+0xc4/0x150 wb_writeback+0x43f/0x510 wb_workfn+0x3b2/0x8a0 process_one_work+0x39b/0x7e0 worker_thread+0x88/0x650 kthread+0x1d4/0x1f0 ret_from_fork+0x35/0x40
The plain read is outside of the inode->i_data_sem critical section, which results in a data race. Fix it by adding READ_ONCE().
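READ_ONCE() forces a single, non-torn load and stops the compiler from re-reading the location. A rough user-space approximation of the pattern (the kernel's macro in include/linux/compiler.h is more general; the field and value here are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Rough user-space stand-in for the kernel's READ_ONCE() macro. */
    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    static uint64_t i_disksize;     /* imagine a writer updating this concurrently */

    int main(void)
    {
            uint64_t raw_isize = 4096;

            /*
             * The volatile access forces exactly one load; without it the
             * compiler may read i_disksize twice and observe two different
             * values if a concurrent writer races with us.
             */
            if (READ_ONCE(i_disksize) != raw_isize)
                    printf("sizes differ, set need_datasync\n");

            return 0;
    }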
Signed-off-by: Qiujun Huang hqjagain@gmail.com Link: https://lore.kernel.org/r/1582556566-3909-1-git-send-email-hqjagain@gmail.co... Signed-off-by: Theodore Ts'o tytso@mit.edu Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ext4/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a284d99a1ee57..95a8a04c77dd3 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5315,7 +5315,7 @@ static int ext4_do_update_inode(handle_t *handle, raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); - if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { + if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) { ext4_isize_set(raw_inode, ei->i_disksize); need_datasync = 1; }
From: John Garry john.garry@huawei.com
[ Upstream commit 3f5777fbaf04c58d940526a22a2e0c813c837936 ]
The memory for the global pointer is never freed during normal program execution, so let's free it on exit from the main function as good programming practice.
A stray blank line is also removed.
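The fix converts the early "return 1" exits into a single exit path that frees the global before returning — the usual goto-cleanup idiom. A generic sketch of that idiom (the run() function and its failure condition are illustrative, not jevents code):

    #include <stdlib.h>
    #include <string.h>

    static char *mapfile;   /* global resource, as in jevents.c */

    /* Single-exit idiom: every failure after the allocation funnels
     * through one label that frees the global. */
    static int run(int fail)
    {
            int ret = 0;

            mapfile = strdup("mapfile.csv");
            if (!mapfile)
                    return 1;               /* nothing to clean up yet */

            if (fail) {
                    ret = 1;
                    goto out_free_mapfile;  /* no early return that would leak */
            }

            /* ... normal processing ... */

    out_free_mapfile:
            free(mapfile);
            return ret;
    }

    int main(void)
    {
            return run(0);
    }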
Reported-by: Jiri Olsa jolsa@redhat.com Signed-off-by: John Garry john.garry@huawei.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Andi Kleen ak@linux.intel.com Cc: James Clark james.clark@arm.com Cc: Joakim Zhang qiangqing.zhang@nxp.com Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Will Deacon will@kernel.org Cc: linuxarm@huawei.com Link: http://lore.kernel.org/lkml/1583406486-154841-2-git-send-email-john.garry@hu... Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/pmu-events/jevents.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index d36ae65ae3330..f4a0d72246cb7 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -1068,10 +1068,9 @@ static int process_one_file(const char *fpath, const struct stat *sb, */ int main(int argc, char *argv[]) { - int rc; + int rc, ret = 0; int maxfds; char ldirname[PATH_MAX]; - const char *arch; const char *output_file; const char *start_dirname; @@ -1142,7 +1141,8 @@ int main(int argc, char *argv[]) /* Make build fail */ fclose(eventsfp); free_arch_std_events(); - return 1; + ret = 1; + goto out_free_mapfile; } else if (rc) { goto empty_map; } @@ -1160,14 +1160,17 @@ int main(int argc, char *argv[]) /* Make build fail */ fclose(eventsfp); free_arch_std_events(); - return 1; + ret = 1; }
- return 0; + + goto out_free_mapfile;
empty_map: fclose(eventsfp); create_empty_mapping(output_file); free_arch_std_events(); - return 0; +out_free_mapfile: + free(mapfile); + return ret; }
From: "Kirill A. Shutemov" kirill@shutemov.name
[ Upstream commit c3e5ea6ee574ae5e845a40ac8198de1fb63bb3ab ]
Jeff Moyer has reported that one of xfstests triggers a warning when run on DAX-enabled filesystem:
WARNING: CPU: 76 PID: 51024 at mm/memory.c:2317 wp_page_copy+0xc40/0xd50 ... wp_page_copy+0x98c/0xd50 (unreliable) do_wp_page+0xd8/0xad0 __handle_mm_fault+0x748/0x1b90 handle_mm_fault+0x120/0x1f0 __do_page_fault+0x240/0xd70 do_page_fault+0x38/0xd0 handle_page_fault+0x10/0x30
The warning happens on a failed __copy_from_user_inatomic() which tries to copy data into a CoW page.
This happens because of race between MADV_DONTNEED and CoW page fault:
CPU0                                CPU1
handle_mm_fault()
  do_wp_page()
    wp_page_copy()
      do_wp_page()
                                    madvise(MADV_DONTNEED)
                                      zap_page_range()
                                        zap_pte_range()
                                          ptep_get_and_clear_full()
                                          <TLB flush>
      __copy_from_user_inatomic()
      sees empty PTE and fails
      WARN_ON_ONCE(1)
      clear_page()
The solution is to retry __copy_from_user_inatomic() under the PTL after checking that the PTE matches orig_pte.
The second copy attempt can still fail, e.g. due to a non-readable PTE, but there's nothing reasonable we can do about it, except clear the CoW page.
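The shape of the fix is a common lockless-fast-path pattern: try without the lock, and on failure re-validate and retry under the lock before giving up. Schematically (the stub functions stand in for the real mm primitives and are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the real mm primitives; illustrative only. */
    static bool try_copy(void)          { return false; }  /* simulate a failure */
    static bool pte_unchanged(void)     { return true; }
    static void lock_ptl(void)          { }
    static void unlock_ptl(void)        { }
    static void clear_destination(void) { puts("handing back zeroes"); }

    /* Lockless fast path first; on failure, re-validate and retry under
     * the lock. Returns false to ask the caller to retry the fault. */
    static bool copy_or_clear(void)
    {
            if (try_copy())
                    return true;            /* fast path succeeded */

            lock_ptl();
            if (!pte_unchanged()) {         /* PTE changed: retry the fault */
                    unlock_ptl();
                    return false;
            }
            if (!try_copy())                /* second attempt, now under the lock */
                    clear_destination();    /* last resort, as in the patch */
            unlock_ptl();
            return true;
    }

    int main(void)
    {
            return copy_or_clear() ? 0 : 1;
    }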
Reported-by: Jeff Moyer jmoyer@redhat.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Kirill A. Shutemov kirill.shutemov@linux.intel.com Tested-by: Jeff Moyer jmoyer@redhat.com Cc: stable@vger.kernel.org Cc: Justin He Justin.He@arm.com Cc: Dan Williams dan.j.williams@intel.com Link: http://lkml.kernel.org/r/20200218154151.13349-1-kirill.shutemov@linux.intel.... Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/memory.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c index 9ea917e28ef4e..2157bb28117ac 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2163,7 +2163,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src, bool ret; void *kaddr; void __user *uaddr; - bool force_mkyoung; + bool locked = false; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; unsigned long addr = vmf->address; @@ -2188,11 +2188,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src, * On architectures with software "accessed" bits, we would * take a double page fault, so mark it accessed here. */ - force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte); - if (force_mkyoung) { + if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) { pte_t entry;
vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + locked = true; if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { /* * Other thread has already handled the fault @@ -2216,18 +2216,37 @@ static inline bool cow_user_page(struct page *dst, struct page *src, * zeroes. */ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { + if (locked) + goto warn; + + /* Re-validate under PTL if the page is still mapped */ + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + locked = true; + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + /* The PTE changed under us. Retry page fault. */ + ret = false; + goto pte_unlock; + } + /* - * Give a warn in case there can be some obscure - * use-case + * The same page can be mapped back since last copy attampt. + * Try to copy again under PTL. */ - WARN_ON_ONCE(1); - clear_page(kaddr); + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { + /* + * Give a warn in case there can be some obscure + * use-case + */ +warn: + WARN_ON_ONCE(1); + clear_page(kaddr); + } }
ret = true;
pte_unlock: - if (force_mkyoung) + if (locked) pte_unmap_unlock(vmf->pte, vmf->ptl); kunmap_atomic(kaddr); flush_dcache_page(dst);
From: John Clements john.clements@amd.com
[ Upstream commit 1b3460a8b19688ad3033b75237d40fa580a5a953 ]
Mitigates a race condition on BACO reset between the GPU bootcode and driver reload.
Reviewed-by: Hawking Zhang Hawking.Zhang@amd.com Signed-off-by: John Clements john.clements@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/amdgpu/atom.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index dd30f4e61a8cd..cae426c7c0863 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if ((jiffies_to_msecs(cjiffies) > 5000)) { - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); + if ((jiffies_to_msecs(cjiffies) > 10000)) { + DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); ctx->abort = true; } } else {
From: Tony Lindgren tony@atomide.com
[ Upstream commit 55be2f50336f67800513b46c5ba6270e4ed0e784 ]
We need to check for errors when calling cpu_pm_enter() and cpu_cluster_pm_enter(), and bail out on error, as otherwise we can enter a deeper idle state than desired.
I'm not aware of the lack of error handling causing issues yet, but we need this at least for blocking deeper idle states when a GPIO instance has pending interrupts.
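The key constraint is that the enter/exit notifier calls stay balanced: on a failed enter, unwind only what already succeeded and skip the deeper idle state. A schematic sketch of that structure (the stubs merely simulate a cluster-enter failure; this is not the OMAP code):

    #include <stdio.h>

    /* Stubs for the CPU PM notifier calls; 0 means success. */
    static int cpu_pm_enter(void)         { return 0; }
    static void cpu_pm_exit(void)         { }
    static int cpu_cluster_pm_enter(void) { return -1; }  /* simulate failure */
    static void cpu_cluster_pm_exit(void) { }
    static void enter_lowpower(void)      { puts("deep idle"); }

    static void idle(void)
    {
            if (cpu_pm_enter())
                    return;                 /* nothing to unwind yet */

            if (cpu_cluster_pm_enter())
                    goto cpu_pm_out;        /* unwind only what succeeded */

            enter_lowpower();               /* skipped when an enter fails */

            cpu_cluster_pm_exit();
    cpu_pm_out:
            cpu_pm_exit();
    }

    int main(void)
    {
            idle();
            return 0;
    }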
Cc: Dave Gerlach d-gerlach@ti.com Cc: Grygorii Strashko grygorii.strashko@ti.com Cc: Keerthy j-keerthy@ti.com Cc: Ladislav Michl ladis@linux-mips.org Cc: Russell King rmk+kernel@armlinux.org.uk Cc: Tero Kristo t-kristo@ti.com Signed-off-by: Tony Lindgren tony@atomide.com Link: https://lore.kernel.org/r/20200304225433.37336-2-tony@atomide.com Signed-off-by: Linus Walleij linus.walleij@linaro.org Signed-off-by: Sasha Levin sashal@kernel.org --- arch/arm/mach-omap2/cpuidle34xx.c | 9 +++++++-- arch/arm/mach-omap2/cpuidle44xx.c | 26 +++++++++++++++++--------- arch/arm/mach-omap2/pm34xx.c | 8 ++++++-- 3 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index 532a3e4b98c6f..090a8aafb25e1 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c @@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, int index) { struct omap3_idle_statedata *cx = &omap3_idle_data[index]; + int error;
if (omap_irq_pending() || need_resched()) goto return_sleep_time; @@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev, * Call idle CPU PM enter notifier chain so that * VFP context is saved. */ - if (cx->mpu_state == PWRDM_POWER_OFF) - cpu_pm_enter(); + if (cx->mpu_state == PWRDM_POWER_OFF) { + error = cpu_pm_enter(); + if (error) + goto out_clkdm_set; + }
/* Execute ARM wfi */ omap_sram_idle(); @@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev, pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF) cpu_pm_exit();
+out_clkdm_set: /* Re-allow idle for C1 */ if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]); diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index fe75d4fa60738..6f5f89711f256 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, { struct idle_statedata *cx = state_ptr + index; u32 mpuss_can_lose_context = 0; + int error;
/* * CPU0 has to wait and stay ON until CPU1 is OFF state. @@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, * Call idle CPU PM enter notifier chain so that * VFP and per CPU interrupt context is saved. */ - cpu_pm_enter(); + error = cpu_pm_enter(); + if (error) + goto cpu_pm_out;
if (dev->cpu == 0) { pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); @@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, * Call idle CPU cluster PM enter notifier chain * to save GIC and wakeupgen context. */ - if (mpuss_can_lose_context) - cpu_cluster_pm_enter(); + if (mpuss_can_lose_context) { + error = cpu_cluster_pm_enter(); + if (error) + goto cpu_cluster_pm_out; + } }
omap4_enter_lowpower(dev->cpu, cx->cpu_state); cpu_done[dev->cpu] = true;
+cpu_cluster_pm_out: /* Wakeup CPU1 only if it is not offlined */ if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
@@ -197,12 +204,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, } }
- /* - * Call idle CPU PM exit notifier chain to restore - * VFP and per CPU IRQ context. - */ - cpu_pm_exit(); - /* * Call idle CPU cluster PM exit notifier chain * to restore GIC and wakeupgen context. @@ -210,6 +211,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, if (dev->cpu == 0 && mpuss_can_lose_context) cpu_cluster_pm_exit();
+ /* + * Call idle CPU PM exit notifier chain to restore + * VFP and per CPU IRQ context. + */ + cpu_pm_exit(); + +cpu_pm_out: tick_broadcast_exit();
fail: diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 54254fc92c2ed..fa66534a7ae22 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -194,6 +194,7 @@ void omap_sram_idle(void) int per_next_state = PWRDM_POWER_ON; int core_next_state = PWRDM_POWER_ON; u32 sdrc_pwr = 0; + int error;
mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); switch (mpu_next_state) { @@ -222,8 +223,11 @@ void omap_sram_idle(void) pwrdm_pre_transition(NULL);
/* PER */ - if (per_next_state == PWRDM_POWER_OFF) - cpu_cluster_pm_enter(); + if (per_next_state == PWRDM_POWER_OFF) { + error = cpu_cluster_pm_enter(); + if (error) + return; + }
/* CORE */ if (core_next_state < PWRDM_POWER_ON) {
From: Rodrigo Siqueira Rodrigo.Siqueira@amd.com
[ Upstream commit a0e40018dcc3f59a10ca21d58f8ea8ceb1b035ac ]
Raven provides retimer feature support that requires i2c interaction in order to work well. All settings required for this configuration, including the i2c address, are loaded from the Atom BIOS. If the retimer feature is not available, we should abort the attempt to set this feature; otherwise, the following line returns I2C_CHANNEL_OPERATION_NO_RESPONSE:
i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer));
...
if (!i2c_success)
        ASSERT(i2c_success);
This ends up causing problems with hotplugging HDMI displays on Raven, and causes retimer settings to warn like so:
WARNING: CPU: 1 PID: 429 at drivers/gpu/drm/amd/amdgpu/../dal/dc/core/dc_link.c:1998 write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu] Modules linked in: edac_mce_amd ccp kvm irqbypass binfmt_misc crct10dif_pclmul crc32_pclmul ghash_clmulni_intel snd_hda_codec_realtek snd_hda_codec_generic ledtrig_audio snd_hda_codec_hdmi snd_hda_intel amdgpu(+) snd_hda_codec snd_hda_core snd_hwdep snd_pcm snd_seq_midi snd_seq_midi_event snd_rawmidi aesni_intel snd_seq amd_iommu_v2 gpu_sched aes_x86_64 crypto_simd cryptd glue_helper snd_seq_device ttm drm_kms_helper snd_timer eeepc_wmi wmi_bmof asus_wmi sparse_keymap drm mxm_wmi snd k10temp fb_sys_fops syscopyarea sysfillrect sysimgblt soundcore joydev input_leds mac_hid sch_fq_codel parport_pc ppdev lp parport ip_tables x_tables autofs4 igb i2c_algo_bit hid_generic usbhid i2c_piix4 dca ahci hid libahci video wmi gpio_amdpt gpio_generic CPU: 1 PID: 429 Comm: systemd-udevd Tainted: G W 5.2.0-rc1sept162019+ #1 Hardware name: System manufacturer System Product Name/ROG STRIX B450-F GAMING, BIOS 2605 08/06/2019 RIP: 0010:write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu] Code: ff 0f b6 4d ce 44 0f b6 45 cf 44 0f b6 c8 45 89 cf 44 89 e2 48 c7 c6 f0 34 bc c0 bf 04 00 00 00 e8 63 b0 90 ff 45 84 ff 75 02 <0f> 0b 42 0f b6 04 73 8d 50 f6 80 fa 02 77 8c 3c 0a 0f 85 c8 00 00 RSP: 0018:ffffa99d02726fd0 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffffa99d02727035 RCX: 0000000000000006 RDX: 0000000000000000 RSI: 0000000000000002 RDI: ffff976acc857440 RBP: ffffa99d02727018 R08: 0000000000000002 R09: 000000000002a600 R10: ffffe90610193680 R11: 00000000000005e3 R12: 000000000000005d R13: ffff976ac4b201b8 R14: 0000000000000001 R15: 0000000000000000 FS: 00007f14f99e1680(0000) GS:ffff976acc840000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fdf212843b8 CR3: 0000000408906000 CR4: 00000000003406e0 Call Trace: core_link_enable_stream+0x626/0x680 [amdgpu] dce110_apply_ctx_to_hw+0x414/0x4e0 [amdgpu] dc_commit_state+0x331/0x5e0 [amdgpu] ? drm_calc_timestamping_constants+0xf9/0x150 [drm] amdgpu_dm_atomic_commit_tail+0x395/0x1e00 [amdgpu] ? dm_plane_helper_prepare_fb+0x20c/0x280 [amdgpu] commit_tail+0x42/0x70 [drm_kms_helper] drm_atomic_helper_commit+0x10c/0x120 [drm_kms_helper] amdgpu_dm_atomic_commit+0x95/0xa0 [amdgpu] drm_atomic_commit+0x4a/0x50 [drm] restore_fbdev_mode_atomic+0x1c0/0x1e0 [drm_kms_helper] restore_fbdev_mode+0x4c/0x160 [drm_kms_helper] ? _cond_resched+0x19/0x40 drm_fb_helper_restore_fbdev_mode_unlocked+0x4e/0xa0 [drm_kms_helper] drm_fb_helper_set_par+0x2d/0x50 [drm_kms_helper] fbcon_init+0x471/0x630 visual_init+0xd5/0x130 do_bind_con_driver+0x20a/0x430 do_take_over_console+0x7d/0x1b0 do_fbcon_takeover+0x5c/0xb0 fbcon_event_notify+0x6cd/0x8a0 notifier_call_chain+0x4c/0x70 blocking_notifier_call_chain+0x43/0x60 fb_notifier_call_chain+0x1b/0x20 register_framebuffer+0x254/0x360 __drm_fb_helper_initial_config_and_unlock+0x2c5/0x510 [drm_kms_helper] drm_fb_helper_initial_config+0x35/0x40 [drm_kms_helper] amdgpu_fbdev_init+0xcd/0x100 [amdgpu] amdgpu_device_init+0x1156/0x1930 [amdgpu] amdgpu_driver_load_kms+0x8d/0x2e0 [amdgpu] drm_dev_register+0x12b/0x1c0 [drm] amdgpu_pci_probe+0xd3/0x160 [amdgpu] local_pci_probe+0x47/0xa0 pci_device_probe+0x142/0x1b0 really_probe+0xf5/0x3d0 driver_probe_device+0x11b/0x130 device_driver_attach+0x58/0x60 __driver_attach+0xa3/0x140 ? device_driver_attach+0x60/0x60 ? device_driver_attach+0x60/0x60 bus_for_each_dev+0x74/0xb0 ? 
kmem_cache_alloc_trace+0x1a3/0x1c0 driver_attach+0x1e/0x20 bus_add_driver+0x147/0x220 ? 0xffffffffc0cb9000 driver_register+0x60/0x100 ? 0xffffffffc0cb9000 __pci_register_driver+0x5a/0x60 amdgpu_init+0x74/0x83 [amdgpu] do_one_initcall+0x4a/0x1fa ? _cond_resched+0x19/0x40 ? kmem_cache_alloc_trace+0x3f/0x1c0 ? __vunmap+0x1cc/0x200 do_init_module+0x5f/0x227 load_module+0x2330/0x2b40 __do_sys_finit_module+0xfc/0x120 ? __do_sys_finit_module+0xfc/0x120 __x64_sys_finit_module+0x1a/0x20 do_syscall_64+0x5a/0x130 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7f14f9500839 Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1f f6 2c 00 f7 d8 64 89 01 48 RSP: 002b:00007fff9bc4f5a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 RAX: ffffffffffffffda RBX: 000055afb5abce30 RCX: 00007f14f9500839 RDX: 0000000000000000 RSI: 000055afb5ace0f0 RDI: 0000000000000017 RBP: 000055afb5ace0f0 R08: 0000000000000000 R09: 000000000000000a R10: 0000000000000017 R11: 0000000000000246 R12: 0000000000000000 R13: 000055afb5aad800 R14: 0000000000020000 R15: 0000000000000000 ---[ end trace c286e96563966f08 ]---
This commit reworks the i2c write handling for the retimer so that the configuration is aborted if the feature is not available on the device. For debugging's sake, a simple log message is kept for the case where the retimer is not available.
Signed-off-by: Rodrigo Siqueira Rodrigo.Siqueira@amd.com Reviewed-by: Hersen Wu hersenxs.wu@amd.com Acked-by: Rodrigo Siqueira Rodrigo.Siqueira@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 67 ++++++++----------- 1 file changed, 29 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 5bf12a446e952..3efee7b3378a3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1733,8 +1733,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1752,8 +1751,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; }
buffer[0] = offset; @@ -1765,8 +1763,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1786,8 +1783,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1805,8 +1801,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; }
buffer[0] = offset; @@ -1818,8 +1813,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1837,8 +1831,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1849,8 +1842,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1861,10 +1853,14 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
} + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); }
static void write_i2c_default_retimer_setting( @@ -1889,8 +1885,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1901,8 +1896,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0x0B to 0xDA or 0xD8 */ buffer[0] = 0x0B; @@ -1913,8 +1907,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1925,8 +1918,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0x0C to 0x1D or 0x91 */ buffer[0] = 0x0C; @@ -1937,8 +1929,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1949,8 +1940,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
if (is_vga_mode) { @@ -1965,8 +1955,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1977,8 +1966,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail;
/* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1989,9 +1977,13 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); }
static void write_i2c_redriver_setting( @@ -2020,8 +2012,7 @@ static void write_i2c_redriver_setting( slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + DC_LOG_DEBUG("Set redriver failed"); }
static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
From: Anson Huang Anson.Huang@nxp.com
[ Upstream commit 28b2f82e0383e27476be8a5e13d2aea07ebeb275 ]
Fix below division by zero warning:
[ 3.176443] Division by zero in kernel. [ 3.181809] CPU: 0 PID: 88 Comm: kworker/0:2 Not tainted 5.3.0-rc2-next-20190730-63758-ge08da51-dirty #124 [ 3.191817] Hardware name: Freescale i.MX7ULP (Device Tree) [ 3.197821] Workqueue: events dbs_work_handler [ 3.202849] [<c01127d8>] (unwind_backtrace) from [<c010cd80>] (show_stack+0x10/0x14) [ 3.211058] [<c010cd80>] (show_stack) from [<c0c77e68>] (dump_stack+0xd8/0x110) [ 3.218820] [<c0c77e68>] (dump_stack) from [<c0c753c0>] (Ldiv0_64+0x8/0x18) [ 3.226263] [<c0c753c0>] (Ldiv0_64) from [<c05984b4>] (clk_pfdv2_set_rate+0x54/0xac) [ 3.234487] [<c05984b4>] (clk_pfdv2_set_rate) from [<c059192c>] (clk_change_rate+0x1a4/0x698) [ 3.243468] [<c059192c>] (clk_change_rate) from [<c0591a08>] (clk_change_rate+0x280/0x698) [ 3.252180] [<c0591a08>] (clk_change_rate) from [<c0591fc0>] (clk_core_set_rate_nolock+0x1a0/0x278) [ 3.261679] [<c0591fc0>] (clk_core_set_rate_nolock) from [<c05920c8>] (clk_set_rate+0x30/0x64) [ 3.270743] [<c05920c8>] (clk_set_rate) from [<c089cb88>] (imx7ulp_set_target+0x184/0x2a4) [ 3.279501] [<c089cb88>] (imx7ulp_set_target) from [<c0896358>] (__cpufreq_driver_target+0x188/0x514) [ 3.289196] [<c0896358>] (__cpufreq_driver_target) from [<c0899b0c>] (od_dbs_update+0x130/0x15c) [ 3.298438] [<c0899b0c>] (od_dbs_update) from [<c089a5d0>] (dbs_work_handler+0x2c/0x5c) [ 3.306914] [<c089a5d0>] (dbs_work_handler) from [<c0156858>] (process_one_work+0x2ac/0x704) [ 3.315826] [<c0156858>] (process_one_work) from [<c0156cdc>] (worker_thread+0x2c/0x574) [ 3.324404] [<c0156cdc>] (worker_thread) from [<c015cfe8>] (kthread+0x134/0x148) [ 3.332278] [<c015cfe8>] (kthread) from [<c01010b4>] (ret_from_fork+0x14/0x20) [ 3.339858] Exception stack(0xe82d5fb0 to 0xe82d5ff8) [ 3.345314] 5fa0: 00000000 00000000 00000000 00000000 [ 3.353926] 5fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 [ 3.362519] 5fe0: 00000000 00000000 00000000 00000000 00000013 00000000
Signed-off-by: Anson Huang Anson.Huang@nxp.com Signed-off-by: Peng Fan peng.fan@nxp.com Signed-off-by: Shawn Guo shawnguo@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/clk/imx/clk-pfdv2.c | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c index a03bbed662c6b..2a46b9b61b466 100644 --- a/drivers/clk/imx/clk-pfdv2.c +++ b/drivers/clk/imx/clk-pfdv2.c @@ -139,6 +139,12 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate, u32 val; u8 frac;
+ if (!rate) + return -EINVAL; + + /* PFD can NOT change rate without gating */ + WARN_ON(clk_pfdv2_is_enabled(hw)); + tmp = tmp * 18 + rate / 2; do_div(tmp, rate); frac = tmp;
From: Zeng Tao prime.zeng@hisilicon.com
[ Upstream commit 4a33691c4cea9eb0a7c66e87248be4637e14b180 ]
Currently there are only 10 bytes to store the cpu-topology 'name' information, so only 10 bytes are copied into the cluster/thread/core names.
If the cluster ID exceeds a two-digit number, it will result in data corruption and end up in a dead loop in the parsing routines. The same applies to thread names with more than a three-digit number.
This issue was found using boundary tests in a virtualised environment such as QEMU.
Let us increase the buffer size to fix such potential issues.
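A quick demonstration of the truncation with a 10-byte buffer: "cluster" is already 7 characters, so a 3-digit ID needs 11 bytes including the NUL, and snprintf() silently truncates — presumably producing colliding names like "cluster10" for IDs 100-109, which is what trips the parsing routines:

    #include <stdio.h>

    int main(void)
    {
            char name[10];

            /* "cluster100" needs 11 bytes with the NUL; only 10 are available. */
            int want = snprintf(name, sizeof(name), "cluster%d", 100);

            /* snprintf() truncates safely and returns the untruncated length. */
            printf("wanted %d bytes, got \"%s\"\n", want, name);
            return 0;
    }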
Reviewed-by: Sudeep Holla sudeep.holla@arm.com Signed-off-by: Zeng Tao prime.zeng@hisilicon.com
Link: https://lore.kernel.org/r/1583294092-5929-1-git-send-email-prime.zeng@hisili... Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/base/arch_topology.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 1eb81f113786f..83e26fd188cc9 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -270,7 +270,7 @@ static int __init get_cpu_for_node(struct device_node *node) static int __init parse_core(struct device_node *core, int package_id, int core_id) { - char name[10]; + char name[20]; bool leaf = true; int i = 0; int cpu; @@ -317,7 +317,7 @@ static int __init parse_core(struct device_node *core, int package_id,
static int __init parse_cluster(struct device_node *cluster, int depth) { - char name[10]; + char name[20]; bool leaf = true; bool has_cores = false; struct device_node *c;
From: afzal mohammed afzal.mohd.ma@gmail.com
[ Upstream commit 8719b6d29d2851fa84c4074bb2e5adc022911ab8 ]
request_irq() is preferred over setup_irq(). Invocations of setup_irq() occur after memory allocators are ready.
Per tglx[1], setup_irq() existed in olden days when allocators were not ready by the time early interrupts were initialized.
Hence replace setup_irq() by request_irq().
[1] https://lkml.kernel.org/r/alpine.DEB.2.20.1710191609480.1971@nanos
Signed-off-by: afzal mohammed afzal.mohd.ma@gmail.com Message-Id: 20200304005049.5291-1-afzal.mohd.ma@gmail.com [heiko.carstens@de.ibm.com: replace pr_err with panic] Signed-off-by: Heiko Carstens heiko.carstens@de.ibm.com Signed-off-by: Vasily Gorbik gor@linux.ibm.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/s390/kernel/irq.c | 8 ++------ drivers/s390/cio/airq.c | 8 ++------ drivers/s390/cio/cio.c | 8 ++------ 3 files changed, 6 insertions(+), 18 deletions(-)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 8371855042dc2..da550cb8b31bd 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -294,11 +294,6 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy) return IRQ_HANDLED; }
-static struct irqaction external_interrupt = { - .name = "EXT", - .handler = do_ext_interrupt, -}; - void __init init_ext_interrupts(void) { int idx; @@ -308,7 +303,8 @@ void __init init_ext_interrupts(void)
irq_set_chip_and_handler(EXT_INTERRUPT, &dummy_irq_chip, handle_percpu_irq); - setup_irq(EXT_INTERRUPT, &external_interrupt); + if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL)) + panic("Failed to register EXT interrupt\n"); }
static DEFINE_SPINLOCK(irq_subclass_lock); diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index 427b2e24a8cea..cb466ed7eb5ef 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -105,16 +105,12 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy) return IRQ_HANDLED; }
-static struct irqaction airq_interrupt = { - .name = "AIO", - .handler = do_airq_interrupt, -}; - void __init init_airq_interrupts(void) { irq_set_chip_and_handler(THIN_INTERRUPT, &dummy_irq_chip, handle_percpu_irq); - setup_irq(THIN_INTERRUPT, &airq_interrupt); + if (request_irq(THIN_INTERRUPT, do_airq_interrupt, 0, "AIO", NULL)) + panic("Failed to register AIO interrupt\n"); }
static inline unsigned long iv_size(unsigned long bits) diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 18f5458f90e8f..6d716db2a46ab 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -563,16 +563,12 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) return IRQ_HANDLED; }
-static struct irqaction io_interrupt = { - .name = "I/O", - .handler = do_cio_interrupt, -}; - void __init init_cio_interrupts(void) { irq_set_chip_and_handler(IO_INTERRUPT, &dummy_irq_chip, handle_percpu_irq); - setup_irq(IO_INTERRUPT, &io_interrupt); + if (request_irq(IO_INTERRUPT, do_cio_interrupt, 0, "I/O", NULL)) + panic("Failed to register I/O interrupt\n"); }
#ifdef CONFIG_CCW_CONSOLE
From: Leo Yan leo.yan@linaro.org
[ Upstream commit d01751563caf0dec7be36f81de77cc0197b77e59 ]
If the option '--itrace=iNNN' is used with Arm CoreSight trace data, the perf tool fails to inject instruction samples; the root cause is that the packets are only swapped for branch samples and last branches but not for instruction samples, so newly arriving packets cannot be handled properly when only instruction samples are synthesized.
To fix this issue, this patch refactors the code into a new function, cs_etm__packet_swap(), which swaps the packets, and adds the condition for instruction samples to it.
Signed-off-by: Leo Yan leo.yan@linaro.org Reviewed-by: Mathieu Poirier mathieu.poirier@linaro.org Reviewed-by: Mike Leach mike.leach@linaro.org Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Jiri Olsa jolsa@redhat.com Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Robert Walker robert.walker@arm.com Cc: Suzuki Poulouse suzuki.poulose@arm.com Cc: coresight ml coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: http://lore.kernel.org/lkml/20200219021811.20067-2-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/cs-etm.c | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-)
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index f5f855fff412e..38298cbb07524 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -363,6 +363,23 @@ struct cs_etm_packet_queue return NULL; }
+static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm, + struct cs_etm_traceid_queue *tidq) +{ + struct cs_etm_packet *tmp; + + if (etm->sample_branches || etm->synth_opts.last_branch || + etm->sample_instructions) { + /* + * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for + * the next incoming packet. + */ + tmp = tidq->packet; + tidq->packet = tidq->prev_packet; + tidq->prev_packet = tmp; + } +} + static void cs_etm__packet_dump(const char *pkt_string) { const char *color = PERF_COLOR_BLUE; @@ -1340,7 +1357,6 @@ static int cs_etm__sample(struct cs_etm_queue *etmq, struct cs_etm_traceid_queue *tidq) { struct cs_etm_auxtrace *etm = etmq->etm; - struct cs_etm_packet *tmp; int ret; u8 trace_chan_id = tidq->trace_chan_id; u64 instrs_executed = tidq->packet->instr_count; @@ -1404,15 +1420,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq, } }
- if (etm->sample_branches || etm->synth_opts.last_branch) { - /* - * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for - * the next incoming packet. - */ - tmp = tidq->packet; - tidq->packet = tidq->prev_packet; - tidq->prev_packet = tmp; - } + cs_etm__packet_swap(etm, tidq);
return 0; } @@ -1441,7 +1449,6 @@ static int cs_etm__flush(struct cs_etm_queue *etmq, { int err = 0; struct cs_etm_auxtrace *etm = etmq->etm; - struct cs_etm_packet *tmp;
/* Handle start tracing packet */ if (tidq->prev_packet->sample_type == CS_ETM_EMPTY) @@ -1476,15 +1483,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq, }
swap_packet: - if (etm->sample_branches || etm->synth_opts.last_branch) { - /* - * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for - * the next incoming packet. - */ - tmp = tidq->packet; - tidq->packet = tidq->prev_packet; - tidq->prev_packet = tmp; - } + cs_etm__packet_swap(etm, tidq);
return err; }
From: Leo Yan leo.yan@linaro.org
[ Upstream commit c9f5baa136777b2c982f6f7a90c9da69a88be148 ]
When 'etm->instructions_sample_period' is less than 'tidq->period_instructions', the function cs_etm__sample() cannot handle this case properly with its logic.
Let's see below flow as an example:
- If we set the itrace option '--itrace=i4', then cs_etm__sample() starts out with the following variable values:
tidq->period_instructions = 0
etm->instructions_sample_period = 4
- When the first packet arrives:
packet->instr_count = 10; the number of instructions executed in this packet is 10, so period_instructions is updated as below:
tidq->period_instructions = 0 + 10 = 10
instrs_over = 10 - 4 = 6
offset = 10 - 6 - 1 = 3
tidq->period_instructions = instrs_over = 6
- When the second packet arrives:
packet->instr_count = 10; in the second pass, assume 10 instructions in the trace sample again:
tidq->period_instructions = 6 + 10 = 16
instrs_over = 16 - 4 = 12
offset = 10 - 12 - 1 = -3  -> a negative value
tidq->period_instructions = instrs_over = 12
So after handling these two packets, we have the issues below:
The first issue is that cs_etm__instr_addr() returns the address, within the current trace sample, of the instruction related to the offset, so the offset is supposed to always be an unsigned value. But in fact, cs_etm__sample() might calculate a negative offset value (in handling the second packet, the offset is -3) and pass it to cs_etm__instr_addr() as a u64, i.e. as a huge positive integer.
The second issue is that only 2 samples are synthesized for sample period = 4. In theory, every packet has 10 instructions, so the two packets have 20 instructions in total, and 20 instructions should generate 5 samples (4 x 5 = 20). This happens because cs_etm__sample() calls cs_etm__synth_instruction_sample() only once per range packet.
This patch fixes the logic in cs_etm__sample(); the basic idea for handling an incoming packet is:
- To synthesize the first instruction sample, combine the instructions left over from the previous packet with the head of the new packet, then generate consecutive samples at the sample period (see the sketch after this list);
- Any instructions remaining at the tail of the new packet are carried over for the subsequent sample.
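As a sanity check of the arithmetic, the stand-alone sketch below simulates only the bookkeeping of the fixed loop over the two 10-instruction packets from the example above; it yields the expected 5 samples (at instructions 4, 8, 12, 16 and 20), rather than the 2 produced by the old logic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const uint64_t period = 4;        /* --itrace=i4 */
            uint64_t period_instructions = 0;
            uint64_t packets[] = { 10, 10 };  /* instr_count of each packet */
            int samples = 0;

            for (int p = 0; p < 2; p++) {
                    uint64_t instrs_prev = period_instructions;
                    period_instructions += packets[p];

                    /* offset of the first sample inside this packet */
                    uint64_t offset = period - instrs_prev;

                    while (period_instructions >= period) {
                            /* sample at instruction (offset - 1) of the packet */
                            printf("sample %d in packet %d at offset %llu\n",
                                   ++samples, p,
                                   (unsigned long long)(offset - 1));
                            offset += period;
                            period_instructions -= period;
                    }
            }
            printf("total samples: %d\n", samples);   /* prints 5 */
            return 0;
    }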
Suggested-by: Mike Leach mike.leach@linaro.org Signed-off-by: Leo Yan leo.yan@linaro.org Reviewed-by: Mathieu Poirier mathieu.poirier@linaro.org Reviewed-by: Mike Leach mike.leach@linaro.org Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Jiri Olsa jolsa@redhat.com Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Robert Walker robert.walker@arm.com Cc: Suzuki Poulouse suzuki.poulose@arm.com Cc: coresight ml coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: http://lore.kernel.org/lkml/20200219021811.20067-4-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/cs-etm.c | 87 ++++++++++++++++++++++++++++++++-------- 1 file changed, 70 insertions(+), 17 deletions(-)
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 38298cbb07524..451eee24165ee 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -1359,9 +1359,12 @@ static int cs_etm__sample(struct cs_etm_queue *etmq, struct cs_etm_auxtrace *etm = etmq->etm; int ret; u8 trace_chan_id = tidq->trace_chan_id; - u64 instrs_executed = tidq->packet->instr_count; + u64 instrs_prev;
- tidq->period_instructions += instrs_executed; + /* Get instructions remainder from previous packet */ + instrs_prev = tidq->period_instructions; + + tidq->period_instructions += tidq->packet->instr_count;
/* * Record a branch when the last instruction in @@ -1379,26 +1382,76 @@ static int cs_etm__sample(struct cs_etm_queue *etmq, * TODO: allow period to be defined in cycles and clock time */
- /* Get number of instructions executed after the sample point */ - u64 instrs_over = tidq->period_instructions - - etm->instructions_sample_period; + /* + * Below diagram demonstrates the instruction samples + * generation flows: + * + * Instrs Instrs Instrs Instrs + * Sample(n) Sample(n+1) Sample(n+2) Sample(n+3) + * | | | | + * V V V V + * -------------------------------------------------- + * ^ ^ + * | | + * Period Period + * instructions(Pi) instructions(Pi') + * + * | | + * ---------------- -----------------/ + * V + * tidq->packet->instr_count + * + * Instrs Sample(n...) are the synthesised samples occurring + * every etm->instructions_sample_period instructions - as + * defined on the perf command line. Sample(n) is being the + * last sample before the current etm packet, n+1 to n+3 + * samples are generated from the current etm packet. + * + * tidq->packet->instr_count represents the number of + * instructions in the current etm packet. + * + * Period instructions (Pi) contains the the number of + * instructions executed after the sample point(n) from the + * previous etm packet. This will always be less than + * etm->instructions_sample_period. + * + * When generate new samples, it combines with two parts + * instructions, one is the tail of the old packet and another + * is the head of the new coming packet, to generate + * sample(n+1); sample(n+2) and sample(n+3) consume the + * instructions with sample period. After sample(n+3), the rest + * instructions will be used by later packet and it is assigned + * to tidq->period_instructions for next round calculation. + */
/* - * Calculate the address of the sampled instruction (-1 as - * sample is reported as though instruction has just been - * executed, but PC has not advanced to next instruction) + * Get the initial offset into the current packet instructions; + * entry conditions ensure that instrs_prev is less than + * etm->instructions_sample_period. */ - u64 offset = (instrs_executed - instrs_over - 1); - u64 addr = cs_etm__instr_addr(etmq, trace_chan_id, - tidq->packet, offset); + u64 offset = etm->instructions_sample_period - instrs_prev; + u64 addr;
- ret = cs_etm__synth_instruction_sample( - etmq, tidq, addr, etm->instructions_sample_period); - if (ret) - return ret; + while (tidq->period_instructions >= + etm->instructions_sample_period) { + /* + * Calculate the address of the sampled instruction (-1 + * as sample is reported as though instruction has just + * been executed, but PC has not advanced to next + * instruction) + */ + addr = cs_etm__instr_addr(etmq, trace_chan_id, + tidq->packet, offset - 1); + ret = cs_etm__synth_instruction_sample( + etmq, tidq, addr, + etm->instructions_sample_period); + if (ret) + return ret;
- /* Carry remaining instructions into next sample period */ - tidq->period_instructions = instrs_over; + offset += etm->instructions_sample_period; + tidq->period_instructions -= + etm->instructions_sample_period; + } }
if (etm->sample_branches) {
From: Wen Gong wgong@codeaurora.org
[ Upstream commit 402f2992b4d62760cce7c689ff216ea3bf4d6e8a ]
When using the command below to read values, the system crashed.
command: dd if=/sys/kernel/debug/ieee80211/phy0/ath10k/mem_value count=1 bs=4 skip=$((0x100233))
It calls ath10k_sdio_hif_diag_read() with address = 0x4008cc and buf_len = 4.
Then system crash: [ 1786.013258] Unable to handle kernel paging request at virtual address ffffffc00bd45000 [ 1786.013273] Mem abort info: [ 1786.013281] ESR = 0x96000045 [ 1786.013291] Exception class = DABT (current EL), IL = 32 bits [ 1786.013299] SET = 0, FnV = 0 [ 1786.013307] EA = 0, S1PTW = 0 [ 1786.013314] Data abort info: [ 1786.013322] ISV = 0, ISS = 0x00000045 [ 1786.013330] CM = 0, WnR = 1 [ 1786.013342] swapper pgtable: 4k pages, 39-bit VAs, pgdp = 000000008542a60e [ 1786.013350] [ffffffc00bd45000] pgd=0000000000000000, pud=0000000000000000 [ 1786.013368] Internal error: Oops: 96000045 [#1] PREEMPT SMP [ 1786.013609] Process swapper/0 (pid: 0, stack limit = 0x0000000084b153c6) [ 1786.013623] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.19.86 #137 [ 1786.013631] Hardware name: MediaTek krane sku176 board (DT) [ 1786.013643] pstate: 80000085 (Nzcv daIf -PAN -UAO) [ 1786.013662] pc : __memcpy+0x94/0x180 [ 1786.013678] lr : swiotlb_tbl_unmap_single+0x84/0x150 [ 1786.013686] sp : ffffff8008003c60 [ 1786.013694] x29: ffffff8008003c90 x28: ffffffae96411f80 [ 1786.013708] x27: ffffffae960d2018 x26: ffffff8019a4b9a8 [ 1786.013721] x25: 0000000000000000 x24: 0000000000000001 [ 1786.013734] x23: ffffffae96567000 x22: 00000000000051d4 [ 1786.013747] x21: 0000000000000000 x20: 00000000fe6e9000 [ 1786.013760] x19: 0000000000000004 x18: 0000000000000020 [ 1786.013773] x17: 0000000000000001 x16: 0000000000000000 [ 1786.013787] x15: 00000000ffffffff x14: 00000000000044c0 [ 1786.013800] x13: 0000000000365ba4 x12: 0000000000000000 [ 1786.013813] x11: 0000000000000001 x10: 00000037be6e9000 [ 1786.013826] x9 : ffffffc940000000 x8 : 000000000bd45000 [ 1786.013839] x7 : 0000000000000000 x6 : ffffffc00bd45000 [ 1786.013852] x5 : 0000000000000000 x4 : 0000000000000000 [ 1786.013865] x3 : 0000000000000c00 x2 : 0000000000000004 [ 1786.013878] x1 : fffffff7be6e9004 x0 : ffffffc00bd45000 [ 1786.013891] Call trace: [ 1786.013903] __memcpy+0x94/0x180 [ 1786.013914] unmap_single+0x6c/0x84 [ 1786.013925] swiotlb_unmap_sg_attrs+0x54/0x80 [ 1786.013938] __swiotlb_unmap_sg_attrs+0x8c/0xa4 [ 1786.013952] msdc_unprepare_data+0x6c/0x84 [ 1786.013963] msdc_request_done+0x58/0x84 [ 1786.013974] msdc_data_xfer_done+0x1a0/0x1c8 [ 1786.013985] msdc_irq+0x12c/0x17c [ 1786.013996] __handle_irq_event_percpu+0xe4/0x250 [ 1786.014006] handle_irq_event_percpu+0x28/0x68 [ 1786.014015] handle_irq_event+0x48/0x78 [ 1786.014026] handle_fasteoi_irq+0xd0/0x1a0 [ 1786.014039] __handle_domain_irq+0x84/0xc4 [ 1786.014050] gic_handle_irq+0x124/0x1a4 [ 1786.014059] el1_irq+0xb0/0x128 [ 1786.014072] cpuidle_enter_state+0x298/0x328 [ 1786.014082] cpuidle_enter+0x30/0x40 [ 1786.014094] do_idle+0x190/0x268 [ 1786.014104] cpu_startup_entry+0x24/0x28 [ 1786.014116] rest_init+0xd4/0xe0 [ 1786.014126] start_kernel+0x30c/0x38c [ 1786.014139] Code: f8408423 f80084c3 36100062 b8404423 (b80044c3) [ 1786.014150] ---[ end trace 3b02ddb698ea69ee ]--- [ 1786.015415] Kernel panic - not syncing: Fatal exception in interrupt [ 1786.015433] SMP: stopping secondary CPUs [ 1786.015447] Kernel Offset: 0x2e8d200000 from 0xffffff8008000000 [ 1786.015458] CPU features: 0x0,2188200c [ 1786.015466] Memory Limit: none
The SDIO chip needs memory that is kmalloc'ed; if it is given vmalloc'ed memory from ath10k_mem_value_read(), a memory error occurs. The kzalloc'ed buffer in ath10k_sdio_hif_diag_read32() is the correct type, so add a kzalloc'ed bounce buffer in ath10k_sdio_hif_diag_read() to replace direct use of the vmalloc'ed buffer coming from ath10k_mem_value_read().
This patch only affects the SDIO chip.
Tested with QCA6174 SDIO with firmware WLAN.RMH.4.4.1-00029.
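The fix is the classic bounce-buffer pattern: perform the transfer into memory that is known to be safe for the backend, then memcpy into the caller's buffer, whatever its origin. A generic sketch of the pattern (the I/O is stubbed out and the names are illustrative; calloc() merely stands in for kzalloc()):

    #include <stdlib.h>
    #include <string.h>

    /* Stub for a read that must target DMA-safe (kmalloc-style) memory. */
    static int dma_read(void *dma_buf, size_t len)
    {
            memset(dma_buf, 0xab, len);     /* pretend the hardware filled it */
            return 0;
    }

    /* Bounce-buffer pattern: never hand the caller's buffer (which may be
     * vmalloc'ed) to the DMA path; copy through a known-good allocation. */
    static int diag_read(void *caller_buf, size_t len)
    {
            void *bounce = calloc(1, len);  /* stands in for kzalloc() */
            int ret;

            if (!bounce)
                    return -1;

            ret = dma_read(bounce, len);
            if (!ret)
                    memcpy(caller_buf, bounce, len);

            free(bounce);
            return ret;
    }

    int main(void)
    {
            char buf[4];
            return diag_read(buf, sizeof(buf));
    }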
Signed-off-by: Wen Gong wgong@codeaurora.org Signed-off-by: Kalle Valo kvalo@codeaurora.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/ath/ath10k/sdio.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 9870d2d095c87..8fe626deadeb0 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1582,23 +1582,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf, size_t buf_len) { int ret; + void *mem; + + mem = kzalloc(buf_len, GFP_KERNEL); + if (!mem) + return -ENOMEM;
/* set window register to start read cycle */ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address); if (ret) { ath10k_warn(ar, "failed to set mbox window read address: %d", ret); - return ret; + goto out; }
/* read the data */ - ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len); + ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len); if (ret) { ath10k_warn(ar, "failed to read from mbox window data address: %d\n", ret); - return ret; + goto out; }
- return 0; + memcpy(buf, mem, buf_len); + +out: + kfree(mem); + + return ret; }
static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
From: Sagar Biradar Sagar.Biradar@microchip.com
[ Upstream commit bef18d308a2215eff8c3411a23d7f34604ce56c3 ]
Fix the occasional adapter panic seen when sg_reset is issued with the -d, -t, -b, or -H flag. Remove support for the HBA_IU_TYPE_SCSI_TM_REQ command type from aac_hba_send(), since iu_type, request_id, and fib_flags are not populated for it there. Make the device and target reset handlers send TMF commands only when reset_state is 0.
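The cleanup pattern added to each reset handler looks like this (taken in outline from the diff below): if the TMF was not actually queued, i.e. aac_hba_send() did not return -EINPROGRESS, release the fib instead of waiting for a completion that will never arrive.

    status = aac_hba_send(command, fib, (fib_callback)aac_tmf_callback,
                          (void *)info);
    if (status != -EINPROGRESS) {
            info->reset_state = 0;      /* no TMF in flight anymore */
            aac_fib_complete(fib);
            aac_fib_free(fib);
            return ret;
    }
    /* otherwise, wait up to 15 seconds for completion */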
Link: https://lore.kernel.org/r/1581553771-25796-1-git-send-email-Sagar.Biradar@mi... Reviewed-by: Sagar Biradar Sagar.Biradar@microchip.com Signed-off-by: Sagar Biradar Sagar.Biradar@microchip.com Signed-off-by: Balsundar P balsundar.p@microsemi.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/aacraid/commsup.c | 2 +- drivers/scsi/aacraid/linit.c | 34 +++++++++++++++++++++++++--------- 2 files changed, 26 insertions(+), 10 deletions(-)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 2142a649e865b..90fb17c5dd69c 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -728,7 +728,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, hbacmd->request_id = cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; - } else if (command != HBA_IU_TYPE_SCSI_TM_REQ) + } else return -EINVAL;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 4a858789e6c5e..514aed38b5afe 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -723,7 +723,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib, (fib_callback) aac_hba_callback, (void *) cmd); - + if (status != -EINPROGRESS) { + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } /* Wait up to 15 secs for completion */ for (count = 0; count < 15; ++count) { if (cmd->SCp.sent_command) { @@ -902,11 +906,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && - info->reset_state > 0) + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n", + pr_err("%s: Host device reset request. SCSI hang ?\n", AAC_DRIVERNAME);
fib = aac_fib_alloc(aac); @@ -921,7 +925,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) status = aac_hba_send(command, fib, (fib_callback) aac_tmf_callback, (void *) info); - + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state == 0) { @@ -960,11 +969,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
info = &aac->hba_map[bus][cid];
- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && - info->reset_state > 0) + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) return FAILED;
- pr_err("%s: Host adapter reset request. SCSI hang ?\n", + pr_err("%s: Host target reset request. SCSI hang ?\n", AAC_DRIVERNAME);
fib = aac_fib_alloc(aac); @@ -981,6 +990,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) (fib_callback) aac_tmf_callback, (void *) info);
+ if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } + /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state <= 0) { @@ -1033,7 +1049,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) } }
- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); + pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
/* * Check the health of the controller
From: Howard Chung howardchung@google.com
[ Upstream commit 96298f640104e4cd9a913a6e50b0b981829b94ff ]
According to Core Spec Version 5.2 | Vol 3, Part A 6.1.5, the incoming L2CAP_ConfigReq should be handled during OPEN state.
The section below shows the btmon trace when running L2CAP/COS/CFD/BV-12-C before and after this change.
=== Before === ...
ACL Data RX: Handle 256 flags 0x02 dlen 12 #22
L2CAP: Connection Request (0x02) ident 2 len 4 PSM: 1 (0x0001) Source CID: 65 < ACL Data TX: Handle 256 flags 0x00 dlen 16 #23 L2CAP: Connection Response (0x03) ident 2 len 8 Destination CID: 64 Source CID: 65 Result: Connection successful (0x0000) Status: No further information available (0x0000) < ACL Data TX: Handle 256 flags 0x00 dlen 12 #24 L2CAP: Configure Request (0x04) ident 2 len 4 Destination CID: 65 Flags: 0x0000
HCI Event: Number of Completed Packets (0x13) plen 5 #25
Num handles: 1 Handle: 256 Count: 1
HCI Event: Number of Completed Packets (0x13) plen 5 #26
Num handles: 1 Handle: 256 Count: 1
ACL Data RX: Handle 256 flags 0x02 dlen 16 #27
L2CAP: Configure Request (0x04) ident 3 len 8 Destination CID: 64 Flags: 0x0000 Option: Unknown (0x10) [hint] 01 00 .. < ACL Data TX: Handle 256 flags 0x00 dlen 18 #28 L2CAP: Configure Response (0x05) ident 3 len 10 Source CID: 65 Flags: 0x0000 Result: Success (0x0000) Option: Maximum Transmission Unit (0x01) [mandatory] MTU: 672
HCI Event: Number of Completed Packets (0x13) plen 5 #29
Num handles: 1 Handle: 256 Count: 1
ACL Data RX: Handle 256 flags 0x02 dlen 14 #30
L2CAP: Configure Response (0x05) ident 2 len 6 Source CID: 64 Flags: 0x0000 Result: Success (0x0000)
ACL Data RX: Handle 256 flags 0x02 dlen 20 #31
L2CAP: Configure Request (0x04) ident 3 len 12 Destination CID: 64 Flags: 0x0000 Option: Unknown (0x10) [hint] 01 00 91 02 11 11 ...... < ACL Data TX: Handle 256 flags 0x00 dlen 14 #32 L2CAP: Command Reject (0x01) ident 3 len 6 Reason: Invalid CID in request (0x0002) Destination CID: 64 Source CID: 65
HCI Event: Number of Completed Packets (0x13) plen 5 #33
Num handles: 1 Handle: 256 Count: 1 ... === After === ...
ACL Data RX: Handle 256 flags 0x02 dlen 12 #22
L2CAP: Connection Request (0x02) ident 2 len 4 PSM: 1 (0x0001) Source CID: 65 < ACL Data TX: Handle 256 flags 0x00 dlen 16 #23 L2CAP: Connection Response (0x03) ident 2 len 8 Destination CID: 64 Source CID: 65 Result: Connection successful (0x0000) Status: No further information available (0x0000) < ACL Data TX: Handle 256 flags 0x00 dlen 12 #24 L2CAP: Configure Request (0x04) ident 2 len 4 Destination CID: 65 Flags: 0x0000
HCI Event: Number of Completed Packets (0x13) plen 5 #25
Num handles: 1 Handle: 256 Count: 1
HCI Event: Number of Completed Packets (0x13) plen 5 #26
Num handles: 1 Handle: 256 Count: 1
ACL Data RX: Handle 256 flags 0x02 dlen 16 #27
L2CAP: Configure Request (0x04) ident 3 len 8 Destination CID: 64 Flags: 0x0000 Option: Unknown (0x10) [hint] 01 00 .. < ACL Data TX: Handle 256 flags 0x00 dlen 18 #28 L2CAP: Configure Response (0x05) ident 3 len 10 Source CID: 65 Flags: 0x0000 Result: Success (0x0000) Option: Maximum Transmission Unit (0x01) [mandatory] MTU: 672
HCI Event: Number of Completed Packets (0x13) plen 5 #29
Num handles: 1 Handle: 256 Count: 1
ACL Data RX: Handle 256 flags 0x02 dlen 14 #30
L2CAP: Configure Response (0x05) ident 2 len 6 Source CID: 64 Flags: 0x0000 Result: Success (0x0000)
ACL Data RX: Handle 256 flags 0x02 dlen 20 #31
L2CAP: Configure Request (0x04) ident 3 len 12 Destination CID: 64 Flags: 0x0000 Option: Unknown (0x10) [hint] 01 00 91 02 11 11 ..... < ACL Data TX: Handle 256 flags 0x00 dlen 18 #32 L2CAP: Configure Response (0x05) ident 3 len 10 Source CID: 65 Flags: 0x0000 Result: Success (0x0000) Option: Maximum Transmission Unit (0x01) [mandatory] MTU: 672 < ACL Data TX: Handle 256 flags 0x00 dlen 12 #33 L2CAP: Configure Request (0x04) ident 3 len 4 Destination CID: 65 Flags: 0x0000
HCI Event: Number of Completed Packets (0x13) plen 5 #34
Num handles: 1 Handle: 256 Count: 1
HCI Event: Number of Completed Packets (0x13) plen 5 #35
Num handles: 1 Handle: 256 Count: 1 ...
Signed-off-by: Howard Chung howardchung@google.com Signed-off-by: Marcel Holtmann marcel@holtmann.org Signed-off-by: Sasha Levin sashal@kernel.org --- net/bluetooth/l2cap_core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index eb2804ac50756..12a50e5a9f452 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -4134,7 +4134,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, return 0; }
- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { + if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && + chan->state != BT_CONNECTED) { cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, chan->dcid); goto unlock;
From: Colin Ian King colin.king@canonical.com
[ Upstream commit a7463e2dc698075132de9905b89f495df888bb79 ]
The shifting of buf[0] by 24 bits to the left is promoted to a 32-bit signed int and then sign-extended to an unsigned long. In the unlikely event that the top bit of buf[0] is set, all of the upper bits end up being set because of the sign extension, which corrupts the dev->post_bit_error sum. Fix this by using the temporary u32 variable bit_error to avoid the sign-extending promotion. This also removes the need to do the computation twice.
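A self-contained illustration of the promotion (hypothetical byte values; observable on any typical LP64 toolchain):

    #include <stdio.h>

    int main(void)
    {
            unsigned char buf[4] = { 0x80, 0x00, 0x00, 0x00 };
            unsigned long sum = 0;

            /* buf[0] promotes to (signed) int, so buf[0] << 24 yields
             * 0x80000000 (INT_MIN); widening that to a 64-bit unsigned
             * long sign-extends every upper bit. */
            sum += buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3] << 0;
            printf("broken: %lx\n", sum);          /* ffffffff80000000 */

            /* Landing in a u32 first truncates the promoted value, so
             * the later widening is zero-extended instead. */
            unsigned int bit_error = buf[0] << 24 | buf[1] << 16 |
                                     buf[2] << 8 | buf[3] << 0;
            sum = 0;
            sum += bit_error;
            printf("fixed:  %lx\n", sum);          /* 80000000 */
            return 0;
    }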
Addresses-Coverity: ("Unintended sign extension")
Fixes: 267897a4708f ("[media] tda10071: implement DVBv5 statistics") Signed-off-by: Colin Ian King colin.king@canonical.com Signed-off-by: Sean Young sean@mess.org Signed-off-by: Mauro Carvalho Chehab mchehab+huawei@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/media/dvb-frontends/tda10071.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 1953b00b3e487..685c0ac71819e 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -470,10 +470,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status) goto error;
if (dev->delivery_system == SYS_DVBS) { - dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; - dev->post_bit_error += buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; + u32 bit_error = buf[0] << 24 | buf[1] << 16 | + buf[2] << 8 | buf[3] << 0; + + dev->dvbv3_ber = bit_error; + dev->post_bit_error += bit_error; c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_error.stat[0].uvalue = dev->post_bit_error; dev->block_error += buf[4] << 8 | buf[5] << 0;
From: Palmer Dabbelt palmerdabbelt@google.com
[ Upstream commit 4cbd7814bbd595061fcb6d6355d63f04179161cd ]
SiFive's UART has a software-controlled clock divider that produces the final baud rate clock. Whenever the clock that drives the UART is changed, this divider must be updated accordingly, and since these two events are controlled by software they cannot be done atomically. During the window between updating the UART's driving clock and its internal divider, the UART will transmit at a different baud rate than what the user has configured, which will probably result in a corrupted transmission stream.
The SiFive UART has a FIFO, but due to an issue with the programming interface there is no way to directly determine when the UART has finished transmitting. We're essentially restricted to dead reckoning in order to figure that out: we can use the FIFO's TX busy register to detect when the last frame has begun transmission, and then delay long enough that the last frame is guaranteed to get out.
As far as the actual implementation goes: I've modified the existing clock notifier function to drain both the FIFO and the shift register on PRE_RATE_CHANGE. As far as I know there is no hardware flow control in this UART, so there's no good way to ask the other end to stop transmission while we can't receive (inserting software flow control messages seems like a bad idea here).
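As a rough worked example of the dead-reckoning delay (a sketch; the 12-bit frame is a worst case of 1 start + 8 data + 1 parity + 2 stop bits as assumed here, and baud_rate stands for the port's current rate):

    /* One worst-case UART frame, rounded up to whole microseconds:
     * at 115200 baud, 12 bits take 12 / 115200 s ~= 104.2 us -> 105 us. */
    unsigned long frame_us = DIV_ROUND_UP(12 * 1000 * 1000, baud_rate);

    udelay(frame_us);   /* the shift register is now guaranteed empty */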
Signed-off-by: Palmer Dabbelt palmerdabbelt@google.com Tested-by: Yash Shah yash.shah@sifive.com Link: https://lore.kernel.org/r/20200307042637.83728-1-palmer@dabbelt.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/tty/serial/sifive.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index 38133eba83a87..b4343c6aa6512 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -618,10 +618,10 @@ static void sifive_serial_shutdown(struct uart_port *port) * * On the V0 SoC, the UART IP block is derived from the CPU clock source * after a synchronous divide-by-two divider, so any CPU clock rate change - * requires the UART baud rate to be updated. This presumably could corrupt any - * serial word currently being transmitted or received. It would probably - * be better to stop receives and transmits, then complete the baud rate - * change, then re-enable them. + * requires the UART baud rate to be updated. This presumably corrupts any + * serial word currently being transmitted or received. In order to avoid + * corrupting the output data stream, we drain the transmit queue before + * allowing the clock's rate to be changed. */ static int sifive_serial_clk_notifier(struct notifier_block *nb, unsigned long event, void *data) @@ -629,6 +629,26 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb, struct clk_notifier_data *cnd = data; struct sifive_serial_port *ssp = notifier_to_sifive_serial_port(nb);
+ if (event == PRE_RATE_CHANGE) { + /* + * The TX watermark is always set to 1 by this driver, which + * means that the TX busy bit will lower when there are 0 bytes + * left in the TX queue -- in other words, when the TX FIFO is + * empty. + */ + __ssp_wait_for_xmitr(ssp); + /* + * On the cycle the TX FIFO goes empty there is still a full + * UART frame left to be transmitted in the shift register. + * The UART provides no way for software to directly determine + * when that last frame has been transmitted, so we just sleep + * here instead. As we're not tracking the number of stop bits + * they're just worst cased here. The rest of the serial + * framing parameters aren't configurable by software. + */ + udelay(DIV_ROUND_UP(12 * 1000 * 1000, ssp->baud_rate)); + } + if (event == POST_RATE_CHANGE && ssp->clkin_rate != cnd->new_rate) { ssp->clkin_rate = cnd->new_rate; __ssp_update_div(ssp);
From: "Darrick J. Wong" darrick.wong@oracle.com
[ Upstream commit 1cb5deb5bc095c070c09a4540c45f9c9ba24be43 ]
If we decide that a directory free block is corrupt, we must take care not to leak a buffer pointer to the caller. After xfs_trans_brelse returns, the buffer can be freed or reused, which means that we have to set *bpp back to NULL.
Callers are supposed to notice the nonzero return value and not use the buffer pointer, but we should code more defensively, even if all current callers handle this situation correctly.
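The defensive shape of the corruption branch, annotated (a sketch of the function's error path; only the *bpp reset is new):

    if (fa) {
            /* The verifier found corruption in the free block... */
            xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
            /* ...after brelse the buffer may be freed or reused... */
            xfs_trans_brelse(tp, *bpp);
            /* ...so never hand the stale pointer back to the caller. */
            *bpp = NULL;
            return -EFSCORRUPTED;
    }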
Fixes: de14c5f541e7 ("xfs: verify free block header fields") Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Reviewed-by: Dave Chinner dchinner@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/xfs/libxfs/xfs_dir2_node.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index 705c4f5627582..99d5b2ed67f2e 100644 --- a/fs/xfs/libxfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c @@ -210,6 +210,7 @@ __xfs_dir3_free_read( if (fa) { xfs_verifier_error(*bpp, -EFSCORRUPTED, fa); xfs_trans_brelse(tp, *bpp); + *bpp = NULL; return -EFSCORRUPTED; }
From: "Darrick J. Wong" darrick.wong@oracle.com
[ Upstream commit 2e107cf869eecc770e3f630060bb4e5f547d0fd8 ]
In xchk_dir_actor, we attempt to validate the directory hash structures by performing a directory entry lookup by (hashed) name. If the lookup returns ENOENT, that means that the hash information is corrupt. The _process_error functions don't catch this, so we have to add that explicitly.
Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Reviewed-by: Dave Chinner dchinner@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/xfs/scrub/dir.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c index 1e2e11721eb99..20eca2d8e7c77 100644 --- a/fs/xfs/scrub/dir.c +++ b/fs/xfs/scrub/dir.c @@ -152,6 +152,9 @@ xchk_dir_actor( xname.type = XFS_DIR3_FT_UNKNOWN;
error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL); + /* ENOENT means the hash lookup failed and the dir is corrupt */ + if (error == -ENOENT) + error = -EFSCORRUPTED; if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset, &error)) goto out;
From: Dmitry Monakhov dmonakhov@gmail.com
[ Upstream commit eb5760863fc28feab28b567ddcda7e667e638da0 ]
We already have similar code in ext4_mb_complex_scan_group(), but ext4_mb_simple_scan_group() is still affected.
Other reports: https://www.spinics.net/lists/linux-ext4/msg60231.html
Reviewed-by: Andreas Dilger adilger@dilger.ca Signed-off-by: Dmitry Monakhov dmonakhov@gmail.com Link: https://lore.kernel.org/r/20200310150156.641-1-dmonakhov@gmail.com Signed-off-by: Theodore Ts'o tytso@mit.edu Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ext4/mballoc.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index e1782b2e2e2dd..e5d43d2ee474d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, BUG_ON(buddy == NULL);
k = mb_find_next_zero_bit(buddy, max, 0); - BUG_ON(k >= max); - + if (k >= max) { + ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, + "%d free clusters of order %d. But found 0", + grp->bb_counters[i], i); + ext4_mark_group_bitmap_corrupted(ac->ac_sb, + e4b->bd_group, + EXT4_GROUP_INFO_BBITMAP_CORRUPT); + break; + } ac->ac_found++;
ac->ac_b_ex.fe_len = 1 << i;
From: Stefan Berger stefanb@linux.ibm.com
[ Upstream commit d8d74ea3c00214aee1e1826ca18e77944812b9b4 ]
Synchronize with the results from the CRQs before continuing with the initialization. This avoids trying to send TPM commands while the rtce buffer has not yet been allocated.
This patch fixes an existing race condition that may occur if the hypervisor does not respond quickly to the VTPM_GET_RTCE_BUFFER_SIZE request sent during initialization, leaving ibmvtpm->rtce_buf unallocated at the time the first TPM command is sent.
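The synchronization pattern, roughly (names taken from the diff below; a sketch, not the full probe path):

    /* Producer side: the CRQ interrupt handler wakes any waiter after
     * processing a response (including VTPM_GET_RTCE_BUFFER_SIZE). */
    while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
            ibmvtpm_crq_process(crq, ibmvtpm);
            wake_up_interruptible(&ibmvtpm->crq_queue.wq);
            crq->valid = 0;
            smp_wmb();
    }

    /* Consumer side: probe blocks (up to 1s) until the rtce buffer
     * exists, so no TPM command can be sent before it is allocated. */
    if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
                            ibmvtpm->rtce_buf != NULL, HZ)) {
            dev_err(dev, "CRQ response timed out\n");
            goto init_irq_cleanup;
    }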
Fixes: 132f76294744 ("drivers/char/tpm: Add new device driver to support IBM vTPM") Signed-off-by: Stefan Berger stefanb@linux.ibm.com Acked-by: Nayna Jain nayna@linux.ibm.com Tested-by: Nayna Jain nayna@linux.ibm.com Reviewed-by: Jarkko Sakkinen jarkko.sakkinen@linux.intel.com Signed-off-by: Jarkko Sakkinen jarkko.sakkinen@linux.intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/char/tpm/tpm_ibmvtpm.c | 9 +++++++++ drivers/char/tpm/tpm_ibmvtpm.h | 1 + 2 files changed, 10 insertions(+)
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index e82013d587b46..64428dbed9928 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -581,6 +581,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) */ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { ibmvtpm_crq_process(crq, ibmvtpm); + wake_up_interruptible(&ibmvtpm->crq_queue.wq); crq->valid = 0; smp_wmb(); } @@ -628,6 +629,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, }
crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); + init_waitqueue_head(&crq_q->wq); ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); @@ -680,6 +682,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (rc) goto init_irq_cleanup;
+ if (!wait_event_timeout(ibmvtpm->crq_queue.wq, + ibmvtpm->rtce_buf != NULL, + HZ)) { + dev_err(dev, "CRQ response timed out\n"); + goto init_irq_cleanup; + } + return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index 7983f1a33267e..b92aa7d3e93e7 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h @@ -26,6 +26,7 @@ struct ibmvtpm_crq_queue { struct ibmvtpm_crq *crq_addr; u32 index; u32 num_entry; + wait_queue_head_t wq; };
struct ibmvtpm_dev {
From: Alexandre Belloni alexandre.belloni@bootlin.com
[ Upstream commit f2997775b111c6d660c32a18d5d44d37cb7361b1 ]
Both RTC IRQs are requested before the struct rtc_device is allocated, this may lead to a NULL pointer dereference in the IRQ handler.
To fix this issue, allocate the rtc_device struct before requesting the IRQs using devm_rtc_allocate_device(), and use rtc_register_device() to register the RTC device.
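The general two-step shape (a sketch of the devm allocate-then-register idiom, not the full sa1100 probe; the ds1374 patch that follows applies the same idiom):

    /* 1. Allocate the rtc_device early, before any IRQ is requested,
     *    so the IRQ handler can never see info->rtc == NULL. */
    info->rtc = devm_rtc_allocate_device(&pdev->dev);
    if (IS_ERR(info->rtc))
            return PTR_ERR(info->rtc);

    ret = devm_request_irq(&pdev->dev, irq, handler, 0, "rtc", &pdev->dev);
    if (ret)
            return ret;

    /* 2. Fill in ops and register only once setup is complete. */
    info->rtc->ops = &sa1100_rtc_ops;
    ret = rtc_register_device(info->rtc);
    if (ret)
            return ret;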
Link: https://lore.kernel.org/r/20200306010146.39762-1-alexandre.belloni@bootlin.c... Signed-off-by: Alexandre Belloni alexandre.belloni@bootlin.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/rtc/rtc-sa1100.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 86fa723b3b762..795273269d58e 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -182,7 +182,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) { - struct rtc_device *rtc; int ret;
spin_lock_init(&info->lock); @@ -211,15 +210,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) writel_relaxed(0, info->rcnr); }
- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { + info->rtc->ops = &sa1100_rtc_ops; + info->rtc->max_user_freq = RTC_FREQ; + + ret = rtc_register_device(info->rtc); + if (ret) { clk_disable_unprepare(info->clk); - return PTR_ERR(rtc); + return ret; } - info->rtc = rtc; - - rtc->max_user_freq = RTC_FREQ;
/* Fix for a nasty initialization problem the in SA11xx RTSR register. * See also the comments in sa1100_rtc_interrupt(). @@ -268,6 +266,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev) info->irq_1hz = irq_1hz; info->irq_alarm = irq_alarm;
+ info->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc)) + return PTR_ERR(info->rtc); + ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", &pdev->dev); if (ret) {
From: Alexandre Belloni alexandre.belloni@bootlin.com
[ Upstream commit c11af8131a4e7ba1960faed731ee7e84c2c13c94 ]
The RTC IRQ is requested before the struct rtc_device is allocated, this may lead to a NULL pointer dereference in the IRQ handler.
To fix this issue, allocate the rtc_device struct before requesting the RTC IRQ using devm_rtc_allocate_device(), and use rtc_register_device() to register the RTC device.
Link: https://lore.kernel.org/r/20200306073404.56921-1-alexandre.belloni@bootlin.c... Signed-off-by: Alexandre Belloni alexandre.belloni@bootlin.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/rtc/rtc-ds1374.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 367497914c100..28eb96cbaf98b 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client, if (!ds1374) return -ENOMEM;
+ ds1374->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(ds1374->rtc)) + return PTR_ERR(ds1374->rtc); + ds1374->client = client; i2c_set_clientdata(client, ds1374);
@@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client, device_set_wakeup_capable(&client->dev, 1); }
- ds1374->rtc = devm_rtc_device_register(&client->dev, client->name, - &ds1374_rtc_ops, THIS_MODULE); - if (IS_ERR(ds1374->rtc)) { - dev_err(&client->dev, "unable to register the class device\n"); - return PTR_ERR(ds1374->rtc); - } + ds1374->rtc->ops = &ds1374_rtc_ops; + + ret = rtc_register_device(ds1374->rtc); + if (ret) + return ret;
#ifdef CONFIG_RTC_DRV_DS1374_WDT save_client = client;
From: Trond Myklebust trondmy@gmail.com
[ Upstream commit a451b12311aa8c96c6f6e01c783a86995dc3ec6b ]
In NFSv4, the lock stateids are tied to the lockowner and the open stateid, so that the act of closing the file also results in either an automatic loss of the locks or an error of the form NFS4ERR_LOCKS_HELD.
In practice this means we must not add new locks to the open stateid after the close process has been invoked. In fact, doing so can result in the following panic:
kernel BUG at lib/list_debug.c:51!
invalid opcode: 0000 [#1] SMP NOPTI
CPU: 2 PID: 1085 Comm: nfsd Not tainted 5.6.0-rc3+ #2
Hardware name: VMware, Inc. VMware7,1/440BX Desktop Reference Platform, BIOS VMW71.00V.14410784.B64.1908150010 08/15/2019
RIP: 0010:__list_del_entry_valid.cold+0x31/0x55
Code: 1a 3d 9b e8 74 10 c2 ff 0f 0b 48 c7 c7 f0 1a 3d 9b e8 66 10 c2 ff 0f 0b 48 89 f2 48 89 fe 48 c7 c7 b0 1a 3d 9b e8 52 10 c2 ff <0f> 0b 48 89 fe 4c 89 c2 48 c7 c7 78 1a 3d 9b e8 3e 10 c2 ff 0f 0b
RSP: 0018:ffffb296c1d47d90 EFLAGS: 00010246
RAX: 0000000000000054 RBX: ffff8ba032456ec8 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffff8ba039e99cc8 RDI: ffff8ba039e99cc8
RBP: ffff8ba032456e60 R08: 0000000000000781 R09: 0000000000000003
R10: 0000000000000000 R11: 0000000000000001 R12: ffff8ba009a4abe0
R13: ffff8ba032456e8c R14: 0000000000000000 R15: ffff8ba00adb01d8
FS:  0000000000000000(0000) GS:ffff8ba039e80000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fb213f0b008 CR3: 00000001347de006 CR4: 00000000003606e0
Call Trace:
 release_lock_stateid+0x2b/0x80 [nfsd]
 nfsd4_free_stateid+0x1e9/0x210 [nfsd]
 nfsd4_proc_compound+0x414/0x700 [nfsd]
 ? nfs4svc_decode_compoundargs+0x407/0x4c0 [nfsd]
 nfsd_dispatch+0xc1/0x200 [nfsd]
 svc_process_common+0x476/0x6f0 [sunrpc]
 ? svc_sock_secure_port+0x12/0x30 [sunrpc]
 ? svc_recv+0x313/0x9c0 [sunrpc]
 ? nfsd_svc+0x2d0/0x2d0 [nfsd]
 svc_process+0xd4/0x110 [sunrpc]
 nfsd+0xe3/0x140 [nfsd]
 kthread+0xf9/0x130
 ? nfsd_destroy+0x50/0x50 [nfsd]
 ? kthread_park+0x90/0x90
 ret_from_fork+0x1f/0x40
The fix is to ensure that lock creation checks whether the open stateid has been unhashed, and fails if it has.
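The core of the check, as a sketch distilled from the diff below (an unhashed open stateid is detectable by its empty st_perfile list):

    static bool
    nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
    {
            return list_empty(&stp->st_perfile);
    }

    /* In init_lock_stateid(), while holding clp->cl_lock: */
    if (nfs4_ol_stateid_unhashed(open_stp))
            goto out_close;   /* file is closing: refuse to add a new lock */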
Fixes: 659aefb68eca ("nfsd: Ensure we don't recognise lock stateids after freeing them") Signed-off-by: Trond Myklebust trond.myklebust@hammerspace.com Signed-off-by: Chuck Lever chuck.lever@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/nfsd/nfs4state.c | 73 ++++++++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 30 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 68cf116607645..8cb2f744dde6b 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -495,6 +495,8 @@ find_any_file(struct nfs4_file *f) { struct nfsd_file *ret;
+ if (!f) + return NULL; spin_lock(&f->fi_lock); ret = __nfs4_get_fd(f, O_RDWR); if (!ret) { @@ -1273,6 +1275,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop) nfs4_free_stateowner(sop); }
+static bool +nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp) +{ + return list_empty(&stp->st_perfile); +} + static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_file *fp = stp->st_stid.sc_file; @@ -1343,9 +1351,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) { lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+ if (!unhash_ol_stateid(stp)) + return false; list_del_init(&stp->st_locks); nfs4_unhash_stid(&stp->st_stid); - return unhash_ol_stateid(stp); + return true; }
static void release_lock_stateid(struct nfs4_ol_stateid *stp) @@ -1410,13 +1420,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { - bool unhashed; - lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
- unhashed = unhash_ol_stateid(stp); + if (!unhash_ol_stateid(stp)) + return false; release_open_stateid_locks(stp, reaplist); - return unhashed; + return true; }
static void release_open_stateid(struct nfs4_ol_stateid *stp) @@ -6267,21 +6276,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, }
static struct nfs4_ol_stateid * -find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) +find_lock_stateid(const struct nfs4_lockowner *lo, + const struct nfs4_ol_stateid *ost) { struct nfs4_ol_stateid *lst; - struct nfs4_client *clp = lo->lo_owner.so_client;
- lockdep_assert_held(&clp->cl_lock); + lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
- list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { - if (lst->st_stid.sc_type != NFS4_LOCK_STID) - continue; - if (lst->st_stid.sc_file == fp) { - refcount_inc(&lst->st_stid.sc_count); - return lst; + /* If ost is not hashed, ost->st_locks will not be valid */ + if (!nfs4_ol_stateid_unhashed(ost)) + list_for_each_entry(lst, &ost->st_locks, st_locks) { + if (lst->st_stateowner == &lo->lo_owner) { + refcount_inc(&lst->st_stid.sc_count); + return lst; + } } - } return NULL; }
@@ -6297,11 +6306,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); retry: spin_lock(&clp->cl_lock); - spin_lock(&fp->fi_lock); - retstp = find_lock_stateid(lo, fp); + if (nfs4_ol_stateid_unhashed(open_stp)) + goto out_close; + retstp = find_lock_stateid(lo, open_stp); if (retstp) - goto out_unlock; - + goto out_found; refcount_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_LOCK_STID; stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); @@ -6310,22 +6319,26 @@ retry: stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; + spin_lock(&fp->fi_lock); list_add(&stp->st_locks, &open_stp->st_locks); list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); -out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&clp->cl_lock); - if (retstp) { - if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { - nfs4_put_stid(&retstp->st_stid); - goto retry; - } - /* To keep mutex tracking happy */ - mutex_unlock(&stp->st_mutex); - stp = retstp; - } return stp; +out_found: + spin_unlock(&clp->cl_lock); + if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { + nfs4_put_stid(&retstp->st_stid); + goto retry; + } + /* To keep mutex tracking happy */ + mutex_unlock(&stp->st_mutex); + return retstp; +out_close: + spin_unlock(&clp->cl_lock); + mutex_unlock(&stp->st_mutex); + return NULL; }
static struct nfs4_ol_stateid * @@ -6340,7 +6353,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
*new = false; spin_lock(&clp->cl_lock); - lst = find_lock_stateid(lo, fi); + lst = find_lock_stateid(lo, ost); spin_unlock(&clp->cl_lock); if (lst != NULL) { if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
From: Jason Gunthorpe jgg@mellanox.com
[ Upstream commit bede86a39d9dc3387ac00dcb8e1ac221676b2f25 ]
When creating a cm_id during REQ, the id immediately becomes visible to the other MAD handlers, and shortly afterwards the state is moved to IB_CM_REQ_RCVD.
This allows cm_rej_handler() to run concurrently and free the work:
    CPU 0                                 CPU1
    cm_req_handler()
      ib_create_cm_id()
      cm_match_req()
        id_priv->state = IB_CM_REQ_RCVD
                                          cm_rej_handler()
                                            cm_acquire_id()
                                            spin_lock(&id_priv->lock)
                                            switch (id_priv->state)
                                            case IB_CM_REQ_RCVD:
                                              cm_reset_to_idle()
                                                kfree(id_priv->timewait_info);
      goto destroy
    destroy:
      kfree(id_priv->timewait_info);
      id_priv->timewait_info = NULL
This causes a double free, or worse.
Do not free the timewait_info without also holding the id_priv->lock. Simplify this entire flow by making the free unconditional during cm_destroy_id() and removing the confusing special case error unwind during creation of the timewait_info.
This also fixes a leak of the timewait if cm_destroy_id() is called in IB_CM_ESTABLISHED with an XRC TGT QP. The state machine will be left in ESTABLISHED while it needed to transition through IB_CM_TIMEWAIT to release the timewait pointer.
Also fix a leak of the timewait_info if the caller mis-uses the API and does ib_send_cm_reqs().
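The destroy-side shape, as a sketch of the diff below (the timewait_info is now freed unconditionally during cm_destroy_id(), and only while id_priv->lock is held):

    spin_lock_irq(&cm_id_priv->lock);
    spin_lock(&cm.lock);
    /* Required for cleanup paths related to cm_req_handler() */
    if (cm_id_priv->timewait_info) {
            cm_cleanup_timewait(cm_id_priv->timewait_info);
            kfree(cm_id_priv->timewait_info);
            cm_id_priv->timewait_info = NULL;
    }
    /* ... altr_list/prim_list cleanup unchanged ... */
    spin_unlock(&cm.lock);
    spin_unlock_irq(&cm_id_priv->lock);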
Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation") Link: https://lore.kernel.org/r/20200310092545.251365-4-leon@kernel.org Signed-off-by: Leon Romanovsky leonro@mellanox.com Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/core/cm.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 09af96ec41dd6..c1d6a068f50fe 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1092,14 +1092,22 @@ retest: break; }
- spin_lock_irq(&cm.lock); + spin_lock_irq(&cm_id_priv->lock); + spin_lock(&cm.lock); + /* Required for cleanup paths related cm_req_handler() */ + if (cm_id_priv->timewait_info) { + cm_cleanup_timewait(cm_id_priv->timewait_info); + kfree(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; + } if (!list_empty(&cm_id_priv->altr_list) && (!cm_id_priv->altr_send_port_not_ready)) list_del(&cm_id_priv->altr_list); if (!list_empty(&cm_id_priv->prim_list) && (!cm_id_priv->prim_send_port_not_ready)) list_del(&cm_id_priv->prim_list); - spin_unlock_irq(&cm.lock); + spin_unlock(&cm.lock); + spin_unlock_irq(&cm_id_priv->lock);
cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); @@ -1416,7 +1424,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, /* Verify that we're not in timewait. */ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id->state != IB_CM_IDLE) { + if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = -EINVAL; goto out; @@ -1434,12 +1442,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, param->ppath_sgid_attr, &cm_id_priv->av, cm_id_priv); if (ret) - goto error1; + goto out; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av, cm_id_priv); if (ret) - goto error1; + goto out; } cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); @@ -1457,7 +1465,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); if (ret) - goto error1; + goto out;
req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; cm_format_req(req_msg, cm_id_priv, param); @@ -1480,7 +1488,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, return 0;
error2: cm_free_msg(cm_id_priv->msg); -error1: kfree(cm_id_priv->timewait_info); out: return ret; } EXPORT_SYMBOL(ib_send_cm_req); @@ -1965,7 +1972,7 @@ static int cm_req_handler(struct cm_work *work) pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__, be32_to_cpu(cm_id->local_id)); ret = -EINVAL; - goto free_timeinfo; + goto destroy; }
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; @@ -2050,8 +2057,6 @@ static int cm_req_handler(struct cm_work *work) rejected: atomic_dec(&cm_id_priv->refcount); cm_deref_id(listen_cm_id_priv); -free_timeinfo: - kfree(cm_id_priv->timewait_info); destroy: ib_destroy_cm_id(cm_id); return ret;
From: Alexander Shishkin alexander.shishkin@linux.intel.com
[ Upstream commit 397c7729665a3b07a7b4ce7215173df8e9112809 ]
Some versions of Intel TH have an issue that prevents the multi mode of MSU from working correctly, resulting in no trace data and a potentially stuck MSU pipeline.
Disable multi mode on such devices.
Signed-off-by: Alexander Shishkin alexander.shishkin@linux.intel.com Reviewed-by: Andy Shevchenko andriy.shevchenko@linux.intel.com Link: https://lore.kernel.org/r/20200317062215.15598-2-alexander.shishkin@linux.in... Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/hwtracing/intel_th/intel_th.h | 2 ++ drivers/hwtracing/intel_th/msu.c | 11 +++++++++-- drivers/hwtracing/intel_th/pci.c | 8 ++++++-- 3 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 6f4f5486fe6dc..5fe694708b7a3 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -47,11 +47,13 @@ struct intel_th_output { /** * struct intel_th_drvdata - describes hardware capabilities and quirks * @tscu_enable: device needs SW to enable time stamping unit + * @multi_is_broken: device has multiblock mode is broken * @has_mintctl: device has interrupt control (MINTCTL) register * @host_mode_only: device can only operate in 'host debugger' mode */ struct intel_th_drvdata { unsigned int tscu_enable : 1, + multi_is_broken : 1, has_mintctl : 1, host_mode_only : 1; }; diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 255f8f41c8ff7..3cd2489d398c5 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -157,7 +157,8 @@ struct msc { /* config */ unsigned int enabled : 1, wrap : 1, - do_irq : 1; + do_irq : 1, + multi_is_broken : 1; unsigned int mode; unsigned int burst_len; unsigned int index; @@ -1665,7 +1666,7 @@ static int intel_th_msc_init(struct msc *msc) { atomic_set(&msc->user_count, -1);
- msc->mode = MSC_MODE_MULTI; + msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI; mutex_init(&msc->buf_mutex); INIT_LIST_HEAD(&msc->win_list); INIT_LIST_HEAD(&msc->iter_list); @@ -1877,6 +1878,9 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf, return -EINVAL;
found: + if (i == MSC_MODE_MULTI && msc->multi_is_broken) + return -EOPNOTSUPP; + mutex_lock(&msc->buf_mutex); ret = 0;
@@ -2083,6 +2087,9 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) if (!res) msc->do_irq = 1;
+ if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken)) + msc->multi_is_broken = 1; + msc->index = thdev->id;
msc->thdev = thdev; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 0d26484d67955..21fdf0b935166 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -120,6 +120,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdev); }
+static const struct intel_th_drvdata intel_th_1x_multi_is_broken = { + .multi_is_broken = 1, +}; + static const struct intel_th_drvdata intel_th_2x = { .tscu_enable = 1, .has_mintctl = 1, @@ -152,7 +156,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = { { /* Kaby Lake PCH-H */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), - .driver_data = (kernel_ulong_t)0, + .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken, }, { /* Denverton */ @@ -207,7 +211,7 @@ static const struct pci_device_id intel_th_pci_id_table[] = { { /* Comet Lake PCH-V */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6), - .driver_data = (kernel_ulong_t)&intel_th_2x, + .driver_data = (kernel_ulong_t)&intel_th_1x_multi_is_broken, }, { /* Ice Lake NNPI */
From: Gustavo Romero gromero@linux.ibm.com
[ Upstream commit 1dff3064c764b5a51c367b949b341d2e38972bec ]
On P9 DD2.2, due to a CPU defect, some TM instructions need to be emulated by KVM. This is handled at first by the hardware raising a softpatch interrupt when certain TM instructions that need KVM assistance are executed in the guest. Although some TM instructions are, per the Power ISA, invalid forms, they can raise a softpatch interrupt too. For instance, the 'tresume.' instruction as defined in the ISA must have bit 31 set (1), but an instruction that matches the 'tresume.' PO and XO opcode fields yet has bit 31 clear (0), like 0x7cfe9ddc, also raises a softpatch interrupt. The same holds for 'treclaim.' and 'trechkpt.' instructions with bit 31 = 0, i.e. 0x7c00075c and 0x7c0007dc, respectively. Hence, if code like the following is executed in the guest it will raise a softpatch interrupt just like a 'tresume.' when the TM facility is enabled ('tabort. 0' in the example is used only to enable the TM facility):
int main() { asm("tabort. 0; .long 0x7cfe9ddc;"); }
Currently in such a case KVM throws a complete trace like:
[345523.705984] WARNING: CPU: 24 PID: 64413 at arch/powerpc/kvm/book3s_hv_tm.c:211 kvmhv_p9_tm_emulation+0x68/0x620 [kvm_hv]
[345523.705985] Modules linked in: kvm_hv(E) xt_conntrack ipt_REJECT nf_reject_ipv4 xt_tcpudp ip6table_mangle ip6table_nat iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter bridge stp llc sch_fq_codel ipmi_powernv at24 vmx_crypto ipmi_devintf ipmi_msghandler ibmpowernv uio_pdrv_genirq kvm opal_prd uio leds_powernv ib_iser rdma_cm iw_cm ib_cm ib_core iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi ip_tables x_tables autofs4 btrfs blake2b_generic zstd_compress raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx libcrc32c xor raid6_pq raid1 raid0 multipath linear tg3 crct10dif_vpmsum crc32c_vpmsum ipr [last unloaded: kvm_hv]
[345523.706030] CPU: 24 PID: 64413 Comm: CPU 0/KVM Tainted: G W E 5.5.0+ #1
[345523.706031] NIP: c0080000072cb9c0 LR: c0080000072b5e80 CTR: c0080000085c7850
[345523.706034] REGS: c000000399467680 TRAP: 0700 Tainted: G W E (5.5.0+)
[345523.706034] MSR: 900000010282b033 <SF,HV,VEC,VSX,EE,FP,ME,IR,DR,RI,LE,TM[E]> CR: 24022428 XER: 00000000
[345523.706042] CFAR: c0080000072b5e7c IRQMASK: 0
GPR00: c0080000072b5e80 c000000399467910 c0080000072db500 c000000375ccc720
GPR04: c000000375ccc720 00000003fbec0000 0000a10395dda5a6 0000000000000000
GPR08: 000000007cfe9ddc 7cfe9ddc000005dc 7cfe9ddc7c0005dc c0080000072cd530
GPR12: c0080000085c7850 c0000003fffeb800 0000000000000001 00007dfb737f0000
GPR16: c0002001edcca558 0000000000000000 0000000000000000 0000000000000001
GPR20: c000000001b21258 c0002001edcca558 0000000000000018 0000000000000000
GPR24: 0000000001000000 ffffffffffffffff 0000000000000001 0000000000001500
GPR28: c0002001edcc4278 c00000037dd80000 800000050280f033 c000000375ccc720
[345523.706062] NIP [c0080000072cb9c0] kvmhv_p9_tm_emulation+0x68/0x620 [kvm_hv]
[345523.706065] LR [c0080000072b5e80] kvmppc_handle_exit_hv.isra.53+0x3e8/0x798 [kvm_hv]
[345523.706066] Call Trace:
[345523.706069] [c000000399467910] [c000000399467940] 0xc000000399467940 (unreliable)
[345523.706071] [c000000399467950] [c000000399467980] 0xc000000399467980
[345523.706075] [c0000003994679f0] [c0080000072bd1c4] kvmhv_run_single_vcpu+0xa1c/0xb80 [kvm_hv]
[345523.706079] [c000000399467ac0] [c0080000072bd8e0] kvmppc_vcpu_run_hv+0x5b8/0xb00 [kvm_hv]
[345523.706087] [c000000399467b90] [c0080000085c93cc] kvmppc_vcpu_run+0x34/0x48 [kvm]
[345523.706095] [c000000399467bb0] [c0080000085c582c] kvm_arch_vcpu_ioctl_run+0x244/0x420 [kvm]
[345523.706101] [c000000399467c40] [c0080000085b7498] kvm_vcpu_ioctl+0x3d0/0x7b0 [kvm]
[345523.706105] [c000000399467db0] [c0000000004adf9c] ksys_ioctl+0x13c/0x170
[345523.706107] [c000000399467e00] [c0000000004adff8] sys_ioctl+0x28/0x80
[345523.706111] [c000000399467e20] [c00000000000b278] system_call+0x5c/0x68
[345523.706112] Instruction dump:
[345523.706114] 419e0390 7f8a4840 409d0048 6d497c00 2f89075d 419e021c 6d497c00 2f8907dd
[345523.706119] 419e01c0 6d497c00 2f8905dd 419e00a4 <0fe00000> 38210040 38600000 ebc1fff0
and then treats the executed instruction as a 'nop'.
However, the POWER9 User's Manual, in section "4.6.10 Book II Invalid Forms", states that for TM instructions bit 31 is in fact ignored; thus, ignoring bit 31 and handling the TM-related invalid forms like their valid counterparts is an acceptable way to handle them. POWER8 behaves the same way.
This commit changes the handling of the cases described above: the TM-related invalid forms that can generate a softpatch interrupt are now treated just like their valid forms (with bit 31 = 1) instead of as a 'nop', and any other unrecognized case is gently reported to the host and treated as an illegal instruction, instead of throwing a trace and treating it as a 'nop'.
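To make the masking concrete, here is the arithmetic for the 'tresume.' example above (a worked example; PO_XOP_OPCODE_MASK = 0xfc0007fe keeps only the primary and extended opcode fields and drops bit 31, and the masked value matches the GPR10 dump in the trace):

    0x7cfe9ddd & 0xfc0007fe == 0x7c0005dc   /* 'tresume.' valid form, bit 31 = 1 */
    0x7cfe9ddc & 0xfc0007fe == 0x7c0005dc   /* invalid form, bit 31 = 0 */

Both values therefore land in the same switch case, (PPC_INST_TSR & PO_XOP_OPCODE_MASK), and are emulated identically.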
Signed-off-by: Gustavo Romero gromero@linux.ibm.com Reviewed-by: Segher Boessenkool segher@kernel.crashing.org Acked-By: Michael Neuling mikey@neuling.org Reviewed-by: Leonardo Bras leonardo@linux.ibm.com Signed-off-by: Paul Mackerras paulus@ozlabs.org Signed-off-by: Sasha Levin sashal@kernel.org --- arch/powerpc/include/asm/kvm_asm.h | 3 +++ arch/powerpc/kvm/book3s_hv_tm.c | 28 ++++++++++++++++++++----- arch/powerpc/kvm/book3s_hv_tm_builtin.c | 16 ++++++++++++-- 3 files changed, 40 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 635fb154b33f9..a3633560493be 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -150,4 +150,7 @@
#define KVM_INST_FETCH_FAILED -1
+/* Extract PO and XOP opcode fields */ +#define PO_XOP_OPCODE_MASK 0xfc0007fe + #endif /* __POWERPC_KVM_ASM_H__ */ diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index 0db9374971697..cc90b8b823291 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -3,6 +3,8 @@ * Copyright 2017 Paul Mackerras, IBM Corp. paulus@au1.ibm.com */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kvm_host.h>
#include <asm/kvm_ppc.h> @@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) u64 newmsr, bescr; int ra, rs;
- switch (instr & 0xfc0007ff) { + /* + * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit + * in these instructions, so masking bit 31 out doesn't change these + * instructions. For treclaim., tsr., and trechkpt. instructions if bit + * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section + * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit + * 31 is an acceptable way to handle these invalid forms that have + * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/ + * bit 31 set) can generate a softpatch interrupt. Hence both forms + * are handled below for these instructions so they behave the same way. + */ + switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? */ newmsr = vcpu->arch.shregs.srr1; @@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = newmsr; return RESUME_GUEST;
- case PPC_INST_TSR: + /* ignore bit 31, see comment above */ + case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* check for PR=1 and arch 2.06 bit set in PCR */ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { /* generate an illegal instruction interrupt */ @@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = msr; return RESUME_GUEST;
- case PPC_INST_TRECLAIM: + /* ignore bit 31, see comment above */ + case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK): /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { /* generate an illegal instruction interrupt */ @@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr &= ~MSR_TS_MASK; return RESUME_GUEST;
- case PPC_INST_TRECHKPT: + /* ignore bit 31, see comment above */ + case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK): /* XXX do we need to check for PR=0 here? */ /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { @@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) }
/* What should we do here? We didn't recognize the instruction */ - WARN_ON_ONCE(1); + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr); + return RESUME_GUEST; } diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index 217246279dfae..fad931f224efd 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) u64 newmsr, msr, bescr; int rs;
- switch (instr & 0xfc0007ff) { + /* + * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit + * in these instructions, so masking bit 31 out doesn't change these + * instructions. For the tsr. instruction if bit 31 = 0 then it is per + * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid + * Forms, informs specifically that ignoring bit 31 is an acceptable way + * to handle TM-related invalid forms that have bit 31 = 0. Moreover, + * for emulation purposes both forms (w/ and wo/ bit 31 set) can + * generate a softpatch interrupt. Hence both forms are handled below + * for tsr. to make them behave the same way. + */ + switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? */ newmsr = vcpu->arch.shregs.srr1; @@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = newmsr; return 1;
- case PPC_INST_TSR: + /* ignore bit 31, see comment above */ + case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* we know the MSR has the TS field = S (0b01) here */ msr = vcpu->arch.shregs.msr; /* check for PR=1 and arch 2.06 bit set in PCR */
From: Pavel Machek pavel@denx.de
[ Upstream commit 66be340f827554cb1c8a1ed7dea97920b4085af2 ]
We should free resources in the unlikely case of allocation failure.
Signed-off-by: Pavel Machek pavel@denx.de Signed-off-by: Rob Clark robdclark@chromium.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/msm/msm_drv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 4558d66761b3c..108632a1f2438 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (!dev->dma_parms) { dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) - return -ENOMEM; + if (!dev->dma_parms) { + ret = -ENOMEM; + goto err_msm_uninit; + } } dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
From: Jordan Crouse jcrouse@codeaurora.org
[ Upstream commit 0478b4fc5f37f4d494245fe7bcce3f531cf380e9 ]
If the OPP table specifies an opp-supported-hw property but the driver has not set a supported hardware value, the OPP subsystem will reject all of the table entries.
Set a "default" value that will match the default table entries but not conflict with any possible real bin values. Also fix a small memory leak and free the buffer allocated by nvmem_cell_read().
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org Reviewed-by: Eric Anholt eric@anholt.net Signed-off-by: Rob Clark robdclark@chromium.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 7829247de60e0..de0ea1d09a54f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1407,18 +1407,31 @@ static const struct adreno_gpu_funcs funcs = { static void check_speed_bin(struct device *dev) { struct nvmem_cell *cell; - u32 bin, val; + u32 val; + + /* + * If the OPP table specifies a opp-supported-hw property then we have + * to set something with dev_pm_opp_set_supported_hw() or the table + * doesn't get populated so pick an arbitrary value that should + * ensure the default frequencies are selected but not conflict with any + * actual bins + */ + val = 0x80;
cell = nvmem_cell_get(dev, "speed_bin");
- /* If a nvmem cell isn't defined, nothing to do */ - if (IS_ERR(cell)) - return; + if (!IS_ERR(cell)) { + void *buf = nvmem_cell_read(cell, NULL); + + if (!IS_ERR(buf)) { + u8 bin = *((u8 *) buf);
- bin = *((u32 *) nvmem_cell_read(cell, NULL)); - nvmem_cell_put(cell); + val = (1 << bin); + kfree(buf); + }
- val = (1 << bin); + nvmem_cell_put(cell); + }
dev_pm_opp_set_supported_hw(dev, &val, 1); }
From: Nathan Chancellor natechancellor@gmail.com
[ Upstream commit bf2cbe044da275021b2de5917240411a19e5c50d ]
Clang warns:
../kernel/trace/trace.c:9335:33: warning: array comparison always evaluates to true [-Wtautological-compare]
        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
                                       ^
1 warning generated.
These are not true arrays; they are linker-defined symbols, which are just addresses. Using the address-of operator silences the warning and does not change the runtime result of the check (tested with some print statements compiled in with clang + ld.lld and gcc + ld.bfd in QEMU).
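A minimal illustration of why the two forms are equivalent (linker-provided section bounds are conventionally declared as arrays whose addresses, not contents, carry the information):

    extern const char __start___trace_bprintk_fmt[];
    extern const char __stop___trace_bprintk_fmt[];

    /* An array expression decays to a pointer to its first element, so
     * both comparisons compile to the same address check; the explicit
     * address-of form merely sidesteps clang's "array comparison always
     * evaluates to true" heuristic. */

    /* before: clang warns */
    if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
            trace_printk_init_buffers();

    /* after: same generated code, no warning */
    if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
            trace_printk_init_buffers();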
Link: http://lkml.kernel.org/r/20200220051011.26113-1-natechancellor@gmail.com
Link: https://github.com/ClangBuiltLinux/linux/issues/893 Suggested-by: Nick Desaulniers ndesaulniers@google.com Signed-off-by: Nathan Chancellor natechancellor@gmail.com Signed-off-by: Steven Rostedt (VMware) rostedt@goodmis.org Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9007f5edbb207..db8162b34ef64 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -9146,7 +9146,7 @@ __init static int tracer_alloc_buffers(void) goto out_free_buffer_mask;
/* Only allocate trace_printk buffers if a trace_printk exists */ - if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) + if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers();
From: Niklas Söderlund niklas.soderlund+renesas@ragnatech.se
[ Upstream commit 39056e8a989ef52486e063e34b4822b341e47b0e ]
If the common register memory resource is not available, the driver needs to fail gracefully so that PM gets disabled. Instead of returning the error directly, store it in ret and use the already existing error path.
Signed-off-by: Niklas Söderlund niklas.soderlund+renesas@ragnatech.se Reviewed-by: Geert Uytterhoeven geert+renesas@glider.be Signed-off-by: Daniel Lezcano daniel.lezcano@linaro.org Link: https://lore.kernel.org/r/20200310114709.1483860-1-niklas.soderlund+renesas@... Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/thermal/rcar_thermal.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index d0873de718da9..43f0cd2bd0ae6 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -526,8 +526,10 @@ static int rcar_thermal_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); common->base = devm_ioremap_resource(dev, res); - if (IS_ERR(common->base)) - return PTR_ERR(common->base); + if (IS_ERR(common->base)) { + ret = PTR_ERR(common->base); + goto error_unregister; + }
idle = 0; /* polling delay is not needed */ }
From: He Zhe zhe.he@windriver.com
[ Upstream commit edec6e015a02003c2af0ce82c54ea016b5a9e3f0 ]
apic->lapic_timer.timer was initialized with HRTIMER_MODE_ABS_HARD but started later with HRTIMER_MODE_ABS, which may cause the following warning on a PREEMPT_RT kernel.
WARNING: CPU: 1 PID: 2957 at kernel/time/hrtimer.c:1129 hrtimer_start_range_ns+0x348/0x3f0
CPU: 1 PID: 2957 Comm: qemu-system-x86 Not tainted 5.4.23-rt11 #1
Hardware name: Supermicro SYS-E300-9A-8C/A2SDi-8C-HLN4F, BIOS 1.1a 09/18/2018
RIP: 0010:hrtimer_start_range_ns+0x348/0x3f0
Code: 4d b8 0f 94 c1 0f b6 c9 e8 35 f1 ff ff 4c 8b 45 b0 e9 3b fd ff ff e8 d7 3f fa ff 48 98 4c 03 34 c5 a0 26 bf 93 e9 a1 fd ff ff <0f> 0b e9 fd fc ff ff 65 8b 05 fa b7 90 6d 89 c0 48 0f a3 05 60 91
RSP: 0018:ffffbc60026ffaf8 EFLAGS: 00010202
RAX: 0000000000000001 RBX: ffff9d81657d4110 RCX: 0000000000000000
RDX: 0000000000000000 RSI: 0000006cc7987bcf RDI: ffff9d81657d4110
RBP: ffffbc60026ffb58 R08: 0000000000000001 R09: 0000000000000010
R10: 0000000000000000 R11: 0000000000000000 R12: 0000006cc7987bcf
R13: 0000000000000000 R14: 0000006cc7987bcf R15: ffffbc60026d6a00
FS:  00007f401daed700(0000) GS:ffff9d81ffa40000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00000000ffffffff CR3: 0000000fa7574000 CR4: 00000000003426e0
Call Trace:
 ? kvm_release_pfn_clean+0x22/0x60 [kvm]
 start_sw_timer+0x85/0x230 [kvm]
 ? vmx_vmexit+0x1b/0x30 [kvm_intel]
 kvm_lapic_switch_to_sw_timer+0x72/0x80 [kvm]
 vmx_pre_block+0x1cb/0x260 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_vmexit+0x1b/0x30 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_vmexit+0x1b/0x30 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_vmexit+0x1b/0x30 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_vmexit+0x1b/0x30 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_vmexit+0x1b/0x30 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_vmexit+0x1b/0x30 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_vmexit+0x1b/0x30 [kvm_intel]
 ? vmx_vmexit+0xf/0x30 [kvm_intel]
 ? vmx_sync_pir_to_irr+0x9e/0x100 [kvm_intel]
 ? kvm_apic_has_interrupt+0x46/0x80 [kvm]
 kvm_arch_vcpu_ioctl_run+0x85b/0x1fa0 [kvm]
 ? _raw_spin_unlock_irqrestore+0x18/0x50
 ? _copy_to_user+0x2c/0x30
 kvm_vcpu_ioctl+0x235/0x660 [kvm]
 ? rt_spin_unlock+0x2c/0x50
 do_vfs_ioctl+0x3e4/0x650
 ? __fget+0x7a/0xa0
 ksys_ioctl+0x67/0x90
 __x64_sys_ioctl+0x1a/0x20
 do_syscall_64+0x4d/0x120
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7f4027cc54a7
Code: 00 00 90 48 8b 05 e9 59 0c 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d b9 59 0c 00 f7 d8 64 89 01 48
RSP: 002b:00007f401dae9858 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 00005558bd029690 RCX: 00007f4027cc54a7
RDX: 0000000000000000 RSI: 000000000000ae80 RDI: 000000000000000d
RBP: 00007f4028b72000 R08: 00005558bc829ad0 R09: 00000000ffffffff
R10: 00005558bcf90ca0 R11: 0000000000000246 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000000 R15: 00005558bce1c840
--[ end trace 0000000000000002 ]--
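The rule being enforced, in miniature (a sketch; the init site already used the _HARD variant, only the start site was out of sync):

    hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
                 HRTIMER_MODE_ABS_HARD);
    /* ... */
    /* Must start with the same mode the timer was initialized with: */
    hrtimer_start(&apic->lapic_timer.timer,
                  apic->lapic_timer.target_expiration,
                  HRTIMER_MODE_ABS_HARD);    /* was HRTIMER_MODE_ABS */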
Signed-off-by: He Zhe zhe.he@windriver.com Message-Id: 1584687967-332859-1-git-send-email-zhe.he@windriver.com Reviewed-by: Wanpeng Li wanpengli@tencent.com Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/kvm/lapic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 6920f1d3b66f5..9f793c9649cdf 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1684,7 +1684,7 @@ static void start_sw_period(struct kvm_lapic *apic)
hrtimer_start(&apic->lapic_timer.timer, apic->lapic_timer.target_expiration, - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_HARD); }
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
From: Ian Rogers irogers@google.com
[ Upstream commit d4953f7ef1a2e87ef732823af35361404d13fea8 ]
The problem is reproducible with a clang ASan build by running 'perf test', in particular 'Parse event definition strings'.
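The underlying problem is an ownership mismatch: perf_evsel__exit() frees evsel->pmu_name, so the setter must store its own copy rather than a borrowed pointer. A minimal userspace sketch of the rule, with hypothetical names:

  #include <stdlib.h>
  #include <string.h>

  struct evsel_like {
          char *pmu_name;         /* owned: freed in the exit path below */
  };

  static int set_pmu_name(struct evsel_like *e, const char *name)
  {
          /* copy, so the caller's buffer can be freed independently */
          e->pmu_name = name ? strdup(name) : NULL;
          return (name && !e->pmu_name) ? -1 : 0;
  }

  static void evsel_like_exit(struct evsel_like *e)
  {
          free(e->pmu_name);      /* safe: we own the copy */
          e->pmu_name = NULL;
  }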
Signed-off-by: Ian Rogers irogers@google.com Acked-by: Jiri Olsa jolsa@redhat.com Cc: Adrian Hunter adrian.hunter@intel.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Andi Kleen ak@linux.intel.com Cc: Leo Yan leo.yan@linaro.org Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Stephane Eranian eranian@google.com Cc: clang-built-linux@googlegroups.com Link: http://lore.kernel.org/lkml/20200314170356.62914-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/evsel.c | 1 + tools/perf/util/parse-events.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a844715a352d8..dfc982baecab4 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1254,6 +1254,7 @@ void perf_evsel__exit(struct evsel *evsel) perf_thread_map__put(evsel->core.threads); zfree(&evsel->group_name); zfree(&evsel->name); + zfree(&evsel->pmu_name); perf_evsel__object.fini(evsel); }
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 422ad1888e74f..2a97a5e3aa91e 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1344,7 +1344,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats, NULL); if (evsel) { - evsel->pmu_name = name; + evsel->pmu_name = name ? strdup(name) : NULL; evsel->use_uncore_alias = use_uncore_alias; return 0; } else { @@ -1385,7 +1385,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, evsel->snapshot = info.snapshot; evsel->metric_expr = info.metric_expr; evsel->metric_name = info.metric_name; - evsel->pmu_name = name; + evsel->pmu_name = name ? strdup(name) : NULL; evsel->use_uncore_alias = use_uncore_alias; evsel->percore = config_term_percore(&evsel->config_terms); }
From: Josef Bacik josef@toxicpanda.com
[ Upstream commit 2abc726ab4b83db774e315c660ab8da21477092f ]
We previously were checking if the root had a dead root before accessing root->reloc_root in order to avoid a use-after-free type bug. However this scenario happens after we've unset the reloc control, so we would have been saved if we'd simply checked for fs_info->reloc_control. At this point during relocation we no longer need to be creating new reloc roots, so simply move this check above the reloc_root checks to avoid any future races and confusion.
Reviewed-by: Qu Wenruo wqu@suse.com Signed-off-by: Josef Bacik josef@toxicpanda.com Reviewed-by: David Sterba dsterba@suse.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/btrfs/relocation.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index af3605a0bf2e0..1313506a7ecb5 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1468,6 +1468,10 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, int clear_rsv = 0; int ret;
+ if (!rc || !rc->create_reloc_tree || + root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) + return 0; + /* * The subvolume has reloc tree but the swap is finished, no need to * create/update the dead reloc tree @@ -1481,10 +1485,6 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, return 0; }
- if (!rc || !rc->create_reloc_tree || - root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) - return 0; - if (!trans->reloc_reserved) { rsv = trans->block_rsv; trans->block_rsv = rc->block_rsv; @@ -2336,6 +2336,18 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, trans = NULL; goto out; } + + /* + * At this point we no longer have a reloc_control, so we can't + * depend on btrfs_init_reloc_root to update our last_trans. + * + * But that's ok, we started the trans handle on our + * corresponding fs_root, which means it's been added to the + * dirty list. At commit time we'll still call + * btrfs_update_reloc_root() and update our root item + * appropriately. + */ + reloc_root->last_trans = trans->transid; trans->block_rsv = rc->block_rsv;
replaced = 0;
From: Josef Bacik josef@toxicpanda.com
[ Upstream commit 1a0afa0ecfc4dbc8d7583d03cafd3f68f781df0c ]
If we have an error while processing the reloc roots we could leak roots that were added to rc->reloc_roots before we hit the error. We could have also not removed the reloc tree mapping from our rb_tree, so clean up any remaining nodes in the reloc root rb_tree.
Signed-off-by: Josef Bacik josef@toxicpanda.com Reviewed-by: David Sterba dsterba@suse.com [ use rbtree_postorder_for_each_entry_safe ] Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/btrfs/relocation.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 1313506a7ecb5..ece53d2f55ae3 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4354,6 +4354,18 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info) return rc; }
+static void free_reloc_control(struct reloc_control *rc) +{ + struct mapping_node *node, *tmp; + + free_reloc_roots(&rc->reloc_roots); + rbtree_postorder_for_each_entry_safe(node, tmp, + &rc->reloc_root_tree.rb_root, rb_node) + kfree(node); + + kfree(rc); +} + /* * Print the block group being relocated */ @@ -4486,7 +4498,7 @@ out: btrfs_dec_block_group_ro(rc->block_group); iput(rc->data_inode); btrfs_put_block_group(rc->block_group); - kfree(rc); + free_reloc_control(rc); return err; }
@@ -4659,7 +4671,7 @@ out_clean: err = ret; out_unset: unset_reloc_control(rc); - kfree(rc); + free_reloc_control(rc); out: if (!list_empty(&reloc_roots)) free_reloc_roots(&reloc_roots);
From: Heiner Kallweit hkallweit1@gmail.com
[ Upstream commit 6b02e407cbf8d421477ebb7792cd6380affcd313 ]
So far only the reset bit is set, but the handler executing the reset is not scheduled. Therefore nothing will happen until some other action schedules the handler. Improve this by ensuring that the handler is scheduled.
Signed-off-by: Heiner Kallweit hkallweit1@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/realtek/r8169_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 6fa9852e3f97f..903212ad9bb2f 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -6256,8 +6256,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) if (unlikely(status & RxFIFOOver && tp->mac_version == RTL_GIGA_MAC_VER_11)) { netif_stop_queue(tp->dev); - /* XXX - Hack alert. See rtl_task(). */ - set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); }
rtl_irq_disable(tp);
From: Vignesh Raghavendra vigneshr@ti.com
[ Upstream commit f19c3f6c8109b8bab000afd35580929958e087a9 ]
When a port's throttle callback is called, the driver should stop pushing any more data into the TTY buffer to avoid a buffer overflow. This means the driver has to stop the HW from receiving more data and assert HW flow control. For UARTs with auto HW flow control (such as 8250_omap), manual assertion of the flow control line is not possible; the only way is to allow the RX FIFO to fill up, thus triggering the auto HW flow control logic.
Therefore make sure that the generic 8250 IRQ handler does not drain data when the port is stopped (i.e. UART_LSR_DR is unset in read_status_mask). Leaving the RX FIFO unserviced triggers auto HW flow control when FIFO occupancy reaches the preset threshold, thus halting RX. Since error conditions in the UART_LSR register are cleared just by reading the register, data still has to be drained when there are FIFO errors, else the error information would be lost.
Signed-off-by: Vignesh Raghavendra vigneshr@ti.com Link: https://lore.kernel.org/r/20200319103230.16867-2-vigneshr@ti.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/tty/serial/8250/8250_port.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 90f09ed6e5ad3..5b673077639ba 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1816,6 +1816,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) unsigned char status; unsigned long flags; struct uart_8250_port *up = up_to_u8250p(port); + bool skip_rx = false;
if (iir & UART_IIR_NO_INT) return 0; @@ -1824,7 +1825,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial_port_in(port, UART_LSR);
- if (status & (UART_LSR_DR | UART_LSR_BI)) { + /* + * If port is stopped and there are no error conditions in the + * FIFO, then don't drain the FIFO, as this may lead to TTY buffer + * overflow. Not servicing, RX FIFO would trigger auto HW flow + * control when FIFO occupancy reaches preset threshold, thus + * halting RX. This only works when auto HW flow control is + * available. + */ + if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && + (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && + !(port->read_status_mask & UART_LSR_DR)) + skip_rx = true; + + if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); }
From: Peter Ujfalusi peter.ujfalusi@ti.com
[ Upstream commit 4ce35a3617c0ac758c61122b2218b6c8c9ac9398 ]
When booting j721e the following bug is printed:
[ 1.154821] BUG: sleeping function called from invalid context at kernel/sched/completion.c:99 [ 1.154827] in_atomic(): 0, irqs_disabled(): 128, non_block: 0, pid: 12, name: kworker/0:1 [ 1.154832] 3 locks held by kworker/0:1/12: [ 1.154836] #0: ffff000840030728 ((wq_completion)events){+.+.}, at: process_one_work+0x1d4/0x6e8 [ 1.154852] #1: ffff80001214fdd8 (deferred_probe_work){+.+.}, at: process_one_work+0x1d4/0x6e8 [ 1.154860] #2: ffff00084060b170 (&dev->mutex){....}, at: __device_attach+0x38/0x138 [ 1.154872] irq event stamp: 63096 [ 1.154881] hardirqs last enabled at (63095): [<ffff800010b74318>] _raw_spin_unlock_irqrestore+0x70/0x78 [ 1.154887] hardirqs last disabled at (63096): [<ffff800010b740d8>] _raw_spin_lock_irqsave+0x28/0x80 [ 1.154893] softirqs last enabled at (62254): [<ffff800010080c88>] _stext+0x488/0x564 [ 1.154899] softirqs last disabled at (62247): [<ffff8000100fdb3c>] irq_exit+0x114/0x140 [ 1.154906] CPU: 0 PID: 12 Comm: kworker/0:1 Not tainted 5.6.0-rc6-next-20200318-00094-g45e4089b0bd3 #221 [ 1.154911] Hardware name: Texas Instruments K3 J721E SoC (DT) [ 1.154917] Workqueue: events deferred_probe_work_func [ 1.154923] Call trace: [ 1.154928] dump_backtrace+0x0/0x190 [ 1.154933] show_stack+0x14/0x20 [ 1.154940] dump_stack+0xe0/0x148 [ 1.154946] ___might_sleep+0x150/0x1f0 [ 1.154952] __might_sleep+0x4c/0x80 [ 1.154957] wait_for_completion_timeout+0x40/0x140 [ 1.154964] ti_sci_set_device_state+0xa0/0x158 [ 1.154969] ti_sci_cmd_get_device_exclusive+0x14/0x20 [ 1.154977] ti_sci_dev_start+0x34/0x50 [ 1.154984] genpd_runtime_resume+0x78/0x1f8 [ 1.154991] __rpm_callback+0x3c/0x140 [ 1.154996] rpm_callback+0x20/0x80 [ 1.155001] rpm_resume+0x568/0x758 [ 1.155007] __pm_runtime_resume+0x44/0xb0 [ 1.155013] omap8250_probe+0x2b4/0x508 [ 1.155019] platform_drv_probe+0x50/0xa0 [ 1.155023] really_probe+0xd4/0x318 [ 1.155028] driver_probe_device+0x54/0xe8 [ 1.155033] __device_attach_driver+0x80/0xb8 [ 1.155039] bus_for_each_drv+0x74/0xc0 [ 1.155044] __device_attach+0xdc/0x138 [ 1.155049] device_initial_probe+0x10/0x18 [ 1.155053] bus_probe_device+0x98/0xa0 [ 1.155058] deferred_probe_work_func+0x74/0xb0 [ 1.155063] process_one_work+0x280/0x6e8 [ 1.155068] worker_thread+0x48/0x430 [ 1.155073] kthread+0x108/0x138 [ 1.155079] ret_from_fork+0x10/0x18
To fix the bug we need to first call pm_runtime_enable() prior to any pm_runtime calls.
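As a minimal probe-ordering sketch (hypothetical driver, not the full omap8250_probe()): pm_runtime_enable() comes before every other pm_runtime_*() call, so the later pm_runtime_get_sync() resumes the device through the normal, sleepable path.

  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>

  static int foo_probe(struct platform_device *pdev)
  {
          device_init_wakeup(&pdev->dev, true);

          /* enable runtime PM before any other pm_runtime_*() call */
          pm_runtime_enable(&pdev->dev);
          pm_runtime_use_autosuspend(&pdev->dev);
          pm_runtime_set_autosuspend_delay(&pdev->dev, -1);

          pm_runtime_get_sync(&pdev->dev);        /* device is now resumed */

          /* ... hardware setup ... */

          pm_runtime_mark_last_busy(&pdev->dev);
          pm_runtime_put_autosuspend(&pdev->dev);
          return 0;
  }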
Reported-by: Tomi Valkeinen tomi.valkeinen@ti.com Signed-off-by: Peter Ujfalusi peter.ujfalusi@ti.com Link: https://lore.kernel.org/r/20200320125200.6772-1-peter.ujfalusi@ti.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/tty/serial/8250/8250_omap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 836e736ae188b..2624b5d083366 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1234,6 +1234,7 @@ static int omap8250_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_dma_lock);
device_init_wakeup(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev);
/* @@ -1247,7 +1248,6 @@ static int omap8250_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
pm_runtime_irq_safe(&pdev->dev); - pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
From: Vignesh Raghavendra vigneshr@ti.com
[ Upstream commit 7cf4df30a98175033e9849f7f16c46e96ba47f41 ]
Terminate and flush the DMA internal buffers before pushing RX data to a higher layer. Otherwise this leads to data corruption, as the driver would end up pushing stale buffer data to the higher layer while the actual data is still stuck inside the DMA hardware and has not yet arrived at the memory. While at it, replace the deprecated dmaengine_terminate_all() with dmaengine_terminate_async().
Signed-off-by: Vignesh Raghavendra vigneshr@ti.com Link: https://lore.kernel.org/r/20200319110344.21348-2-vigneshr@ti.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/tty/serial/8250/8250_omap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 2624b5d083366..f2c6d9d3bb28f 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -790,7 +790,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p) dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
count = dma->rx_size - state.residue; - + if (count < dma->rx_size) + dmaengine_terminate_async(dma->rxchan); + if (!count) + goto unlock; ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
p->port.icount.rx += ret; @@ -852,7 +855,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p) spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
__dma_rx_do_complete(p); - dmaengine_terminate_all(dma->rxchan); }
static int omap_8250_rx_dma(struct uart_8250_port *p)
From: Christophe JAILLET christophe.jaillet@wanadoo.fr
[ Upstream commit d74b181a028bb5a468f0c609553eff6a8fdf4887 ]
'snprintf' returns the number of characters which would be generated for the given input.
If the returned value is *greater than* or equal to the buffer size, it means that the output has been truncated.
Fix the overflow test accordingly.
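For reference, a minimal standalone illustration of the corrected check (hypothetical helper): snprintf() reports the length the untruncated output would have had, so truncation is 'ret >= size', and ret can also be negative on an encoding error.

  #include <stdio.h>

  static int build_path(char *buf, size_t size, const char *mnt)
  {
          int ret = snprintf(buf, size,
                             "%s/devices/system/cpu/possible", mnt);

          if (ret < 0 || (size_t)ret >= size)
                  return -1;      /* encoding error or truncated */
          return 0;
  }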
Fixes: 7780c25bae59f ("perf tools: Allow ability to map cpus to nodes easily") Fixes: 92a7e1278005b ("perf cpumap: Add cpu__max_present_cpu()") Signed-off-by: Christophe JAILLET christophe.jaillet@wanadoo.fr Suggested-by: David Laight David.Laight@ACULAB.COM Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Don Zickus dzickus@redhat.com Cc: He Zhe zhe.he@windriver.com Cc: Jan Stancek jstancek@redhat.com Cc: Jiri Olsa jolsa@redhat.com Cc: Kan Liang kan.liang@linux.intel.com Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: kernel-janitors@vger.kernel.org Link: http://lore.kernel.org/lkml/20200324070319.10901-1-christophe.jaillet@wanado... Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/cpumap.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index a22c1114e880d..324ec0456c83f 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -299,7 +299,7 @@ static void set_max_cpu_num(void)
/* get the highest possible cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -310,7 +310,7 @@ static void set_max_cpu_num(void)
/* get the highest present cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -338,7 +338,7 @@ static void set_max_node_num(void)
/* get the highest possible cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -423,7 +423,7 @@ int cpu__setup_cpunode_map(void) return 0;
n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt); - if (n == PATH_MAX) { + if (n >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); return -1; } @@ -438,7 +438,7 @@ int cpu__setup_cpunode_map(void) continue;
n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); - if (n == PATH_MAX) { + if (n >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); continue; }
From: Andre Przywara andre.przywara@arm.com
[ Upstream commit 24201a64770afe2e17050b2ab9e8c0e24e9c23b2 ]
The DMA error handler routine is currently a tasklet, scheduled to run after the DMA error IRQ was handled. However it needs to take the MDIO mutex, which is not allowed to do in a tasklet. A kernel (with debug options) complains consequently: [ 614.050361] net eth0: DMA Tx error 0x174019 [ 614.064002] net eth0: Current BD is at: 0x8f84aa0ce [ 614.080195] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:935 [ 614.109484] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 40, name: kworker/u4:4 [ 614.135428] 3 locks held by kworker/u4:4/40: [ 614.149075] #0: ffff000879863328 ((wq_completion)rpciod){....}, at: process_one_work+0x1f0/0x6a8 [ 614.177528] #1: ffff80001251bdf8 ((work_completion)(&task->u.tk_work)){....}, at: process_one_work+0x1f0/0x6a8 [ 614.209033] #2: ffff0008784e0110 (sk_lock-AF_INET-RPC){....}, at: tcp_sendmsg+0x24/0x58 [ 614.235429] CPU: 0 PID: 40 Comm: kworker/u4:4 Not tainted 5.6.0-rc3-00926-g4a165a9d5921 #26 [ 614.260854] Hardware name: ARM Test FPGA (DT) [ 614.274734] Workqueue: rpciod rpc_async_schedule [ 614.289022] Call trace: [ 614.296871] dump_backtrace+0x0/0x1a0 [ 614.308311] show_stack+0x14/0x20 [ 614.318751] dump_stack+0xbc/0x100 [ 614.329403] ___might_sleep+0xf0/0x140 [ 614.341018] __might_sleep+0x4c/0x80 [ 614.352201] __mutex_lock+0x5c/0x8a8 [ 614.363348] mutex_lock_nested+0x1c/0x28 [ 614.375654] axienet_dma_err_handler+0x38/0x388 [ 614.389999] tasklet_action_common.isra.15+0x160/0x1a8 [ 614.405894] tasklet_action+0x24/0x30 [ 614.417297] efi_header_end+0xe0/0x494 [ 614.429020] irq_exit+0xd0/0xd8 [ 614.439047] __handle_domain_irq+0x60/0xb0 [ 614.451877] gic_handle_irq+0xdc/0x2d0 [ 614.463486] el1_irq+0xcc/0x180 [ 614.473451] __tcp_transmit_skb+0x41c/0xb58 [ 614.486513] tcp_write_xmit+0x224/0x10a0 [ 614.498792] __tcp_push_pending_frames+0x38/0xc8 [ 614.513126] tcp_rcv_established+0x41c/0x820 [ 614.526301] tcp_v4_do_rcv+0x8c/0x218 [ 614.537784] __release_sock+0x5c/0x108 [ 614.549466] release_sock+0x34/0xa0 [ 614.560318] tcp_sendmsg+0x40/0x58 [ 614.571053] inet_sendmsg+0x40/0x68 [ 614.582061] sock_sendmsg+0x18/0x30 [ 614.593074] xs_sendpages+0x218/0x328 [ 614.604506] xs_tcp_send_request+0xa0/0x1b8 [ 614.617461] xprt_transmit+0xc8/0x4f0 [ 614.628943] call_transmit+0x8c/0xa0 [ 614.640028] __rpc_execute+0xbc/0x6f8 [ 614.651380] rpc_async_schedule+0x28/0x48 [ 614.663846] process_one_work+0x298/0x6a8 [ 614.676299] worker_thread+0x40/0x490 [ 614.687687] kthread+0x134/0x138 [ 614.697804] ret_from_fork+0x10/0x18 [ 614.717319] xilinx_axienet 7fe00000.ethernet eth0: Link is Down [ 615.748343] xilinx_axienet 7fe00000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off
Since tasklets are not really popular anymore anyway, let's convert this over to a work queue, which can sleep and thus can take the MDIO mutex.
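The conversion follows the standard tasklet-to-workqueue pattern; a minimal sketch with illustrative names (the real driver hangs the work item off struct axienet_local):

  #include <linux/mutex.h>
  #include <linux/workqueue.h>

  struct foo_priv {
          struct work_struct err_task;
          struct mutex lock;              /* e.g. the MDIO mutex */
  };

  static void foo_err_handler(struct work_struct *work)
  {
          struct foo_priv *priv =
                  container_of(work, struct foo_priv, err_task);

          mutex_lock(&priv->lock);        /* legal: process context */
          /* ... reset and reconfigure the hardware ... */
          mutex_unlock(&priv->lock);
  }

  /* open path:  INIT_WORK(&priv->err_task, foo_err_handler);
   * IRQ path:   schedule_work(&priv->err_task);
   * close path: cancel_work_sync(&priv->err_task);
   */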
Signed-off-by: Andre Przywara andre.przywara@arm.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/xilinx/xilinx_axienet.h | 2 +- .../net/ethernet/xilinx/xilinx_axienet_main.c | 24 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 2dacfc85b3baa..04e51af32178c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -435,7 +435,7 @@ struct axienet_local { void __iomem *regs; void __iomem *dma_regs;
- struct tasklet_struct dma_err_tasklet; + struct work_struct dma_err_task;
int tx_irq; int rx_irq; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 479325eeaf8a0..345a795666e92 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -806,7 +806,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) /* Write to the Rx channel control register */ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet); + schedule_work(&lp->dma_err_task); axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); } out: @@ -855,7 +855,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) /* write to the Rx channel control register */ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet); + schedule_work(&lp->dma_err_task); axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); } out: @@ -891,7 +891,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev) return IRQ_HANDLED; }
-static void axienet_dma_err_handler(unsigned long data); +static void axienet_dma_err_handler(struct work_struct *work);
/** * axienet_open - Driver open routine. @@ -935,9 +935,8 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
- /* Enable tasklets for Axi DMA error handling */ - tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, - (unsigned long) lp); + /* Enable worker thread for Axi DMA error handling */ + INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
/* Enable interrupts for Axi DMA Tx */ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, @@ -966,7 +965,7 @@ err_rx_irq: err_tx_irq: phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); - tasklet_kill(&lp->dma_err_tasklet); + cancel_work_sync(&lp->dma_err_task); dev_err(lp->dev, "request_irq() failed\n"); return ret; } @@ -1025,7 +1024,7 @@ static int axienet_stop(struct net_device *ndev) axienet_mdio_enable(lp); mutex_unlock(&lp->mii_bus->mdio_lock);
- tasklet_kill(&lp->dma_err_tasklet); + cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0) free_irq(lp->eth_irq, ndev); @@ -1505,17 +1504,18 @@ static const struct phylink_mac_ops axienet_phylink_ops = { };
/** - * axienet_dma_err_handler - Tasklet handler for Axi DMA Error - * @data: Data passed + * axienet_dma_err_handler - Work queue task for Axi DMA Error + * @work: pointer to work_struct * * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the * Tx/Rx BDs. */ -static void axienet_dma_err_handler(unsigned long data) +static void axienet_dma_err_handler(struct work_struct *work) { u32 axienet_status; u32 cr, i; - struct axienet_local *lp = (struct axienet_local *) data; + struct axienet_local *lp = container_of(work, struct axienet_local, + dma_err_task); struct net_device *ndev = lp->ndev; struct axidma_bd *cur_p;
From: Andre Przywara andre.przywara@arm.com
[ Upstream commit ee44d0b78839b21591501424fd3cb3648cc803b5 ]
When we fail to allocate the DMA buffers in axienet_dma_bd_init(), we report this error, but carry on with initialisation nevertheless.
This leads to a kernel panic when the driver later wants to send a packet, as it uses uninitialised data structures.
Make the axienet_device_reset() routine return an error value, as it contains the DMA buffer initialisation. Make sure we propagate the error up the chain and eventually fail the driver initialisation, to avoid relying on non-initialised buffers.
Signed-off-by: Andre Przywara andre.przywara@arm.com Reviewed-by: Radhey Shyam Pandey radhey.shyam.pandey@xilinx.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- .../net/ethernet/xilinx/xilinx_axienet_main.c | 26 ++++++++++++++----- 1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 345a795666e92..bb6e52f3bdf9b 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -437,9 +437,10 @@ static void axienet_setoptions(struct net_device *ndev, u32 options) lp->options |= options; }
-static void __axienet_device_reset(struct axienet_local *lp) +static int __axienet_device_reset(struct axienet_local *lp) { u32 timeout; + /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset * process of Axi DMA takes a while to complete as all pending * commands/transfers will be flushed or completed during this @@ -455,9 +456,11 @@ static void __axienet_device_reset(struct axienet_local *lp) if (--timeout == 0) { netdev_err(lp->ndev, "%s: DMA reset timeout!\n", __func__); - break; + return -ETIMEDOUT; } } + + return 0; }
/** @@ -470,13 +473,17 @@ static void __axienet_device_reset(struct axienet_local *lp) * areconnected to Axi Ethernet reset lines, this in turn resets the Axi * Ethernet core. No separate hardware reset is done for the Axi Ethernet * core. + * Returns 0 on success or a negative error number otherwise. */ -static void axienet_device_reset(struct net_device *ndev) +static int axienet_device_reset(struct net_device *ndev) { u32 axienet_status; struct axienet_local *lp = netdev_priv(ndev); + int ret;
- __axienet_device_reset(lp); + ret = __axienet_device_reset(lp); + if (ret) + return ret;
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; lp->options |= XAE_OPTION_VLAN; @@ -491,9 +498,11 @@ static void axienet_device_reset(struct net_device *ndev) lp->options |= XAE_OPTION_JUMBO; }
- if (axienet_dma_bd_init(ndev)) { + ret = axienet_dma_bd_init(ndev); + if (ret) { netdev_err(ndev, "%s: descriptor allocation failed\n", __func__); + return ret; }
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); @@ -518,6 +527,8 @@ static void axienet_device_reset(struct net_device *ndev) axienet_setoptions(ndev, lp->options);
netif_trans_update(ndev); + + return 0; }
/** @@ -921,8 +932,9 @@ static int axienet_open(struct net_device *ndev) */ mutex_lock(&lp->mii_bus->mdio_lock); axienet_mdio_disable(lp); - axienet_device_reset(ndev); - ret = axienet_mdio_enable(lp); + ret = axienet_device_reset(ndev); + if (ret == 0) + ret = axienet_mdio_enable(lp); mutex_unlock(&lp->mii_bus->mdio_lock); if (ret < 0) return ret;
From: Pratik Rajesh Sampat psampat@linux.ibm.com
[ Upstream commit d95fe371ecd28901f11256c610b988ed44e36ee2 ]
This patch avoids allocating a struct cpufreq_policy on the stack, hence fixing the frame size overflow in 'powernv_cpufreq_work_fn'.
Fixes: 227942809b52 ("cpufreq: powernv: Restore cpu frequency to policy->cur on unthrottling") Signed-off-by: Pratik Rajesh Sampat psampat@linux.ibm.com Reviewed-by: Daniel Axtens dja@axtens.net Signed-off-by: Michael Ellerman mpe@ellerman.id.au Link: https://lore.kernel.org/r/20200316135743.57735-1-psampat@linux.ibm.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/cpufreq/powernv-cpufreq.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 1806b1da43665..3a2f022f6bde2 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -902,6 +902,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = { void powernv_cpufreq_work_fn(struct work_struct *work) { struct chip *chip = container_of(work, struct chip, throttle); + struct cpufreq_policy *policy; unsigned int cpu; cpumask_t mask;
@@ -916,12 +917,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work) chip->restore = false; for_each_cpu(cpu, &mask) { int index; - struct cpufreq_policy policy;
- cpufreq_get_policy(&policy, cpu); - index = cpufreq_table_find_index_c(&policy, policy.cur); - powernv_cpufreq_target_index(&policy, index); - cpumask_andnot(&mask, &mask, policy.cpus); + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + index = cpufreq_table_find_index_c(policy, policy->cur); + powernv_cpufreq_target_index(policy, index); + cpumask_andnot(&mask, &mask, policy->cpus); + cpufreq_cpu_put(policy); } out: put_online_cpus();
From: Gabriel Ravier gabravier@gmail.com
[ Upstream commit d1ee7e1f5c9191afb69ce46cc7752e4257340a31 ]
If '-o' was used more than 64 times in a single invocation of gpio-hammer, this could lead to an overflow of the 'lines' array. This commit fixes that by avoiding the overflow and giving a proper diagnostic back to the user.
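Reduced to a standalone sketch (hypothetical MAX_LINES in place of GPIOHANDLES_MAX), the pattern is: keep counting every '-o' so the diagnostic is exact, but only store while the index is in range.

  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>

  #define MAX_LINES 64

  int main(int argc, char **argv)
  {
          unsigned int lines[MAX_LINES];
          int i = 0, c;

          while ((c = getopt(argc, argv, "o:")) != -1) {
                  if (c == 'o') {
                          if (i < MAX_LINES)      /* never write past the array */
                                  lines[i] = strtoul(optarg, NULL, 10);
                          i++;                    /* but keep the true count */
                  }
          }
          if (i > MAX_LINES) {
                  fprintf(stderr, "at most %d '-o' options allowed, got %d\n",
                          MAX_LINES, i);
                  return 1;
          }
          /* ... hammer lines[0..i-1] ... */
          return 0;
  }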
Signed-off-by: Gabriel Ravier gabravier@gmail.com Signed-off-by: Bartosz Golaszewski bgolaszewski@baylibre.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/gpio/gpio-hammer.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c index 0e0060a6eb346..083399d276e4e 100644 --- a/tools/gpio/gpio-hammer.c +++ b/tools/gpio/gpio-hammer.c @@ -135,7 +135,14 @@ int main(int argc, char **argv) device_name = optarg; break; case 'o': - lines[i] = strtoul(optarg, NULL, 10); + /* + * Avoid overflow. Do not immediately error, we want to + * be able to accurately report on the amount of times + * '-o' was given to give an accurate error message + */ + if (i < GPIOHANDLES_MAX) + lines[i] = strtoul(optarg, NULL, 10); + i++; break; case '?': @@ -143,6 +150,14 @@ int main(int argc, char **argv) return -1; } } + + if (i >= GPIOHANDLES_MAX) { + fprintf(stderr, + "Only %d occurences of '-o' are allowed, %d were found\n", + GPIOHANDLES_MAX, i + 1); + return -1; + } + nlines = i;
if (!device_name || !nlines) {
From: "Eric W. Biederman" ebiederm@xmission.com
[ Upstream commit eea9673250db4e854e9998ef9da6d4584857f0ea ]
The cred_guard_mutex is problematic as it is held over possibly indefinite waits for userspace. The possible indefinite waits for userspace that I have identified are:
The cred_guard_mutex is held in PTRACE_EVENT_EXIT waiting for the tracer.
The cred_guard_mutex is held over "put_user(0, tsk->clear_child_tid)" in exit_mm().
The cred_guard_mutex is held over "get_user(futex_offset, ...)" in exit_robust_list().
The cred_guard_mutex is held over copy_strings().
The functions get_user and put_user can trigger a page fault which can potentially wait indefinitely in the case of userfaultfd or if userspace implements part of the page fault path.
In any of those cases, the userspace process that the kernel is waiting for might make a different system call that winds up taking the cred_guard_mutex, resulting in deadlock.
Holding a mutex over any of those possibly indefinite waits for userspace does not appear necessary. Add exec_update_mutex, which will just cover updating the process during exec, where the permissions and the objects pointed to by the task struct may be out of sync.
The plan is to switch the users of cred_guard_mutex to exec_update_mutex one by one. This lets us move forward while still being careful and not introducing any regressions.
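The intended usage pattern for readers of another task's exec-updated state, as the follow-up conversions below adopt it, is a short killable critical section; a sketch with a hypothetical accessor:

  #include <linux/sched/signal.h>

  static int inspect_task(struct task_struct *task)
  {
          int err;

          err = mutex_lock_killable(&task->signal->exec_update_mutex);
          if (err)
                  return err;

          /*
           * task->mm, credentials, etc. are consistent here, and exec
           * never waits on userspace while holding this mutex.
           */
          /* ... read task state ... */

          mutex_unlock(&task->signal->exec_update_mutex);
          return 0;
  }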
Link: https://lore.kernel.org/lkml/20160921152946.GA24210@dhcp22.suse.cz/ Link: https://lore.kernel.org/lkml/AM6PR03MB5170B06F3A2B75EFB98D071AE4E60@AM6PR03M... Link: https://lore.kernel.org/linux-fsdevel/20161102181806.GB1112@redhat.com/ Link: https://lore.kernel.org/lkml/20160923095031.GA14923@redhat.com/ Link: https://lore.kernel.org/lkml/20170213141452.GA30203@redhat.com/ Ref: 45c1a159b85b ("Add PTRACE_O_TRACEVFORKDONE and PTRACE_O_TRACEEXIT facilities.") Ref: 456f17cd1a28 ("[PATCH] user-vm-unlock-2.5.31-A2") Reviewed-by: Kirill Tkhai ktkhai@virtuozzo.com Signed-off-by: "Eric W. Biederman" ebiederm@xmission.com Signed-off-by: Bernd Edlinger bernd.edlinger@hotmail.de Signed-off-by: Eric W. Biederman ebiederm@xmission.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/exec.c | 22 +++++++++++++++++++--- include/linux/binfmts.h | 8 +++++++- include/linux/sched/signal.h | 9 ++++++++- init/init_task.c | 1 + kernel/fork.c | 1 + 5 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c index d62cd1d71098f..de833553ae27d 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1007,16 +1007,26 @@ ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len) } EXPORT_SYMBOL(read_code);
+/* + * Maps the mm_struct mm into the current task struct. + * On success, this function returns with the mutex + * exec_update_mutex locked. + */ static int exec_mmap(struct mm_struct *mm) { struct task_struct *tsk; struct mm_struct *old_mm, *active_mm; + int ret;
/* Notify parent that we're no longer interested in the old VM */ tsk = current; old_mm = current->mm; exec_mm_release(tsk, old_mm);
+ ret = mutex_lock_killable(&tsk->signal->exec_update_mutex); + if (ret) + return ret; + if (old_mm) { sync_mm_rss(old_mm); /* @@ -1028,9 +1038,11 @@ static int exec_mmap(struct mm_struct *mm) down_read(&old_mm->mmap_sem); if (unlikely(old_mm->core_state)) { up_read(&old_mm->mmap_sem); + mutex_unlock(&tsk->signal->exec_update_mutex); return -EINTR; } } + task_lock(tsk); active_mm = tsk->active_mm; membarrier_exec_mmap(mm); @@ -1285,11 +1297,12 @@ int flush_old_exec(struct linux_binprm * bprm) goto out;
/* - * After clearing bprm->mm (to mark that current is using the - * prepared mm now), we have nothing left of the original + * After setting bprm->called_exec_mmap (to mark that current is + * using the prepared mm now), we have nothing left of the original * process. If anything from here on returns an error, the check * in search_binary_handler() will SEGV current. */ + bprm->called_exec_mmap = 1; bprm->mm = NULL;
set_fs(USER_DS); @@ -1423,6 +1436,8 @@ static void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); if (bprm->cred) { + if (bprm->called_exec_mmap) + mutex_unlock(¤t->signal->exec_update_mutex); mutex_unlock(¤t->signal->cred_guard_mutex); abort_creds(bprm->cred); } @@ -1472,6 +1487,7 @@ void install_exec_creds(struct linux_binprm *bprm) * credentials; any time after this it may be unlocked. */ security_bprm_committed_creds(bprm); + mutex_unlock(¤t->signal->exec_update_mutex); mutex_unlock(¤t->signal->cred_guard_mutex); } EXPORT_SYMBOL(install_exec_creds); @@ -1663,7 +1679,7 @@ int search_binary_handler(struct linux_binprm *bprm)
read_lock(&binfmt_lock); put_binfmt(fmt); - if (retval < 0 && !bprm->mm) { + if (retval < 0 && bprm->called_exec_mmap) { /* we got to flush_old_exec() and failed after it */ read_unlock(&binfmt_lock); force_sigsegv(SIGSEGV); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index b40fc633f3be6..a345d9fed3d8d 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -44,7 +44,13 @@ struct linux_binprm { * exec has happened. Used to sanitize execution environment * and to set AT_SECURE auxv for glibc. */ - secureexec:1; + secureexec:1, + /* + * Set by flush_old_exec, when exec_mmap has been called. + * This is past the point of no return, when the + * exec_update_mutex has been taken. + */ + called_exec_mmap:1; #ifdef __alpha__ unsigned int taso:1; #endif diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 88050259c466e..a29df79540ce6 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -224,7 +224,14 @@ struct signal_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on * credential calculations - * (notably. ptrace) */ + * (notably. ptrace) + * Deprecated do not use in new code. + * Use exec_update_mutex instead. + */ + struct mutex exec_update_mutex; /* Held while task_struct is being + * updated during exec, and may have + * inconsistent permissions. + */ } __randomize_layout;
/* diff --git a/init/init_task.c b/init/init_task.c index 9e5cbe5eab7b1..bd403ed3e4184 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -26,6 +26,7 @@ static struct signal_struct init_signals = { .multiprocess = HLIST_HEAD_INIT, .rlim = INIT_RLIMITS, .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex), + .exec_update_mutex = __MUTEX_INITIALIZER(init_signals.exec_update_mutex), #ifdef CONFIG_POSIX_TIMERS .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers), .cputimer = { diff --git a/kernel/fork.c b/kernel/fork.c index 9180f4416dbab..cfdc57658ad88 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1586,6 +1586,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) sig->oom_score_adj_min = current->signal->oom_score_adj_min;
mutex_init(&sig->cred_guard_mutex); + mutex_init(&sig->exec_update_mutex);
return 0; }
From: Bernd Edlinger bernd.edlinger@hotmail.de
[ Upstream commit 3e74fabd39710ee29fa25618d2c2b40cfa7d76c7 ]
This fixes a deadlock in the tracer when tracing a multi-threaded application that calls execve while more than one thread is running.
I observed that when running strace on the gcc test suite, it always blocks after a while, when expect calls execve, because other threads have to be terminated. They send ptrace events, but strace is no longer able to respond, since it is blocked in mm_access.
The deadlock always happens when strace needs to access the tracee's process mmap, while another thread in the tracee starts to execve a child process, but that cannot continue until the PTRACE_EVENT_EXIT is handled and the WIFEXITED event is received:
strace D 0 30614 30584 0x00000000 Call Trace: __schedule+0x3ce/0x6e0 schedule+0x5c/0xd0 schedule_preempt_disabled+0x15/0x20 __mutex_lock.isra.13+0x1ec/0x520 __mutex_lock_killable_slowpath+0x13/0x20 mutex_lock_killable+0x28/0x30 mm_access+0x27/0xa0 process_vm_rw_core.isra.3+0xff/0x550 process_vm_rw+0xdd/0xf0 __x64_sys_process_vm_readv+0x31/0x40 do_syscall_64+0x64/0x220 entry_SYSCALL_64_after_hwframe+0x44/0xa9
expect D 0 31933 30876 0x80004003 Call Trace: __schedule+0x3ce/0x6e0 schedule+0x5c/0xd0 flush_old_exec+0xc4/0x770 load_elf_binary+0x35a/0x16c0 search_binary_handler+0x97/0x1d0 __do_execve_file.isra.40+0x5d4/0x8a0 __x64_sys_execve+0x49/0x60 do_syscall_64+0x64/0x220 entry_SYSCALL_64_after_hwframe+0x44/0xa9
This changes mm_access to use the new exec_update_mutex instead of cred_guard_mutex.
This patch is based on the following patch by Eric W. Biederman: "[PATCH 0/5] Infrastructure to allow fixing exec deadlocks" Link: https://lore.kernel.org/lkml/87v9ne5y4y.fsf_-_@x220.int.ebiederm.org/
Signed-off-by: Bernd Edlinger bernd.edlinger@hotmail.de Reviewed-by: Kees Cook keescook@chromium.org Signed-off-by: Eric W. Biederman ebiederm@xmission.com Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/fork.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c index cfdc57658ad88..594272569a80f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1221,7 +1221,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) struct mm_struct *mm; int err;
- err = mutex_lock_killable(&task->signal->cred_guard_mutex); + err = mutex_lock_killable(&task->signal->exec_update_mutex); if (err) return ERR_PTR(err);
@@ -1231,7 +1231,7 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) mmput(mm); mm = ERR_PTR(-EACCES); } - mutex_unlock(&task->signal->cred_guard_mutex); + mutex_unlock(&task->signal->exec_update_mutex);
return mm; }
From: Bernd Edlinger bernd.edlinger@hotmail.de
[ Upstream commit 2de4e82318c7f9d34f4b08599a612cd4cd10bf0b ]
This adds test cases for ptrace deadlocks.
Additionally, this fixes a compile problem in get_syscall_info.c, observed with gcc-4.8.4:
get_syscall_info.c: In function 'get_syscall_info': get_syscall_info.c:93:3: error: 'for' loop initial declarations are only allowed in C99 mode for (unsigned int i = 0; i < ARRAY_SIZE(args); ++i) { ^ get_syscall_info.c:93:3: note: use option -std=c99 or -std=gnu99 to compile your code
Signed-off-by: Bernd Edlinger bernd.edlinger@hotmail.de Reviewed-by: Kees Cook keescook@chromium.org Signed-off-by: Eric W. Biederman ebiederm@xmission.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/testing/selftests/ptrace/Makefile | 4 +- tools/testing/selftests/ptrace/vmaccess.c | 86 +++++++++++++++++++++++ 2 files changed, 88 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/ptrace/vmaccess.c
diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile index c0b7f89f09300..2f1f532c39dbc 100644 --- a/tools/testing/selftests/ptrace/Makefile +++ b/tools/testing/selftests/ptrace/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only -CFLAGS += -iquote../../../../include/uapi -Wall +CFLAGS += -std=c99 -pthread -iquote../../../../include/uapi -Wall
-TEST_GEN_PROGS := get_syscall_info peeksiginfo +TEST_GEN_PROGS := get_syscall_info peeksiginfo vmaccess
include ../lib.mk diff --git a/tools/testing/selftests/ptrace/vmaccess.c b/tools/testing/selftests/ptrace/vmaccess.c new file mode 100644 index 0000000000000..4db327b445862 --- /dev/null +++ b/tools/testing/selftests/ptrace/vmaccess.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2020 Bernd Edlinger bernd.edlinger@hotmail.de + * All rights reserved. + * + * Check whether /proc/$pid/mem can be accessed without causing deadlocks + * when de_thread is blocked with ->cred_guard_mutex held. + */ + +#include "../kselftest_harness.h" +#include <stdio.h> +#include <fcntl.h> +#include <pthread.h> +#include <signal.h> +#include <unistd.h> +#include <sys/ptrace.h> + +static void *thread(void *arg) +{ + ptrace(PTRACE_TRACEME, 0, 0L, 0L); + return NULL; +} + +TEST(vmaccess) +{ + int f, pid = fork(); + char mm[64]; + + if (!pid) { + pthread_t pt; + + pthread_create(&pt, NULL, thread, NULL); + pthread_join(pt, NULL); + execlp("true", "true", NULL); + } + + sleep(1); + sprintf(mm, "/proc/%d/mem", pid); + f = open(mm, O_RDONLY); + ASSERT_GE(f, 0); + close(f); + f = kill(pid, SIGCONT); + ASSERT_EQ(f, 0); +} + +TEST(attach) +{ + int s, k, pid = fork(); + + if (!pid) { + pthread_t pt; + + pthread_create(&pt, NULL, thread, NULL); + pthread_join(pt, NULL); + execlp("sleep", "sleep", "2", NULL); + } + + sleep(1); + k = ptrace(PTRACE_ATTACH, pid, 0L, 0L); + ASSERT_EQ(errno, EAGAIN); + ASSERT_EQ(k, -1); + k = waitpid(-1, &s, WNOHANG); + ASSERT_NE(k, -1); + ASSERT_NE(k, 0); + ASSERT_NE(k, pid); + ASSERT_EQ(WIFEXITED(s), 1); + ASSERT_EQ(WEXITSTATUS(s), 0); + sleep(1); + k = ptrace(PTRACE_ATTACH, pid, 0L, 0L); + ASSERT_EQ(k, 0); + k = waitpid(-1, &s, 0); + ASSERT_EQ(k, pid); + ASSERT_EQ(WIFSTOPPED(s), 1); + ASSERT_EQ(WSTOPSIG(s), SIGSTOP); + k = ptrace(PTRACE_DETACH, pid, 0L, 0L); + ASSERT_EQ(k, 0); + k = waitpid(-1, &s, 0); + ASSERT_EQ(k, pid); + ASSERT_EQ(WIFEXITED(s), 1); + ASSERT_EQ(WEXITSTATUS(s), 0); + k = waitpid(-1, NULL, 0); + ASSERT_EQ(k, -1); + ASSERT_EQ(errno, ECHILD); +} + +TEST_HARNESS_MAIN
From: Bernd Edlinger bernd.edlinger@hotmail.de
[ Upstream commit 454e3126cb842388e22df6b3ac3da44062c00765 ]
This changes kcmp_epoll_target to use the new exec_update_mutex instead of cred_guard_mutex.
This should be safe, as the credentials are only used for reading, and furthermore ->mm and ->sighand are updated on execve, but only under the new exec_update_mutex.
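kcmp takes two tasks' mutexes at once; the deadlock-free way to do that, which kcmp_lock() in the hunk below implements, is to impose a stable locking order, e.g. by mutex address. Sketched generically:

  #include <linux/kernel.h>
  #include <linux/mutex.h>

  static int lock_pair(struct mutex *m1, struct mutex *m2)
  {
          int err;

          if (m2 > m1)            /* impose a stable order by address */
                  swap(m1, m2);

          err = mutex_lock_killable(m1);
          if (!err && likely(m1 != m2)) {
                  err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
                  if (err)
                          mutex_unlock(m1);
          }
          return err;
  }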
Signed-off-by: Bernd Edlinger bernd.edlinger@hotmail.de Signed-off-by: Eric W. Biederman ebiederm@xmission.com Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/kcmp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/kcmp.c b/kernel/kcmp.c index a0e3d7a0e8b81..b3ff9288c6cc9 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c @@ -173,8 +173,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, /* * One should have enough rights to inspect task details. */ - ret = kcmp_lock(&task1->signal->cred_guard_mutex, - &task2->signal->cred_guard_mutex); + ret = kcmp_lock(&task1->signal->exec_update_mutex, + &task2->signal->exec_update_mutex); if (ret) goto err; if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) || @@ -229,8 +229,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, }
err_unlock: - kcmp_unlock(&task1->signal->cred_guard_mutex, - &task2->signal->cred_guard_mutex); + kcmp_unlock(&task1->signal->exec_update_mutex, + &task2->signal->exec_update_mutex); err: put_task_struct(task1); put_task_struct(task2);
From: Bernd Edlinger bernd.edlinger@hotmail.de
[ Upstream commit 2db9dbf71bf98d02a0bf33e798e5bfd2a9944696 ]
This changes lock_trace to use the new exec_update_mutex instead of cred_guard_mutex.
This fixes possible deadlocks when the tracer is accessing /proc/$pid/stack for instance.
This should be safe, as the credentials are only used for reading, and task->mm is updated on execve under the new exec_update_mutex.
Signed-off-by: Bernd Edlinger bernd.edlinger@hotmail.de Signed-off-by: Eric W. Biederman ebiederm@xmission.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/proc/base.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/proc/base.c b/fs/proc/base.c index ebea9501afb84..4fdfe4faa74ee 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -403,11 +403,11 @@ print0:
static int lock_trace(struct task_struct *task) { - int err = mutex_lock_killable(&task->signal->cred_guard_mutex); + int err = mutex_lock_killable(&task->signal->exec_update_mutex); if (err) return err; if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) { - mutex_unlock(&task->signal->cred_guard_mutex); + mutex_unlock(&task->signal->exec_update_mutex); return -EPERM; } return 0; @@ -415,7 +415,7 @@ static int lock_trace(struct task_struct *task)
static void unlock_trace(struct task_struct *task) { - mutex_unlock(&task->signal->cred_guard_mutex); + mutex_unlock(&task->signal->exec_update_mutex); }
#ifdef CONFIG_STACKTRACE
From: Bernd Edlinger bernd.edlinger@hotmail.de
[ Upstream commit 76518d3798855242817e8a8ed76b2d72f4415624 ]
This changes do_io_accounting to use the new exec_update_mutex instead of cred_guard_mutex.
This fixes possible deadlocks when the tracer is accessing /proc/$pid/io for instance.
This should be safe, as the credentials are only used for reading.
Signed-off-by: Bernd Edlinger bernd.edlinger@hotmail.de Signed-off-by: Eric W. Biederman ebiederm@xmission.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/proc/base.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/proc/base.c b/fs/proc/base.c index 4fdfe4faa74ee..529d0c6ec6f9c 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2770,7 +2770,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh unsigned long flags; int result;
- result = mutex_lock_killable(&task->signal->cred_guard_mutex); + result = mutex_lock_killable(&task->signal->exec_update_mutex); if (result) return result;
@@ -2806,7 +2806,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh result = 0;
out_unlock: - mutex_unlock(&task->signal->cred_guard_mutex); + mutex_unlock(&task->signal->exec_update_mutex); return result; }
From: Bernd Edlinger bernd.edlinger@hotmail.de
[ Upstream commit 6914303824bb572278568330d72fc1f8f9814e67 ]
This changes perf_event_set_clock to use the new exec_update_mutex instead of cred_guard_mutex.
This should be safe, as the credentials are only used for reading.
Signed-off-by: Bernd Edlinger bernd.edlinger@hotmail.de Signed-off-by: Eric W. Biederman ebiederm@xmission.com Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/events/core.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c index db1f5aa755f22..47646050efa0c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1253,7 +1253,7 @@ static void put_ctx(struct perf_event_context *ctx) * function. * * Lock order: - * cred_guard_mutex + * exec_update_mutex * task_struct::perf_event_mutex * perf_event_context::mutex * perf_event::child_mutex; @@ -11002,14 +11002,14 @@ SYSCALL_DEFINE5(perf_event_open, }
if (task) { - err = mutex_lock_interruptible(&task->signal->cred_guard_mutex); + err = mutex_lock_interruptible(&task->signal->exec_update_mutex); if (err) goto err_task;
/* * Reuse ptrace permission checks for now. * - * We must hold cred_guard_mutex across this and any potential + * We must hold exec_update_mutex across this and any potential * perf_install_in_context() call for this new event to * serialize against exec() altering our credentials (and the * perf_event_exit_task() that could imply). @@ -11298,7 +11298,7 @@ SYSCALL_DEFINE5(perf_event_open, mutex_unlock(&ctx->mutex);
if (task) { - mutex_unlock(&task->signal->cred_guard_mutex); + mutex_unlock(&task->signal->exec_update_mutex); put_task_struct(task); }
@@ -11334,7 +11334,7 @@ err_alloc: free_event(event); err_cred: if (task) - mutex_unlock(&task->signal->cred_guard_mutex); + mutex_unlock(&task->signal->exec_update_mutex); err_task: if (task) put_task_struct(task); @@ -11639,7 +11639,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) /* * When a child task exits, feed back event values to parent events. * - * Can be called with cred_guard_mutex held when called from + * Can be called with exec_update_mutex held when called from * install_exec_creds(). */ void perf_event_exit_task(struct task_struct *child)
From: John Meneghini johnm@netapp.com
[ Upstream commit 764e9332098c0e60251386a507fe46ac91276120 ]
The nvme multipath error handling defaults to controller reset if the error is unknown. There are, however, no existing nvme status codes that indicate a reset should be used, and resetting causes unnecessary disruption to the rest of IO.
Change nvme's error handling to first check if failover should happen. If not, let the normal error handling take over rather than reset the controller.
Based-on-a-patch-by: Christoph Hellwig hch@lst.de Reviewed-by: Hannes Reinecke hare@suse.de Signed-off-by: John Meneghini johnm@netapp.com Signed-off-by: Keith Busch kbusch@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/core.c | 5 +---- drivers/nvme/host/multipath.c | 21 +++++++++------------ drivers/nvme/host/nvme.h | 5 +++-- 3 files changed, 13 insertions(+), 18 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 3cb017fa3a790..05688fa579e81 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -288,11 +288,8 @@ void nvme_complete_rq(struct request *req) nvme_req(req)->ctrl->comp_seen = true;
if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { - if ((req->cmd_flags & REQ_NVME_MPATH) && - blk_path_error(status)) { - nvme_failover_req(req); + if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req)) return; - }
if (!blk_queue_dying(req->q)) { nvme_retry_req(req); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 0a458f7880887..3968f89f7855a 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -65,17 +65,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, } }
-void nvme_failover_req(struct request *req) +bool nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; u16 status = nvme_req(req)->status; unsigned long flags;
- spin_lock_irqsave(&ns->head->requeue_lock, flags); - blk_steal_bios(&ns->head->requeue_list, req); - spin_unlock_irqrestore(&ns->head->requeue_lock, flags); - blk_mq_end_request(req, 0); - switch (status & 0x7ff) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: @@ -104,15 +99,17 @@ void nvme_failover_req(struct request *req) nvme_mpath_clear_current_path(ns); break; default: - /* - * Reset the controller for any non-ANA error as we don't know - * what caused the error. - */ - nvme_reset_ctrl(ns->ctrl); - break; + /* This was a non-ANA error so follow the normal error path. */ + return false; }
+ spin_lock_irqsave(&ns->head->requeue_lock, flags); + blk_steal_bios(&ns->head->requeue_list, req); + spin_unlock_irqrestore(&ns->head->requeue_lock, flags); + blk_mq_end_request(req, 0); + kblockd_schedule_work(&ns->head->requeue_work); + return true; }
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 056953bd8bd81..80bfffa943ccd 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -530,7 +530,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); -void nvme_failover_req(struct request *req); +bool nvme_failover_req(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); @@ -579,8 +579,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); }
-static inline void nvme_failover_req(struct request *req) +static inline bool nvme_failover_req(struct request *req) { + return false; } static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) {
From: Israel Rukshin israelr@mellanox.com
[ Upstream commit b780d7415aacec855e2f2370cbf98f918b224903 ]
If nvme_sysfs_delete() is called by the user before the ctrl reference count is taken, the ctrl may be freed while it is still being created, causing a use-after-free. Take the reference as soon as the controller becomes externally visible, which happens when cdev_device_add() is called in nvme_init_ctrl(). Also take the reference count at the core layer instead of taking it in each transport separately.
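The general rule being applied, sketched with a hypothetical kref-based controller (nvme wraps this in nvme_get_ctrl()/nvme_put_ctrl()): take the reference that the user-visible node owns before publishing it, and drop it on the failure path.

  #include <linux/cdev.h>
  #include <linux/fs.h>
  #include <linux/kref.h>

  struct ctrl_like {
          struct kref ref;
          struct cdev cdev;
          struct device *device;
  };

  static void ctrl_release(struct kref *ref)
  {
          /* ... free the controller ... */
  }

  static int ctrl_publish(struct ctrl_like *ctrl,
                          const struct file_operations *fops)
  {
          int ret;

          kref_get(&ctrl->ref);   /* reference owned by the visible cdev */
          cdev_init(&ctrl->cdev, fops);
          ret = cdev_device_add(&ctrl->cdev, ctrl->device);
          if (ret)
                  kref_put(&ctrl->ref, ctrl_release); /* never published */
          return ret;
  }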
Signed-off-by: Israel Rukshin israelr@mellanox.com Reviewed-by: Max Gurtovoy maxg@mellanox.com Reviewed-by: Christoph Hellwig hch@lst.de Signed-off-by: Keith Busch kbusch@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/core.c | 2 ++ drivers/nvme/host/fc.c | 4 +--- drivers/nvme/host/pci.c | 1 - drivers/nvme/host/rdma.c | 3 +-- drivers/nvme/host/tcp.c | 3 +-- drivers/nvme/target/loop.c | 3 +-- 6 files changed, 6 insertions(+), 10 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 05688fa579e81..e51cc83969034 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4082,6 +4082,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, if (ret) goto out_release_instance;
+ nvme_get_ctrl(ctrl); cdev_init(&ctrl->cdev, &nvme_dev_fops); ctrl->cdev.owner = ops->module; ret = cdev_device_add(&ctrl->cdev, ctrl->device); @@ -4100,6 +4101,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
return 0; out_free_name: + nvme_put_ctrl(ctrl); kfree_const(ctrl->device->kobj.name); out_release_instance: ida_simple_remove(&nvme_instance_ida, ctrl->instance); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index dce4d6782ceb1..e5b5ded422b33 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3170,10 +3170,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, goto fail_ctrl; }
- nvme_get_ctrl(&ctrl->ctrl); - if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { - nvme_put_ctrl(&ctrl->ctrl); dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to schedule initial connect\n", ctrl->cnum); @@ -3198,6 +3195,7 @@ fail_ctrl:
/* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl);
/* Remove core ctrl ref. */ nvme_put_ctrl(&ctrl->ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 100da11ce98cb..f6c35c135764c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2850,7 +2850,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
nvme_reset_ctrl(&dev->ctrl); - nvme_get_ctrl(&dev->ctrl); async_schedule(nvme_async_probe, dev);
return 0; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index d0336545e1fe0..0bf79bf9d708f 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -2053,8 +2053,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: NQN "%s", addr %pISpcs\n", ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
- nvme_get_ctrl(&ctrl->ctrl); - mutex_lock(&nvme_rdma_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); mutex_unlock(&nvme_rdma_ctrl_mutex); @@ -2064,6 +2062,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 0166ff0e4738e..187122129a1da 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2369,8 +2369,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: NQN "%s", addr %pISp\n", ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
- nvme_get_ctrl(&ctrl->ctrl); - mutex_lock(&nvme_tcp_ctrl_mutex); list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); mutex_unlock(&nvme_tcp_ctrl_mutex); @@ -2380,6 +2378,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 11f5aea97d1b1..82b87a4c50f63 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -619,8 +619,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, dev_info(ctrl->ctrl.device, "new ctrl: "%s"\n", ctrl->ctrl.opts->subsysnqn);
- nvme_get_ctrl(&ctrl->ctrl); - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed);
@@ -638,6 +636,7 @@ out_free_queues: kfree(ctrl->queues); out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); + nvme_put_ctrl(&ctrl->ctrl); out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); if (ret > 0)
From: Israel Rukshin israelr@mellanox.com
[ Upstream commit ce1518139e6976cf19c133b555083354fdb629b8 ]
Calling nvme_sysfs_delete() when the controller is in the middle of creation may cause several bugs. If the controller is in the NEW state, we remove the delete_controller file and don't delete the controller; the user will then not be able to use the nvme disconnect command on that controller again, although the controller may be active. Other bugs may happen if the controller is in the middle of its create_ctrl callback when nvme_do_delete_ctrl() starts, for example freeing the I/O tagset in nvme_do_delete_ctrl() before it has been allocated by the create_ctrl callback.

To fix all those races, don't allow the user to delete the controller before it has been fully created.
Signed-off-by: Israel Rukshin israelr@mellanox.com Reviewed-by: Max Gurtovoy maxg@mellanox.com Reviewed-by: Christoph Hellwig hch@lst.de Signed-off-by: Keith Busch kbusch@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/core.c | 5 +++++ drivers/nvme/host/nvme.h | 1 + 2 files changed, 6 insertions(+)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e51cc83969034..f01fe2d910b54 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3197,6 +3197,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ /* Can't delete non-created controllers */ + if (!ctrl->created) + return -EBUSY; + if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -3992,6 +3996,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) nvme_queue_scan(ctrl); nvme_start_queues(ctrl); } + ctrl->created = true; } EXPORT_SYMBOL_GPL(nvme_start_ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 80bfffa943ccd..7d57c42a641ca 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -253,6 +253,7 @@ struct nvme_ctrl { struct nvme_command ka_cmd; struct work_struct fw_act_work; unsigned long events; + bool created;
#ifdef CONFIG_NVME_MULTIPATH /* asymmetric namespace access: */
From: Raveendran Somu raveendran.somu@cypress.com
[ Upstream commit 78179869dc3f5c0059bbf5d931a2717f1ad97ecd ]
When brcmf_fws_process_skb() fails to get a hanger slot for queuing the skb, it tries to free the skb. But its caller, brcmf_netdev_start_xmit(), also frees the packet on an error return value. This double free caused a kernel crash.
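For illustration only, here is a minimal user-space sketch of the ownership rule the fix restores (all names are hypothetical; this is not the driver code): when a callee signals failure through its return value, the caller owns the cleanup, so the callee must not free the buffer itself.

  #include <stdio.h>
  #include <stdlib.h>

  /* Hypothetical callee: on failure it only reports the error.
   * It must NOT free pkt, because the caller frees on error. */
  static int enqueue_pkt(void *pkt)
  {
          int no_slot = 1;        /* pretend no hanger slot is free */

          (void)pkt;
          if (no_slot) {
                  fprintf(stderr, "no hanger slot available\n");
                  return -1;      /* caller sees the error and cleans up */
          }
          /* ...queue pkt; ownership transfers on success... */
          return 0;
  }

  int main(void)
  {
          void *pkt = malloc(64);

          if (enqueue_pkt(pkt) != 0)
                  free(pkt);      /* exactly one free on the error path */
          return 0;
  }

The patch applies this convention: the error branch now only logs and sets rc, leaving the free to brcmf_netdev_start_xmit().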
Signed-off-by: Raveendran Somu raveendran.somu@cypress.com Signed-off-by: Chi-hsien Lin chi-hsien.lin@cypress.com Signed-off-by: Kalle Valo kvalo@codeaurora.org Link: https://lore.kernel.org/r/1585124429-97371-3-git-send-email-chi-hsien.lin@cy... Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index eadc64454839d..3d36b6ee158bb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -2149,8 +2149,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb); brcmf_fws_schedule_deq(fws); } else { - bphy_err(drvr, "drop skb: no hanger slot\n"); - brcmf_txfinalize(ifp, skb, false); + bphy_err(drvr, "no hanger slot available\n"); rc = -ENOMEM; } brcmf_fws_unlock(fws);
From: "Darrick J. Wong" darrick.wong@oracle.com
[ Upstream commit 27fb5a72f50aa770dd38b0478c07acacef97e3e7 ]
I noticed that fsfreeze can take a very long time to freeze an XFS filesystem if there happens to be a GETFSMAP caller running in the background. I also happened to notice the following in dmesg:
------------[ cut here ]------------
WARNING: CPU: 2 PID: 43492 at fs/xfs/xfs_super.c:853 xfs_quiesce_attr+0x83/0x90 [xfs]
Modules linked in: xfs libcrc32c ip6t_REJECT nf_reject_ipv6 ipt_REJECT nf_reject_ipv4 ip_set_hash_ip ip_set_hash_net xt_tcpudp xt_set ip_set_hash_mac ip_set nfnetlink ip6table_filter ip6_tables bfq iptable_filter sch_fq_codel ip_tables x_tables nfsv4 af_packet [last unloaded: xfs]
CPU: 2 PID: 43492 Comm: xfs_io Not tainted 5.6.0-rc4-djw #rc4
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.10.2-1ubuntu1 04/01/2014
RIP: 0010:xfs_quiesce_attr+0x83/0x90 [xfs]
Code: 7c 07 00 00 85 c0 75 22 48 89 df 5b e9 96 c1 00 00 48 c7 c6 b0 2d 38 a0 48 89 df e8 57 64 ff ff 8b 83 7c 07 00 00 85 c0 74 de <0f> 0b 48 89 df 5b e9 72 c1 00 00 66 90 0f 1f 44 00 00 41 55 41 54
RSP: 0018:ffffc900030f3e28 EFLAGS: 00010202
RAX: 0000000000000001 RBX: ffff88802ac54000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffffffff81e4a6f0 RDI: 00000000ffffffff
RBP: ffff88807859f070 R08: 0000000000000001 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000010 R12: 0000000000000000
R13: ffff88807859f388 R14: ffff88807859f4b8 R15: ffff88807859f5e8
FS:  00007fad1c6c0fc0(0000) GS:ffff88807e000000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f0c7d237000 CR3: 0000000077f01003 CR4: 00000000001606a0
Call Trace:
 xfs_fs_freeze+0x25/0x40 [xfs]
 freeze_super+0xc8/0x180
 do_vfs_ioctl+0x70b/0x750
 ? __fget_files+0x135/0x210
 ksys_ioctl+0x3a/0xb0
 __x64_sys_ioctl+0x16/0x20
 do_syscall_64+0x50/0x1a0
 entry_SYSCALL_64_after_hwframe+0x49/0xbe
These two things appear to be related. The assertion trips when another thread initiates an fsmap request (which uses an empty transaction) after the freezer has waited for m_active_trans to hit zero, but before the freezer executes the WARN_ON just prior to calling xfs_log_quiesce.
The lengthy delays in freezing happen because the freezer calls xfs_wait_buftarg to clean out the buffer LRU list. Meanwhile, the GETFSMAP caller keeps grabbing and releasing buffers, so it can take a very long time for the buffer LRU list to empty out.
We fix both of these races by calling sb_start_write to obtain freeze protection while using empty transactions for GETFSMAP and for metadata scrubbing. The other two users run during mount, at which point the filesystem cannot be frozen.
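As a rough sketch of the pattern the patch applies (kernel-style pseudocode; do_walk() is a hypothetical stand-in for the GETFSMAP/scrub loops), freeze protection simply brackets the whole empty-transaction walk, so a concurrent freeze must wait for it to finish:

  #include <linux/fs.h>

  int do_walk(struct xfs_mount *mp);      /* hypothetical walker */

  /* Sketch only; the real call sites are in the diff below. */
  static int walk_metadata_with_empty_trans(struct xfs_mount *mp)
  {
          int error;

          sb_start_write(mp->m_super);    /* block fs freeze while we run */
          error = do_walk(mp);            /* grabs/releases buffers using a
                                           * zero-reservation transaction */
          sb_end_write(mp->m_super);      /* let a pending freeze proceed */
          return error;
  }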
Signed-off-by: Darrick J. Wong darrick.wong@oracle.com Reviewed-by: Dave Chinner dchinner@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/xfs/scrub/scrub.c | 9 +++++++++ fs/xfs/xfs_fsmap.c | 9 +++++++++ fs/xfs/xfs_trans.c | 5 +++++ 3 files changed, 23 insertions(+)
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c index 15c8c5f3f688d..720bef5779989 100644 --- a/fs/xfs/scrub/scrub.c +++ b/fs/xfs/scrub/scrub.c @@ -167,6 +167,7 @@ xchk_teardown( xfs_irele(sc->ip); sc->ip = NULL; } + sb_end_write(sc->mp->m_super); if (sc->flags & XCHK_REAPING_DISABLED) xchk_start_reaping(sc); if (sc->flags & XCHK_HAS_QUOTAOFFLOCK) { @@ -489,6 +490,14 @@ xfs_scrub_metadata( sc.ops = &meta_scrub_ops[sm->sm_type]; sc.sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type); retry_op: + /* + * If freeze runs concurrently with a scrub, the freeze can be delayed + * indefinitely as we walk the filesystem and iterate over metadata + * buffers. Freeze quiesces the log (which waits for the buffer LRU to + * be emptied) and that won't happen while checking is running. + */ + sb_start_write(mp->m_super); + /* Set up for the operation. */ error = sc.ops->setup(&sc, ip); if (error) diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index d082143feb5ab..c13754e119be1 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -895,6 +895,14 @@ xfs_getfsmap( info.format_arg = arg; info.head = head;
+ /* + * If fsmap runs concurrently with a scrub, the freeze can be delayed + * indefinitely as we walk the rmapbt and iterate over metadata + * buffers. Freeze quiesces the log (which waits for the buffer LRU to + * be emptied) and that won't happen while we're reading buffers. + */ + sb_start_write(mp->m_super); + /* For each device we support... */ for (i = 0; i < XFS_GETFSMAP_DEVS; i++) { /* Is this device within the range the user asked for? */ @@ -934,6 +942,7 @@ xfs_getfsmap(
if (tp) xfs_trans_cancel(tp); + sb_end_write(mp->m_super); head->fmh_oflags = FMH_OF_DEV_T; return error; } diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index f4795fdb7389c..b32a66452d441 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -306,6 +306,11 @@ xfs_trans_alloc( * * Note the zero-length reservation; this transaction MUST be cancelled * without any dirty data. + * + * Callers should obtain freeze protection to avoid two conflicts with fs + * freezing: (1) having active transactions trip the m_active_trans ASSERTs; + * and (2) grabbing buffers at the same time that freeze is trying to drain + * the buffer LRU list. */ int xfs_trans_alloc_empty(
From: Zhu Yanjun yanjunz@mellanox.com
[ Upstream commit d0ca2c35dd15a3d989955caec02beea02f735ee6 ]
The RXE driver doesn't set sys_image_guid, so user space applications see zeros. This causes pyverbs tests to fail with the following traceback, because the IBTA spec requires a valid sys_image_guid.
Traceback (most recent call last):
  File "./tests/test_device.py", line 51, in test_query_device
    self.verify_device_attr(attr)
  File "./tests/test_device.py", line 74, in verify_device_attr
    assert attr.sys_image_guid != 0
In order to fix it, set sys_image_guid to be equal to node_guid.
Before: 5: rxe0: ... node_guid 5054:00ff:feaa:5363 sys_image_guid 0000:0000:0000:0000
After: 5: rxe0: ... node_guid 5054:00ff:feaa:5363 sys_image_guid 5054:00ff:feaa:5363
Fixes: 8700e3e7c485 ("Soft RoCE driver") Link: https://lore.kernel.org/r/20200323112800.1444784-1-leon@kernel.org Signed-off-by: Zhu Yanjun yanjunz@mellanox.com Signed-off-by: Leon Romanovsky leonro@mellanox.com Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/sw/rxe/rxe.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index a8c11b5e1e943..a92aca1745c16 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -116,6 +116,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe) rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN; rxe->attr.max_pkeys = RXE_MAX_PKEYS; rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY; + addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid, + rxe->ndev->dev_addr);
rxe->max_ucontext = RXE_MAX_UCONTEXT; }
From: Sergey Gorenko sergeygo@mellanox.com
[ Upstream commit 26e28deb813eed908cf31a6052870b6493ec0e86 ]
libiscsi calls the check_protection transport handler only if a SCSI Response is received. So the handler is never called if an iSCSI task is completed for some other reason, like a timeout or error handling, and this behavior looks correct. But iSER does not handle this case properly, because it puts an unchecked signature MR back into the free pool. An error then occurs when the MR is reused, because a signature MR may not be invalidated without being checked first.

This commit adds an extra check to iser_unreg_mem_fastreg(), which is part of the task cleanup flow. The signature MR is now checked there if needed.
Link: https://lore.kernel.org/r/20200325151210.1548-1-sergeygo@mellanox.com Signed-off-by: Sergey Gorenko sergeygo@mellanox.com Reviewed-by: Max Gurtovoy maxg@mellanox.com Signed-off-by: Jason Gunthorpe jgg@mellanox.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/infiniband/ulp/iser/iser_memory.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2cc89a9b9e9bb..ea8e611397a3b 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -292,12 +292,27 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, { struct iser_device *device = iser_task->iser_conn->ib_conn.device; struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir]; + struct iser_fr_desc *desc; + struct ib_mr_status mr_status;
- if (!reg->mem_h) + desc = reg->mem_h; + if (!desc) return;
- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, - reg->mem_h); + /* + * The signature MR cannot be invalidated and reused without checking. + * libiscsi calls the check_protection transport handler only if + * SCSI-Response is received. And the signature MR is not checked if + * the task is completed for some other reason like a timeout or error + * handling. That's why we must check the signature MR here before + * putting it to the free pool. + */ + if (unlikely(desc->sig_protected)) { + desc->sig_protected = false; + ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS, + &mr_status); + } + device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc); reg->mem_h = NULL; }
From: Don Brace don.brace@microsemi.com
[ Upstream commit 3e16e83a62edac7617bfd8dbb4e55d04ff6adbe1 ]
Correct a race condition where ioaccel is re-enabled before the raid_map is updated. For RAID_1, RAID_1ADM, and RAID 5/6, a BUG_ON is triggered, which is bad.
- Change the event thread to disable ioaccel only. Send all requests down the RAID path instead.

- Have the rescan thread handle offload_enabled.

- Since only one rescan is allowed at a time, turning offload_enabled on/off should not be racy. Each handler queues up a rescan if one is already in progress.

- For the timing diagram below, offload_enabled is initially off due to a change (transformation: splitmirror/remirror), ...
otbe = offload_to_be_enabled
oe   = offload_enabled

Time   Event        Rescan                Completion  Request
       Worker       Worker                Thread      Thread
----   ------       ------                ----------  -------
T0      |            |                     + UA        |
T1      |            + rescan started      | 0x3f      |
T2      + Event      |                     | 0x0e      |
T3      + Ack msg    |                     |           |
T4      |            + if (!dev[i]->oe &&  |           |
T5      |            |     dev[i]->otbe)   |           |
T6      |            |     get_raid_map    |           |
T7      + otbe = 1   |                     |           |
T8      |            |                     |           |
T9      |            + oe = otbe           |           |
T10     |            |                     |           + ioaccel request
T11                                                    * BUG_ON
T0  - I/O completion with UA 0x3f 0x0e sets the rescan flag.
T1  - rescan worker thread starts a rescan.
T2  - event comes in.
T3  - event thread starts and issues an "Acknowledge" message.
...
T6  - rescan thread has bypassed the code to reload the new raid map.
...
T7  - event thread runs and sets offload_to_be_enabled.
...
T9  - rescan thread turns on offload_enabled.
T10 - request comes in and goes down the ioaccel path.
T11 - BUG_ON.
- After the patch is applied, ioaccel_enabled can only be re-enabled in the re-scan thread.
Link: https://lore.kernel.org/r/158472877894.14200.7077843399036368335.stgit@brunh... Reviewed-by: Scott Teel scott.teel@microsemi.com Reviewed-by: Matt Perricone matt.perricone@microsemi.com Reviewed-by: Scott Benesh scott.benesh@microsemi.com Signed-off-by: Don Brace don.brace@microsemi.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/hpsa.c | 80 ++++++++++++++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 23 deletions(-)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 216e557f703e6..e67cb4561aace 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -504,6 +504,12 @@ static ssize_t host_store_rescan(struct device *dev, return count; }
+static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) +{ + device->offload_enabled = 0; + device->offload_to_be_enabled = 0; +} + static ssize_t host_show_firmware_revision(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1738,8 +1744,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, __func__, h->scsi_host->host_no, logical_drive->bus, logical_drive->target, logical_drive->lun); - logical_drive->offload_enabled = 0; - logical_drive->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(logical_drive); logical_drive->queue_depth = 8; } } @@ -2499,8 +2504,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h, IOACCEL2_SERV_RESPONSE_FAILURE) { if (c2->error_data.status == IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { - dev->offload_enabled = 0; - dev->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(dev); }
if (dev->in_reset) { @@ -3670,10 +3674,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, this_device->offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); if (this_device->offload_config) { - this_device->offload_to_be_enabled = + bool offload_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); - if (hpsa_get_raid_map(h, scsi3addr, this_device)) - this_device->offload_to_be_enabled = 0; + /* + * Check to see if offload can be enabled. + */ + if (offload_enabled) { + rc = hpsa_get_raid_map(h, scsi3addr, this_device); + if (rc) /* could not load raid_map */ + goto out; + this_device->offload_to_be_enabled = 1; + } }
out: @@ -3996,8 +4007,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; - this_device->offload_enabled = 0; - this_device->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(this_device); this_device->hba_ioaccel_enabled = 0; this_device->volume_offline = 0; this_device->queue_depth = h->nr_cmds; @@ -5230,8 +5240,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, /* Handles load balance across RAID 1 members. * (2-drive R1 and R10 with even # of drives.) * Appropriate for SSDs, not optimal for HDDs + * Ensure we have the correct raid_map. */ - BUG_ON(le16_to_cpu(map->layout_map_count) != 2); + if (le16_to_cpu(map->layout_map_count) != 2) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } if (dev->offload_to_mirror) map_index += le16_to_cpu(map->data_disks_per_row); dev->offload_to_mirror = !dev->offload_to_mirror; @@ -5239,8 +5253,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, case HPSA_RAID_ADM: /* Handles N-way mirrors (R1-ADM) * and R10 with # of drives divisible by 3.) + * Ensure we have the correct raid_map. */ - BUG_ON(le16_to_cpu(map->layout_map_count) != 3); + if (le16_to_cpu(map->layout_map_count) != 3) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + }
offload_to_mirror = dev->offload_to_mirror; raid_map_helper(map, offload_to_mirror, @@ -5265,7 +5283,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, r5or6_blocks_per_row = le16_to_cpu(map->strip_size) * le16_to_cpu(map->data_disks_per_row); - BUG_ON(r5or6_blocks_per_row == 0); + if (r5or6_blocks_per_row == 0) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } stripesize = r5or6_blocks_per_row * le16_to_cpu(map->layout_map_count); #if BITS_PER_LONG == 32 @@ -8285,7 +8306,7 @@ static int detect_controller_lockup(struct ctlr_info *h) * * Called from monitor controller worker (hpsa_event_monitor_worker) * - * A Volume (or Volumes that comprise an Array set may be undergoing a + * A Volume (or Volumes that comprise an Array set) may be undergoing a * transformation, so we will be turning off ioaccel for all volumes that * make up the Array. */ @@ -8308,6 +8329,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) * Run through current device list used during I/O requests. */ for (i = 0; i < h->ndevices; i++) { + int offload_to_be_enabled = 0; + int offload_config = 0; + device = h->dev[i];
if (!device) @@ -8325,25 +8349,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) continue;
ioaccel_status = buf[IOACCEL_STATUS_BYTE]; - device->offload_config = + + /* + * Check if offload is still configured on + */ + offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); - if (device->offload_config) - device->offload_to_be_enabled = + /* + * If offload is configured on, check to see if ioaccel + * needs to be enabled. + */ + if (offload_config) + offload_to_be_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+ /* + * If ioaccel is to be re-enabled, re-enable later during the + * scan operation so the driver can get a fresh raidmap + * before turning ioaccel back on. + */ + if (offload_to_be_enabled) + continue; + /* * Immediately turn off ioaccel for any volume the * controller tells us to. Some of the reasons could be: * transformation - change to the LVs of an Array. * degraded volume - component failure - * - * If ioaccel is to be re-enabled, re-enable later during the - * scan operation so the driver can get a fresh raidmap - * before turning ioaccel back on. - * */ - if (!device->offload_to_be_enabled) - device->offload_enabled = 0; + hpsa_turn_off_ioaccel_for_device(device); }
kfree(buf);
From: Christophe JAILLET christophe.jaillet@wanadoo.fr
[ Upstream commit b25b60d7bfb02a74bc3c2d998e09aab159df8059 ]
'maxlen' is the total size of the destination buffer. There is only one caller and this value is 256.
When we compute the size already used and what we would like to add to the buffer, the trailing NULL character is not taken into account. However, this trailing character will be added by 'strcat' once we have checked that there is enough room.

So there is an off-by-one issue, and 1 byte of the stack could be erroneously overwritten.

Take the trailing NULL into account when checking whether there is enough room in the destination buffer.
While at it, also replace 'sprintf' with the safer 'snprintf', check for output truncation, and avoid a superfluous 'strlen'.
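A self-contained demonstration of the off-by-one (illustrative only; the buffer size is made up): with the old "len + slen > maxlen" test, a string that fills the buffer exactly passes the check, and the terminating NUL written by strcat() lands one byte past the end. Testing "len + slen >= maxlen" leaves room for the terminator.

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char buf[8] = "";                  /* maxlen == 8 */
          const char *entry = "tcp 655\n";   /* slen == 8 */
          size_t len = strlen(buf);
          size_t slen = strlen(entry);

          /* Buggy check: 0 + 8 > 8 is false, so strcat() would copy
           * 8 chars plus a NUL into an 8-byte buffer: 1 byte overflow. */
          if (len + slen > sizeof(buf))
                  puts("buggy check rejects the entry");

          /* Fixed check: 0 + 8 >= 8 is true, so the entry is skipped
           * and the terminator always fits. */
          if (len + slen >= sizeof(buf))
                  puts("fixed check rejects the entry");
          return 0;
  }

Running this prints only "fixed check rejects the entry", showing that the buggy check would have let the overflow happen.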
Fixes: dc9a16e49dbba ("svc: Add /proc/sys/sunrpc/transport files") Signed-off-by: Christophe JAILLET christophe.jaillet@wanadoo.fr [ cel: very minor fix to documenting comment ] Signed-off-by: Chuck Lever chuck.lever@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- net/sunrpc/svc_xprt.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index dc74519286be5..fe4cd0b4c4127 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -104,8 +104,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl) } EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
-/* - * Format the transport list for printing +/** + * svc_print_xprts - Format the transport list for printing + * @buf: target buffer for formatted address + * @maxlen: length of target buffer + * + * Fills in @buf with a string containing a list of transport names, each name + * terminated with '\n'. If the buffer is too small, some entries may be + * missing, but it is guaranteed that all lines in the output buffer are + * complete. + * + * Returns positive length of the filled-in string. */ int svc_print_xprts(char *buf, int maxlen) { @@ -118,9 +127,9 @@ int svc_print_xprts(char *buf, int maxlen) list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { int slen;
- sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); - slen = strlen(tmpstr); - if (len + slen > maxlen) + slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n", + xcl->xcl_name, xcl->xcl_max_payload); + if (slen >= sizeof(tmpstr) || len + slen >= maxlen) break; len += slen; strcat(buf, tmpstr);
From: Chuck Lever chuck.lever@oracle.com
[ Upstream commit 1a33d8a284b1e85e03b8c7b1ea8fb985fccd1d71 ]
Kernel memory leak detected:
unreferenced object 0xffff888849cdf480 (size 8):
  comm "kworker/u8:3", pid 2086, jiffies 4297898756 (age 4269.856s)
  hex dump (first 8 bytes):
    30 00 cd 49 88 88 ff ff  0..I....
  backtrace:
    [<00000000acfc370b>] __kmalloc_track_caller+0x137/0x183
    [<00000000a2724354>] kstrdup+0x2b/0x43
    [<0000000082964f84>] xprt_rdma_format_addresses+0x114/0x17d [rpcrdma]
    [<00000000dfa6ed00>] xprt_setup_rdma_bc+0xc0/0x10c [rpcrdma]
    [<0000000073051a83>] xprt_create_transport+0x3f/0x1a0 [sunrpc]
    [<0000000053531a8e>] rpc_create+0x118/0x1cd [sunrpc]
    [<000000003a51b5f8>] setup_callback_client+0x1a5/0x27d [nfsd]
    [<000000001bd410af>] nfsd4_process_cb_update.isra.7+0x16c/0x1ac [nfsd]
    [<000000007f4bbd56>] nfsd4_run_cb_work+0x4c/0xbd [nfsd]
    [<0000000055c5586b>] process_one_work+0x1b2/0x2fe
    [<00000000b1e3e8ef>] worker_thread+0x1a6/0x25a
    [<000000005205fb78>] kthread+0xf6/0xfb
    [<000000006d2dc057>] ret_from_fork+0x3a/0x50
Introduce a call to xprt_rdma_free_addresses() similar to the way that the TCP backchannel releases a transport's peer address strings.
Fixes: 5d252f90a800 ("svcrdma: Add class for RDMA backwards direction transport") Signed-off-by: Chuck Lever chuck.lever@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index cf80394b2db33..325eef1f85824 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -252,6 +252,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt) { dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
+ xprt_rdma_free_addresses(xprt); xprt_free(xprt); }
From: Qian Cai cai@lca.pw
[ Upstream commit 0a6a9515fe390976cd762c52d8d4f446d7a14285 ]
It is safe to traverse &net->nft.tables with &net->nft.commit_mutex held using list_for_each_entry_rcu(). Silence the PROVE_RCU_LIST false positive,
WARNING: suspicious RCU usage
net/netfilter/nf_tables_api.c:523 RCU-list traversed in non-reader section!!
other info that might help us debug this:
rcu_scheduler_active = 2, debug_locks = 1
1 lock held by iptables/1384:
 #0: ffffffff9745c4a8 (&net->nft.commit_mutex){+.+.}, at: nf_tables_valid_genid+0x25/0x60 [nf_tables]
Call Trace:
 dump_stack+0xa1/0xea
 lockdep_rcu_suspicious+0x103/0x10d
 nft_table_lookup.part.0+0x116/0x120 [nf_tables]
 nf_tables_newtable+0x12c/0x7d0 [nf_tables]
 nfnetlink_rcv_batch+0x559/0x1190 [nfnetlink]
 nfnetlink_rcv+0x1da/0x210 [nfnetlink]
 netlink_unicast+0x306/0x460
 netlink_sendmsg+0x44b/0x770
 ____sys_sendmsg+0x46b/0x4a0
 ___sys_sendmsg+0x138/0x1a0
 __sys_sendmsg+0xb6/0x130
 __x64_sys_sendmsg+0x48/0x50
 do_syscall_64+0x69/0xf4
 entry_SYSCALL_64_after_hwframe+0x49/0xb3
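The idiom used in the fix can be shown in a standalone kernel-style sketch (names hypothetical): list_for_each_entry_rcu() accepts an optional fourth argument, so a traversal protected by a mutex rather than rcu_read_lock() can pass a lockdep condition that tells PROVE_RCU_LIST why it is safe.

  #include <linux/list.h>
  #include <linux/mutex.h>
  #include <linux/rculist.h>

  struct item {
          struct list_head list;
          int key;
  };

  static LIST_HEAD(items);
  static DEFINE_MUTEX(items_mutex);       /* writers hold this mutex */

  /* Called with items_mutex held instead of rcu_read_lock(). */
  static struct item *item_lookup(int key)
  {
          struct item *it;

          list_for_each_entry_rcu(it, &items, list,
                                  lockdep_is_held(&items_mutex)) {
                  if (it->key == key)
                          return it;
          }
          return NULL;
  }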
Signed-off-by: Qian Cai cai@lca.pw Acked-by: Florian Westphal fw@strlen.de Signed-off-by: Pablo Neira Ayuso pablo@netfilter.org Signed-off-by: Sasha Levin sashal@kernel.org --- net/netfilter/nf_tables_api.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2023650c27249..ff2d2b514506e 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -456,7 +456,8 @@ static struct nft_table *nft_table_lookup(const struct net *net, if (nla == NULL) return ERR_PTR(-EINVAL);
- list_for_each_entry_rcu(table, &net->nft.tables, list) { + list_for_each_entry_rcu(table, &net->nft.tables, list, + lockdep_is_held(&net->nft.commit_mutex)) { if (!nla_strcmp(nla, table->name) && table->family == family && nft_active_genmask(table, genmask))
From: Mikel Rychliski mikel@mikelr.com
[ Upstream commit 72e0ef0e5f067fd991f702f0b2635d911d0cf208 ]
On some EFI systems, the video BIOS is provided by the EFI firmware. The boot stub code stores the physical address of the ROM image in pdev->rom. Currently we attempt to access this pointer using phys_to_virt(), which doesn't work with CONFIG_HIGHMEM.
On these systems, attempting to load the radeon module on an x86_32 kernel can result in the following:
BUG: unable to handle page fault for address: 3e8ed03c
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
*pde = 00000000
Oops: 0000 [#1] PREEMPT SMP
CPU: 0 PID: 317 Comm: systemd-udevd Not tainted 5.6.0-rc3-next-20200228 #2
Hardware name: Apple Computer, Inc. MacPro1,1/Mac-F4208DC8, BIOS MP11.88Z.005C.B08.0707021221 07/02/07
EIP: radeon_get_bios+0x5ed/0xe50 [radeon]
Code: 00 00 84 c0 0f 85 12 fd ff ff c7 87 64 01 00 00 00 00 00 00 8b 47 08 8b 55 b0 e8 1e 83 e1 d6 85 c0 74 1a 8b 55 c0 85 d2 74 13 <80> 38 55 75 0e 80 78 01 aa 0f 84 a4 03 00 00 8d 74 26 00 68 dc 06
EAX: 3e8ed03c EBX: 00000000 ECX: 3e8ed03c EDX: 00010000
ESI: 00040000 EDI: eec04000 EBP: eef3fc60 ESP: eef3fbe0
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00010206
CR0: 80050033 CR2: 3e8ed03c CR3: 2ec77000 CR4: 000006d0
Call Trace:
 r520_init+0x26/0x240 [radeon]
 radeon_device_init+0x533/0xa50 [radeon]
 radeon_driver_load_kms+0x80/0x220 [radeon]
 drm_dev_register+0xa7/0x180 [drm]
 radeon_pci_probe+0x10f/0x1a0 [radeon]
 pci_device_probe+0xd4/0x140
Fix the issue by updating all drivers which can access a platform provided ROM. Instead of calling the helper function pci_platform_rom() which uses phys_to_virt(), call ioremap() directly on the pdev->rom.
radeon_read_platform_bios() previously directly accessed an __iomem pointer. Avoid this by calling memcpy_fromio() instead of kmemdup().
pci_platform_rom() now has no remaining callers, so remove it.
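In condensed form, the access pattern all three drivers switch to looks like this (a sketch with a hypothetical helper name; error paths trimmed, see the diffs for the real code): map the physical address with ioremap(), copy the image out with memcpy_fromio(), then unmap, instead of dereferencing pdev->rom through phys_to_virt().

  #include <linux/io.h>
  #include <linux/pci.h>
  #include <linux/slab.h>

  static void *copy_platform_rom(struct pci_dev *pdev)
  {
          void __iomem *rom;
          void *copy;

          if (!pdev->rom || pdev->romlen == 0)
                  return NULL;

          copy = kzalloc(pdev->romlen, GFP_KERNEL);
          if (!copy)
                  return NULL;

          rom = ioremap(pdev->rom, pdev->romlen);  /* HIGHMEM-safe mapping */
          if (!rom) {
                  kfree(copy);
                  return NULL;
          }
          memcpy_fromio(copy, rom, pdev->romlen);
          iounmap(rom);
          return copy;                             /* caller kfree()s */
  }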
Link: https://lore.kernel.org/r/20200319021623.5426-1-mikel@mikelr.com Signed-off-by: Mikel Rychliski mikel@mikelr.com Signed-off-by: Bjorn Helgaas bhelgaas@google.com Acked-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 31 +++++++++++-------- .../drm/nouveau/nvkm/subdev/bios/shadowpci.c | 17 ++++++++-- drivers/gpu/drm/radeon/radeon_bios.c | 30 +++++++++++------- drivers/pci/rom.c | 17 ---------- include/linux/pci.h | 1 - 5 files changed, 52 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 50dff69a0f6e3..b1172d93c99c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -192,30 +192,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = adev->pdev->rom; + size_t romlen = adev->pdev->romlen; + void __iomem *bios;
adev->bios = NULL;
- bios = pci_platform_rom(adev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - }
- adev->bios = kzalloc(size, GFP_KERNEL); - if (adev->bios == NULL) + adev->bios = kzalloc(romlen, GFP_KERNEL); + if (!adev->bios) return false;
- memcpy_fromio(adev->bios, bios, size); + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios;
- if (!check_atom_bios(adev->bios, size)) { - kfree(adev->bios); - return false; - } + memcpy_fromio(adev->bios, bios, romlen); + iounmap(bios);
- adev->bios_size = size; + if (!check_atom_bios(adev->bios, romlen)) + goto free_bios; + + adev->bios_size = romlen;
return true; +free_bios: + kfree(adev->bios); + return false; }
#ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c index 9b91da09dc5f8..8d9812a51ef63 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c @@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name) else return ERR_PTR(-ENODEV);
+ if (!pdev->rom || pdev->romlen == 0) + return ERR_PTR(-ENODEV); + if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { + priv->size = pdev->romlen; if (ret = -ENODEV, - (priv->rom = pci_platform_rom(pdev, &priv->size))) + (priv->rom = ioremap(pdev->rom, pdev->romlen))) return priv; kfree(priv); } @@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name) return ERR_PTR(ret); }
+static void +platform_fini(void *data) +{ + struct priv *priv = data; + + iounmap(priv->rom); + kfree(priv); +} + const struct nvbios_source nvbios_platform = { .name = "PLATFORM", .init = platform_init, - .fini = (void(*)(void *))kfree, + .fini = platform_fini, .read = pcirom_read, .rw = true, }; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 4d1490fbb0750..756a50e8aff20 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -108,25 +108,33 @@ static bool radeon_read_bios(struct radeon_device *rdev)
static bool radeon_read_platform_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = rdev->pdev->rom; + size_t romlen = rdev->pdev->romlen; + void __iomem *bios;
rdev->bios = NULL;
- bios = pci_platform_rom(rdev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - }
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + rdev->bios = kzalloc(romlen, GFP_KERNEL); + if (!rdev->bios) return false; - } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); - if (rdev->bios == NULL) { - return false; - } + + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; + + memcpy_fromio(rdev->bios, bios, romlen); + iounmap(bios); + + if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) + goto free_bios;
return true; +free_bios: + kfree(rdev->bios); + return false; }
#ifdef CONFIG_ACPI diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index 137bf0cee897c..8fc9a4e911e3a 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) pci_disable_rom(pdev); } EXPORT_SYMBOL(pci_unmap_rom); - -/** - * pci_platform_rom - provides a pointer to any ROM image provided by the - * platform - * @pdev: pointer to pci device struct - * @size: pointer to receive size of pci window over ROM - */ -void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) -{ - if (pdev->rom && pdev->romlen) { - *size = pdev->romlen; - return phys_to_virt((phys_addr_t)pdev->rom); - } - - return NULL; -} -EXPORT_SYMBOL(pci_platform_rom); diff --git a/include/linux/pci.h b/include/linux/pci.h index f39f22f9ee474..e92bd9b32f369 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1216,7 +1216,6 @@ int pci_enable_rom(struct pci_dev *pdev); void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); -void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
/* Power management related routines */ int pci_save_state(struct pci_dev *dev);
From: Zhihao Cheng chengzhihao1@huawei.com
[ Upstream commit 81423c78551654953d746250f1721300b470be0e ]
When inodes with extended attributes are evicted, xent is not freed in one exit branch.
Signed-off-by: Zhihao Cheng chengzhihao1@huawei.com Fixes: 9ca2d732644484488db3112 ("ubifs: Limit number of xattrs per inode") Signed-off-by: Richard Weinberger richard@nod.at Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ubifs/journal.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index a6ae2428e4c96..5f2ac5ef0891e 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -906,6 +906,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) ubifs_err(c, "dead directory entry '%s', error %d", xent->name, err); ubifs_ro_mode(c, err); + kfree(xent); goto out_release; } ubifs_assert(c, ubifs_inode(xino)->xattr);
From: Zhihao Cheng chengzhihao1@huawei.com
[ Upstream commit 927cc5cec35f01fe4f8af0ba80830a90b0533983 ]
A memory leak occurs when files with extended attributes are added to the orphan list.
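The fix follows the usual UBIFS iteration shape: each lookup returns a freshly allocated entry, so the previous entry is freed at the start of the next iteration, the current one is freed on an error exit, and the last one is freed after the loop. A self-contained user-space sketch of that shape (hypothetical names; the general pattern, not a line-for-line mirror of the patch):

  #include <stdlib.h>

  struct ent { int id; };

  /* Hypothetical lookup: returns a freshly allocated entry following
   * 'prev', or NULL when the list is exhausted. */
  static struct ent *next_ent(const struct ent *prev)
  {
          static int remaining = 3;

          (void)prev;
          if (remaining-- <= 0)
                  return NULL;
          return calloc(1, sizeof(struct ent));
  }

  static int process(const struct ent *e) { (void)e; return 0; }

  int main(void)
  {
          struct ent *ent, *pent = NULL;

          while ((ent = next_ent(pent)) != NULL) {
                  if (process(ent) != 0) {
                          free(ent);      /* error path: free the current... */
                          free(pent);     /* ...and the saved previous entry */
                          return 1;
                  }
                  free(pent);             /* previous entry no longer needed */
                  pent = ent;             /* current becomes the new cursor */
          }
          free(pent);                     /* free the last entry */
          return 0;
  }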
Signed-off-by: Zhihao Cheng chengzhihao1@huawei.com Fixes: 988bec41318f3fa897e2f8 ("ubifs: orphan: Handle xattrs like files") Signed-off-by: Richard Weinberger richard@nod.at Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ubifs/orphan.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index 7dd740e3692da..283f9eb48410d 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -157,7 +157,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) int err = 0; ino_t xattr_inum; union ubifs_key key; - struct ubifs_dent_node *xent; + struct ubifs_dent_node *xent, *pxent = NULL; struct fscrypt_name nm = {0}; struct ubifs_orphan *xattr_orphan; struct ubifs_orphan *orphan; @@ -181,11 +181,16 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum) xattr_inum = le64_to_cpu(xent->inum);
xattr_orphan = orphan_add(c, xattr_inum, orphan); - if (IS_ERR(xattr_orphan)) + if (IS_ERR(xattr_orphan)) { + kfree(xent); return PTR_ERR(xattr_orphan); + }
+ kfree(pxent); + pxent = xent; key_read(c, &xent->key, &key); } + kfree(pxent);
return 0; }
From: Liu Song liu.song11@zte.com.cn
[ Upstream commit acc5af3efa303d5f36cc8c0f61716161f6ca1384 ]
In ubifs_check_node(), when the value of "node_len" is abnormal, the code jumps to the "out_len" label. Then, in the following ubifs_dump_node(), if the node type is "UBIFS_DATA_NODE", an out-of-bounds access may occur in "print_hex_dump" due to the wrong "ch->len".

Therefore, when the value of "node_len" is abnormal, the dumped data length should be clamped to a reasonable safe range. At that point the structured data cannot be trusted, so dump the corrupted data directly for analysis.
Signed-off-by: Liu Song liu.song11@zte.com.cn Signed-off-by: Richard Weinberger richard@nod.at Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ubifs/io.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 8ceb51478800b..7e4bfaf2871fa 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -225,7 +225,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum) int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, int offs, int quiet, int must_chk_crc) { - int err = -EINVAL, type, node_len; + int err = -EINVAL, type, node_len, dump_node = 1; uint32_t crc, node_crc, magic; const struct ubifs_ch *ch = buf;
@@ -278,10 +278,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, out_len: if (!quiet) ubifs_err(c, "bad node length %d", node_len); + if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ) + dump_node = 0; out: if (!quiet) { ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); - ubifs_dump_node(c, buf); + if (dump_node) { + ubifs_dump_node(c, buf); + } else { + int safe_len = min3(node_len, c->leb_size - offs, + (int)UBIFS_MAX_DATA_NODE_SZ); + pr_err("\tprevent out-of-bounds memory access\n"); + pr_err("\ttruncated data node length %d\n", safe_len); + pr_err("\tcorrupted data node:\n"); + print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1, + buf, safe_len, 0); + } dump_stack(); } return err;
From: Andreas Steinmetz ast@domdv.de
[ Upstream commit 5c6cd7021a05a02fcf37f360592d7c18d4d807fb ]
The Miditech MIDIFACE 16x16 (USB ID 1290:1749) has more than one extra endpoint descriptor.
The first extra descriptor is: 0x06 0x30 0x00 0x00 0x00 0x00
As the code in snd_usbmidi_get_ms_info() looks only at the first extra descriptor to find USB_DT_CS_ENDPOINT, the device as such is recognized, but neither input nor output is configured.
The patch iterates through the extra descriptors to find the proper one. With this patch the device is correctly configured.
Signed-off-by: Andreas Steinmetz ast@domdv.de Link: https://lore.kernel.org/r/1c3b431a86f69e1d60745b6110cdb93c299f120b.camel@dom... Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- sound/usb/midi.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 0cb4142b05f64..bc9068b616bb9 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1827,6 +1827,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi, return 0; }
+static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor( + struct usb_host_endpoint *hostep) +{ + unsigned char *extra = hostep->extra; + int extralen = hostep->extralen; + + while (extralen > 3) { + struct usb_ms_endpoint_descriptor *ms_ep = + (struct usb_ms_endpoint_descriptor *)extra; + + if (ms_ep->bLength > 3 && + ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT && + ms_ep->bDescriptorSubtype == UAC_MS_GENERAL) + return ms_ep; + if (!extra[0]) + break; + extralen -= extra[0]; + extra += extra[0]; + } + return NULL; +} + /* * Returns MIDIStreaming device capabilities. */ @@ -1864,11 +1886,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi, ep = get_ep_desc(hostep); if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep)) continue; - ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra; - if (hostep->extralen < 4 || - ms_ep->bLength < 4 || - ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT || - ms_ep->bDescriptorSubtype != UAC_MS_GENERAL) + ms_ep = find_usb_ms_endpoint_descriptor(hostep); + if (!ms_ep) continue; if (usb_endpoint_dir_out(ep)) { if (endpoints[epidx].out_ep) {
From: Stuart Hayes stuart.w.hayes@gmail.com
[ Upstream commit 8edf5332c39340b9583cf9cba659eb7ec71f75b5 ]
Without this commit, a PCIe hotplug port can stop generating interrupts on hotplug events, so device adds and removals will not be seen:
The pciehp interrupt handler pciehp_isr() reads the Slot Status register and then writes back to it to clear the bits that caused the interrupt. If a different interrupt event bit gets set between the read and the write, pciehp_isr() returns without having cleared all of the interrupt event bits. If this happens when the MSI isn't masked (which by default it isn't in handle_edge_irq(), and which it will never be when MSI per-vector masking is not supported), we won't get any more hotplug interrupts from that device.
That is expected behavior, according to the PCIe Base Spec r5.0, section 6.7.3.4, "Software Notification of Hot-Plug Events".
Because the Presence Detect Changed and Data Link Layer State Changed event bits can both get set at nearly the same time when a device is added or removed, this is more likely to happen than it might seem. The issue was found (and can be reproduced rather easily) by connecting and disconnecting an NVMe storage device on at least one system model where the NVMe devices were being connected to an AMD PCIe port (PCI device 0x1022/0x1483).
Fix the issue by modifying pciehp_isr() to loop back and re-read the Slot Status register immediately after writing to it, until it sees that all of the event status bits have been cleared.
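In outline, the fix turns the single read-then-write into a loop. The following simplified kernel-style sketch (function name hypothetical; the no-response and power-fault handling are omitted, see the diff for the real logic) re-reads Slot Status after each write-to-clear until no new event bits appear:

  #include <linux/pci.h>

  #define EVENT_BITS (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | \
                      PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | \
                      PCI_EXP_SLTSTA_DLLSC)

  static u16 collect_slot_events(struct pci_dev *pdev)
  {
          u16 status, events = 0;

          do {
                  pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
                  status &= EVENT_BITS;   /* keep only the event bits */
                  events |= status;
                  if (!status)
                          break;          /* all latched events are clear */
                  /* RW1C register: writing the bits back clears them */
                  pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
          } while (pci_dev_msi_enabled(pdev));    /* only loop in MSI mode */

          return events;                  /* accumulated events to handle */
  }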
[lukas: drop loop count limitation, write "events" instead of "status", don't loop back in INTx and poll modes, tweak code comment & commit msg] Link: https://lore.kernel.org/r/78b4ced5072bfe6e369d20e8b47c279b8c7af12e.158212161... Tested-by: Stuart Hayes stuart.w.hayes@gmail.com Signed-off-by: Stuart Hayes stuart.w.hayes@gmail.com Signed-off-by: Lukas Wunner lukas@wunner.de Signed-off-by: Bjorn Helgaas bhelgaas@google.com Reviewed-by: Joerg Roedel jroedel@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/pci/hotplug/pciehp_hpc.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 356786a3b7f4b..88b996764ff95 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -529,7 +529,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); struct device *parent = pdev->dev.parent; - u16 status, events; + u16 status, events = 0;
/* * Interrupts only occur in D3hot or shallower and only if enabled @@ -554,6 +554,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) } }
+read_status: pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status); if (status == (u16) ~0) { ctrl_info(ctrl, "%s: no response from device\n", __func__); @@ -566,24 +567,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) * Slot Status contains plain status bits as well as event * notification bits; right now we only want the event bits. */ - events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | - PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | - PCI_EXP_SLTSTA_DLLSC); + status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | + PCI_EXP_SLTSTA_DLLSC;
/* * If we've already reported a power fault, don't report it again * until we've done something to handle it. */ if (ctrl->power_fault_detected) - events &= ~PCI_EXP_SLTSTA_PFD; + status &= ~PCI_EXP_SLTSTA_PFD;
+ events |= status; if (!events) { if (parent) pm_runtime_put(parent); return IRQ_NONE; }
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + if (status) { + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + + /* + * In MSI mode, all event bits must be zero before the port + * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4). + * So re-read the Slot Status register in case a bit was set + * between read and write. + */ + if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode) + goto read_status; + } + ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events); if (parent) pm_runtime_put(parent);
From: Trond Myklebust trond.myklebust@hammerspace.com
[ Upstream commit 08ca8b21f760c0ed5034a5c122092eec22ccf8f4 ]
When a subrequest is being detached from the subgroup, we want to ensure that it is not holding the group lock, or in the process of waiting for the group lock.
Fixes: 5b2b5187fa85 ("NFS: Fix nfs_page_group_destroy() and nfs_lock_and_join_requests() race cases") Signed-off-by: Trond Myklebust trond.myklebust@hammerspace.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/nfs/pagelist.c | 67 +++++++++++++++++++++++++++------------- fs/nfs/write.c | 10 ++++-- include/linux/nfs_page.h | 2 ++ 3 files changed, 55 insertions(+), 24 deletions(-)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index b736912098eee..f4407dd426bf0 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -133,47 +133,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx) EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
/* - * nfs_page_group_lock - lock the head of the page group - * @req - request in group that is to be locked + * nfs_page_set_headlock - set the request PG_HEADLOCK + * @req: request that is to be locked * - * this lock must be held when traversing or modifying the page - * group list + * this lock must be held when modifying req->wb_head * * return 0 on success, < 0 on error */ int -nfs_page_group_lock(struct nfs_page *req) +nfs_page_set_headlock(struct nfs_page *req) { - struct nfs_page *head = req->wb_head; - - WARN_ON_ONCE(head != head->wb_head); - - if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) + if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags)) return 0;
- set_bit(PG_CONTENDED1, &head->wb_flags); + set_bit(PG_CONTENDED1, &req->wb_flags); smp_mb__after_atomic(); - return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, + return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK, TASK_UNINTERRUPTIBLE); }
/* - * nfs_page_group_unlock - unlock the head of the page group - * @req - request in group that is to be unlocked + * nfs_page_clear_headlock - clear the request PG_HEADLOCK + * @req: request that is to be locked */ void -nfs_page_group_unlock(struct nfs_page *req) +nfs_page_clear_headlock(struct nfs_page *req) { - struct nfs_page *head = req->wb_head; - - WARN_ON_ONCE(head != head->wb_head); - smp_mb__before_atomic(); - clear_bit(PG_HEADLOCK, &head->wb_flags); + clear_bit(PG_HEADLOCK, &req->wb_flags); smp_mb__after_atomic(); - if (!test_bit(PG_CONTENDED1, &head->wb_flags)) + if (!test_bit(PG_CONTENDED1, &req->wb_flags)) return; - wake_up_bit(&head->wb_flags, PG_HEADLOCK); + wake_up_bit(&req->wb_flags, PG_HEADLOCK); +} + +/* + * nfs_page_group_lock - lock the head of the page group + * @req: request in group that is to be locked + * + * this lock must be held when traversing or modifying the page + * group list + * + * return 0 on success, < 0 on error + */ +int +nfs_page_group_lock(struct nfs_page *req) +{ + int ret; + + ret = nfs_page_set_headlock(req); + if (ret || req->wb_head == req) + return ret; + return nfs_page_set_headlock(req->wb_head); +} + +/* + * nfs_page_group_unlock - unlock the head of the page group + * @req: request in group that is to be unlocked + */ +void +nfs_page_group_unlock(struct nfs_page *req) +{ + if (req != req->wb_head) + nfs_page_clear_headlock(req->wb_head); + nfs_page_clear_headlock(req); }
/* diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 58c8317dd7d88..613c3ef23e07b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -425,22 +425,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, destroy_list = (subreq->wb_this_page == old_head) ? NULL : subreq->wb_this_page;
+ /* Note: lock subreq in order to change subreq->wb_head */ + nfs_page_set_headlock(subreq); WARN_ON_ONCE(old_head != subreq->wb_head);
/* make sure old group is not used */ subreq->wb_this_page = subreq; + subreq->wb_head = subreq;
clear_bit(PG_REMOVE, &subreq->wb_flags);
/* Note: races with nfs_page_group_destroy() */ if (!kref_read(&subreq->wb_kref)) { /* Check if we raced with nfs_page_group_destroy() */ - if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) + if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { + nfs_page_clear_headlock(subreq); nfs_free_request(subreq); + } else + nfs_page_clear_headlock(subreq); continue; } + nfs_page_clear_headlock(subreq);
- subreq->wb_head = subreq; nfs_release_request(old_head);
if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 0bbd587fac6a9..7e9419d74b86b 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +extern int nfs_page_set_headlock(struct nfs_page *req); +extern void nfs_page_clear_headlock(struct nfs_page *req); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
/*
From: James Zhu James.Zhu@amd.com
[ Upstream commit ef563ff403404ef2f234abe79bdd9f04ab6481c9 ]
Add VCN DPG hardware synchronization to fix a race condition between the VCN driver and the hardware.
Signed-off-by: James Zhu James.Zhu@amd.com Reviewed-by: Leo Liu leo.liu@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 36ad0c0e8efbc..cd2cbe760e883 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1026,6 +1026,10 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+ /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); /* set the write pointer delay */ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
@@ -1048,6 +1052,9 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+ /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); return 0; }
@@ -1357,8 +1364,13 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); /* Restore */ ring = &adev->vcn.inst->ring_enc[0]; + ring->wptr = 0; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); @@ -1366,6 +1378,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring = &adev->vcn.inst->ring_enc[1]; + ring->wptr = 0; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); @@ -1374,6 +1387,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
From: Anju T Sudhakar anju@linux.vnet.ibm.com
[ Upstream commit a36e8ba60b991d563677227f172db69e030797e6 ]
IMC (In-Memory Collection counters) does performance monitoring in two different modes, i.e. accumulation mode (core-imc and thread-imc events) and trace mode (trace-imc events). A CPU thread can be in either accumulation mode or trace mode at a time, and this is selected via the LDBAR register on the POWER architecture. The current design does not address the races between thread-imc and trace-imc events.

This patch implements a global id and lock to avoid races between core, trace and thread imc events. With this global id-lock implementation, the system can run only core, thread or trace imc events at a time; i.e. to run any core-imc events, thread/trace imc events must not be enabled/monitored.
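The serialization scheme boils down to a small guard, sketched below with hypothetical names (the real code also drops references from the CPU-offline and event-destroy paths): a mutex-protected {id, refc} pair where the first user claims the domain and users of a different domain get -EBUSY.

  #include <linux/errno.h>
  #include <linux/mutex.h>

  struct mode_ref {
          struct mutex lock;
          int id;                 /* 0 = free, else the active IMC domain */
          int refc;
  };

  static struct mode_ref mode_guard = {
          .lock = __MUTEX_INITIALIZER(mode_guard.lock),
  };

  static int claim_mode(int domain)
  {
          int ret = 0;

          mutex_lock(&mode_guard.lock);
          if (mode_guard.id == 0 || mode_guard.id == domain) {
                  mode_guard.id = domain; /* first user sets the mode */
                  mode_guard.refc++;
          } else {
                  ret = -EBUSY;           /* another mode is active */
          }
          mutex_unlock(&mode_guard.lock);
          return ret;
  }

  static void release_mode(void)
  {
          mutex_lock(&mode_guard.lock);
          if (--mode_guard.refc <= 0) {
                  mode_guard.refc = 0;
                  mode_guard.id = 0;      /* last user frees the mode */
          }
          mutex_unlock(&mode_guard.lock);
  }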
Signed-off-by: Anju T Sudhakar anju@linux.vnet.ibm.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Link: https://lore.kernel.org/r/20200313055238.8656-1-anju@linux.vnet.ibm.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/powerpc/perf/imc-pmu.c | 173 +++++++++++++++++++++++++++++++----- 1 file changed, 149 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index cb50a9e1fd2d7..eb82dda884e51 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -44,6 +44,16 @@ static DEFINE_PER_CPU(u64 *, trace_imc_mem); static struct imc_pmu_ref *trace_imc_refc; static int trace_imc_mem_size;
+/* + * Global data structure used to avoid races between thread, + * core and trace-imc + */ +static struct imc_pmu_ref imc_global_refc = { + .lock = __MUTEX_INITIALIZER(imc_global_refc.lock), + .id = 0, + .refc = 0, +}; + static struct imc_pmu *imc_event_to_pmu(struct perf_event *event) { return container_of(event->pmu, struct imc_pmu, pmu); @@ -698,6 +708,16 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu) return -EINVAL;
ref->refc = 0; + /* + * Reduce the global reference count, if this is the + * last cpu in this core and core-imc event running + * in this cpu. + */ + mutex_lock(&imc_global_refc.lock); + if (imc_global_refc.id == IMC_DOMAIN_CORE) + imc_global_refc.refc--; + + mutex_unlock(&imc_global_refc.lock); } return 0; } @@ -710,6 +730,23 @@ static int core_imc_pmu_cpumask_init(void) ppc_core_imc_cpu_offline); }
+static void reset_global_refc(struct perf_event *event) +{ + mutex_lock(&imc_global_refc.lock); + imc_global_refc.refc--; + + /* + * If no other thread is running any + * event for this domain(thread/core/trace), + * set the global id to zero. + */ + if (imc_global_refc.refc <= 0) { + imc_global_refc.refc = 0; + imc_global_refc.id = 0; + } + mutex_unlock(&imc_global_refc.lock); +} + static void core_imc_counters_release(struct perf_event *event) { int rc, core_id; @@ -759,6 +796,8 @@ static void core_imc_counters_release(struct perf_event *event) ref->refc = 0; } mutex_unlock(&ref->lock); + + reset_global_refc(event); }
static int core_imc_event_init(struct perf_event *event) @@ -819,6 +858,29 @@ static int core_imc_event_init(struct perf_event *event) ++ref->refc; mutex_unlock(&ref->lock);
+ /* + * Since the system can run either in accumulation or trace-mode + * of IMC at a time, core-imc events are allowed only if no other + * trace/thread imc events are enabled/monitored. + * + * Take the global lock, and check the refc.id + * to know whether any other trace/thread imc + * events are running. + */ + mutex_lock(&imc_global_refc.lock); + if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) { + /* + * No other trace/thread imc events are running in + * the system, so set the refc.id to core-imc. + */ + imc_global_refc.id = IMC_DOMAIN_CORE; + imc_global_refc.refc++; + } else { + mutex_unlock(&imc_global_refc.lock); + return -EBUSY; + } + mutex_unlock(&imc_global_refc.lock); + event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK); event->destroy = core_imc_counters_release; return 0; @@ -877,7 +939,23 @@ static int ppc_thread_imc_cpu_online(unsigned int cpu)
static int ppc_thread_imc_cpu_offline(unsigned int cpu) { - mtspr(SPRN_LDBAR, 0); + /* + * Set the bit 0 of LDBAR to zero. + * + * If bit 0 of LDBAR is unset, it will stop posting + * the counter data to memory. + * For thread-imc, bit 0 of LDBAR will be set to 1 in the + * event_add function. So reset this bit here, to stop the updates + * to memory in the cpu_offline path. + */ + mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); + + /* Reduce the refc if thread-imc event running on this cpu */ + mutex_lock(&imc_global_refc.lock); + if (imc_global_refc.id == IMC_DOMAIN_THREAD) + imc_global_refc.refc--; + mutex_unlock(&imc_global_refc.lock); + return 0; }
@@ -916,7 +994,22 @@ static int thread_imc_event_init(struct perf_event *event) if (!target) return -EINVAL;
+ mutex_lock(&imc_global_refc.lock); + /* + * Check if any other trace/core imc events are running in the + * system, if not set the global id to thread-imc. + */ + if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) { + imc_global_refc.id = IMC_DOMAIN_THREAD; + imc_global_refc.refc++; + } else { + mutex_unlock(&imc_global_refc.lock); + return -EBUSY; + } + mutex_unlock(&imc_global_refc.lock); + event->pmu->task_ctx_nr = perf_sw_context; + event->destroy = reset_global_refc; return 0; }
@@ -1063,10 +1156,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags) int core_id; struct imc_pmu_ref *ref;
- mtspr(SPRN_LDBAR, 0); - core_id = smp_processor_id() / threads_per_core; ref = &core_imc_refc[core_id]; + if (!ref) { + pr_debug("imc: Failed to get event reference count\n"); + return; + }
mutex_lock(&ref->lock); ref->refc--; @@ -1082,6 +1177,10 @@ static void thread_imc_event_del(struct perf_event *event, int flags) ref->refc = 0; } mutex_unlock(&ref->lock); + + /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */ + mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); + /* * Take a snapshot and calculate the delta and update * the event counter values. @@ -1133,7 +1232,18 @@ static int ppc_trace_imc_cpu_online(unsigned int cpu)
static int ppc_trace_imc_cpu_offline(unsigned int cpu) { - mtspr(SPRN_LDBAR, 0); + /* + * No need to set bit 0 of LDBAR to zero, as + * it is set to zero for imc trace-mode + * + * Reduce the refc if any trace-imc event running + * on this cpu. + */ + mutex_lock(&imc_global_refc.lock); + if (imc_global_refc.id == IMC_DOMAIN_TRACE) + imc_global_refc.refc--; + mutex_unlock(&imc_global_refc.lock); + return 0; }
@@ -1226,15 +1336,14 @@ static int trace_imc_event_add(struct perf_event *event, int flags) local_mem = get_trace_imc_event_base_addr(); ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;
- if (core_imc_refc) - ref = &core_imc_refc[core_id]; + /* trace-imc reference count */ + if (trace_imc_refc) + ref = &trace_imc_refc[core_id]; if (!ref) { - /* If core-imc is not enabled, use trace-imc reference count */ - if (trace_imc_refc) - ref = &trace_imc_refc[core_id]; - if (!ref) - return -EINVAL; + pr_debug("imc: Failed to get the event reference count\n"); + return -EINVAL; } + mtspr(SPRN_LDBAR, ldbar_value); mutex_lock(&ref->lock); if (ref->refc == 0) { @@ -1242,13 +1351,11 @@ static int trace_imc_event_add(struct perf_event *event, int flags) get_hard_smp_processor_id(smp_processor_id()))) { mutex_unlock(&ref->lock); pr_err("trace-imc: Unable to start the counters for core %d\n", core_id); - mtspr(SPRN_LDBAR, 0); return -EINVAL; } } ++ref->refc; mutex_unlock(&ref->lock); - return 0; }
@@ -1274,16 +1381,13 @@ static void trace_imc_event_del(struct perf_event *event, int flags) int core_id = smp_processor_id() / threads_per_core; struct imc_pmu_ref *ref = NULL;
- if (core_imc_refc) - ref = &core_imc_refc[core_id]; + if (trace_imc_refc) + ref = &trace_imc_refc[core_id]; if (!ref) { - /* If core-imc is not enabled, use trace-imc reference count */ - if (trace_imc_refc) - ref = &trace_imc_refc[core_id]; - if (!ref) - return; + pr_debug("imc: Failed to get event reference count\n"); + return; } - mtspr(SPRN_LDBAR, 0); + mutex_lock(&ref->lock); ref->refc--; if (ref->refc == 0) { @@ -1297,6 +1401,7 @@ static void trace_imc_event_del(struct perf_event *event, int flags) ref->refc = 0; } mutex_unlock(&ref->lock); + trace_imc_event_stop(event, flags); }
@@ -1314,10 +1419,30 @@ static int trace_imc_event_init(struct perf_event *event) if (event->attr.sample_period == 0) return -ENOENT;
+ /* + * Take the global lock, and make sure + * no other thread is running any core/thread imc + * events + */ + mutex_lock(&imc_global_refc.lock); + if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) { + /* + * No core/thread imc events are running in the + * system, so set the refc.id to trace-imc. + */ + imc_global_refc.id = IMC_DOMAIN_TRACE; + imc_global_refc.refc++; + } else { + mutex_unlock(&imc_global_refc.lock); + return -EBUSY; + } + mutex_unlock(&imc_global_refc.lock); + event->hw.idx = -1; target = event->hw.target;
event->pmu->task_ctx_nr = perf_hw_context; + event->destroy = reset_global_refc; return 0; }
@@ -1429,10 +1554,10 @@ static void cleanup_all_core_imc_memory(void) static void thread_imc_ldbar_disable(void *dummy) { /* - * By Zeroing LDBAR, we disable thread-imc - * updates. + * By setting 0th bit of LDBAR to zero, we disable thread-imc + * updates to memory. */ - mtspr(SPRN_LDBAR, 0); + mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63)))); }
void thread_imc_disable(void)
From: Nathan Chancellor natechancellor@gmail.com
[ Upstream commit b0d14fc43d39203ae025f20ef4d5d25d9ccf4be1 ]
Clang warns:
mm/kmemleak.c:1955:28: warning: array comparison always evaluates to a constant [-Wtautological-compare]
        if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
                                  ^
mm/kmemleak.c:1955:60: warning: array comparison always evaluates to a constant [-Wtautological-compare]
        if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
These are not true arrays, they are linker-defined symbols, which are just addresses. Using the address-of operator silences the warning and does not change the resulting assembly with either clang/ld.lld or gcc/ld (tested with diff + objdump -Dr).
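For illustration, a sketch of the same pattern in isolation; the symbol names here are hypothetical and would normally be provided by a linker script:

/* Linker-provided symbols are conventionally declared as arrays whose
 * addresses are fixed at link time: */
extern char __start_demo[], __end_demo[];

/* Comparing the arrays directly decays both sides to link-time-constant
 * pointers, which clang flags with -Wtautological-compare:
 *
 *	if (__start_demo < __end_demo)
 *
 * Taking the address first compares the same values without the
 * diagnostic and compiles to the same code: */
int demo_range_nonempty(void)
{
	return &__start_demo < &__end_demo;
}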
Suggested-by: Nick Desaulniers ndesaulniers@google.com Signed-off-by: Nathan Chancellor natechancellor@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Acked-by: Catalin Marinas catalin.marinas@arm.com Link: https://github.com/ClangBuiltLinux/linux/issues/895 Link: http://lkml.kernel.org/r/20200220051551.44000-1-natechancellor@gmail.com Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/kmemleak.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 2446076633631..312942d784058 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1947,7 +1947,7 @@ void __init kmemleak_init(void) create_object((unsigned long)__bss_start, __bss_stop - __bss_start, KMEMLEAK_GREY, GFP_ATOMIC); /* only register .data..ro_after_init if not within .data */ - if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) + if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata) create_object((unsigned long)__start_ro_after_init, __end_ro_after_init - __start_ro_after_init, KMEMLEAK_GREY, GFP_ATOMIC);
From: Xianting Tian xianting_tian@126.com
[ Upstream commit faffdfa04fa11ccf048cebdde73db41ede0679e0 ]
The mount failure issue happens under the following scenario: an application forks dozens of threads to mount the same number of cramfs images separately in docker, but several mounts fail with high probability. The mount fails because the page (read from the superblock of the loop dev) is not uptodate after wait_on_page_locked(page) returns in cramfs_read():

wait_on_page_locked(page);
if (!PageUptodate(page)) {
	...
}

The reason the page is not uptodate: systemd-udevd reads the loopX dev before the mount. Because the status of loopX is Lo_unbound at this time, loop_make_request directly triggers the io_end handler end_buffer_async_read, which calls SetPageError(page). This prevents the page from being set uptodate in end_buffer_async_read:

if (page_uptodate && !PageError(page)) {
	SetPageUptodate(page);
}

Then the mount operation is performed using the same page just accessed by systemd-udevd above. Because this page is not uptodate, it launches an actual read via submit_bh and then waits on the page by calling wait_on_page_locked(page). When the I/O of the page is done, the io_end handler end_buffer_async_read is called; because no one cleared the page error (set by the systemd-udevd read) during the whole read path of the mount, the page is still in "PageError" status and cannot be set uptodate in end_buffer_async_read, which causes the mount failure.

But sometimes the mount succeeds even though systemd-udevd read the loopX dev just before. The reason is that systemd-udevd launched another loopX read between steps 3.1 and 3.2; the steps are as below:

1. loopX dev default status is Lo_unbound;
2. systemd-udevd reads loopX dev (page is set to PageError);
3. mount operation
   1) sets loopX status to Lo_bound;
      ==> systemd-udevd reads loopX dev <==
   2) reads loopX dev (page has no error);
   3) mount succeeds.
As the loopX dev status is set to Lo_bound after step 3.1, the other loopX read issued by systemd-udevd goes through the whole I/O stack; part of the call trace is below:

SYS_read
 vfs_read
  do_sync_read
   blkdev_aio_read
    generic_file_aio_read
     do_generic_file_read:
      ClearPageError(page);
      mapping->a_ops->readpage(filp, page);

Here, mapping->a_ops->readpage() is blkdev_readpage. In the latest kernel, some function names have changed; the call trace is as below:

blkdev_read_iter
 generic_file_read_iter
  generic_file_buffered_read:
   /*
    * A previous I/O error may have been due to temporary
    * failures, e.g. multipath errors.
    * PG_error will be set again if readpage fails.
    */
   ClearPageError(page);
   /* Start the actual read. The read will unlock the page. */
   error = mapping->a_ops->readpage(filp, page);
We can see that ClearPageError(page) is called before the actual read, so the read in step 3.2 succeeds.

This patch adds a call to ClearPageError just before the actual read in the read path of the cramfs mount. Without the patch, the call trace when performing a cramfs mount is as below:

do_mount
 cramfs_read
  cramfs_blkdev_read
   read_cache_page
    do_read_cache_page:
     filler(data, page);
     or
     mapping->a_ops->readpage(data, page);

With the patch, the call trace when performing the mount is as below:

do_mount
 cramfs_read
  cramfs_blkdev_read
   read_cache_page:
    do_read_cache_page:
     ClearPageError(page);    <== new add
     filler(data, page);
     or
     mapping->a_ops->readpage(data, page);

With the patch, the mount operation triggers the call to ClearPageError(page) before the actual read, so the page has no error if no additional page error happens when the I/O is done.
Signed-off-by: Xianting Tian xianting_tian@126.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Reviewed-by: Matthew Wilcox (Oracle) willy@infradead.org Cc: Jan Kara jack@suse.cz Cc: yubin@h3c.com Link: http://lkml.kernel.org/r/1583318844-22971-1-git-send-email-xianting_tian@126... Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/filemap.c | 8 ++++++++ 1 file changed, 8 insertions(+)
diff --git a/mm/filemap.c b/mm/filemap.c index 18c1f58300742..51b2cb5aa5030 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2845,6 +2845,14 @@ filler: unlock_page(page); goto out; } + + /* + * A previous I/O error may have been due to temporary + * failures. + * Clear page error before actual read, PG_error will be + * set again if read page fails. + */ + ClearPageError(page); goto filler;
out:
From: Qian Cai cai@lca.pw
[ Upstream commit 218209487c3da2f6d861b236c11226b6eca7b7b7 ]
si->inuse_pages could be accessed concurrently as noticed by KCSAN,
write to 0xffff98b00ebd04dc of 4 bytes by task 82262 on cpu 92:
 swap_range_free+0xbe/0x230
 swap_range_free at mm/swapfile.c:719
 swapcache_free_entries+0x1be/0x250
 free_swap_slot+0x1c8/0x220
 __swap_entry_free.constprop.19+0xa3/0xb0
 free_swap_and_cache+0x53/0xa0
 unmap_page_range+0x7e0/0x1ce0
 unmap_single_vma+0xcd/0x170
 unmap_vmas+0x18b/0x220
 exit_mmap+0xee/0x220
 mmput+0xe7/0x240
 do_exit+0x598/0xfd0
 do_group_exit+0x8b/0x180
 get_signal+0x293/0x13d0
 do_signal+0x37/0x5d0
 prepare_exit_to_usermode+0x1b7/0x2c0
 ret_from_intr+0x32/0x42

read to 0xffff98b00ebd04dc of 4 bytes by task 82499 on cpu 46:
 try_to_unuse+0x86b/0xc80
 try_to_unuse at mm/swapfile.c:2185
 __x64_sys_swapoff+0x372/0xd40
 do_syscall_64+0x91/0xb05
 entry_SYSCALL_64_after_hwframe+0x49/0xbe
The plain reads of si->inuse_pages in try_to_unuse() are outside the si->lock critical section, which results in data races that could be dangerous when the value is used in a loop. Fix them by adding READ_ONCE().
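The effect of the annotation can be sketched in plain C with a simplified READ_ONCE(); the variable and function below are illustrative, not the kernel's:

/* Minimal model of the kernel's READ_ONCE(): a volatile access the
 * compiler cannot cache in a register across iterations. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned int inuse_pages;	/* written concurrently elsewhere */

static void wait_for_drain(void)
{
	/* With a plain `while (inuse_pages)` the compiler may legally
	 * load the value once and spin on a stale copy; the marked
	 * access forces a fresh load on every iteration. */
	while (READ_ONCE(inuse_pages))
		;
}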
Signed-off-by: Qian Cai cai@lca.pw Signed-off-by: Andrew Morton akpm@linux-foundation.org Reviewed-by: Andrew Morton akpm@linux-foundation.org Cc: Marco Elver elver@google.com Cc: Hugh Dickins hughd@google.com Link: http://lkml.kernel.org/r/1582578903-29294-1-git-send-email-cai@lca.pw Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/swapfile.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c index 646fd0a8e3202..2f59495782dd4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2132,7 +2132,7 @@ int try_to_unuse(unsigned int type, bool frontswap, swp_entry_t entry; unsigned int i;
- if (!si->inuse_pages) + if (!READ_ONCE(si->inuse_pages)) return 0;
if (!frontswap) @@ -2148,7 +2148,7 @@ retry:
spin_lock(&mmlist_lock); p = &init_mm.mmlist; - while (si->inuse_pages && + while (READ_ONCE(si->inuse_pages) && !signal_pending(current) && (p = p->next) != &init_mm.mmlist) {
@@ -2177,7 +2177,7 @@ retry: mmput(prev_mm);
i = 0; - while (si->inuse_pages && + while (READ_ONCE(si->inuse_pages) && !signal_pending(current) && (i = find_next_to_unuse(si, i, frontswap)) != 0) {
@@ -2219,7 +2219,7 @@ retry: * been preempted after get_swap_page(), temporarily hiding that swap. * It's easy and robust (though cpu-intensive) just to keep retrying. */ - if (si->inuse_pages) { + if (READ_ONCE(si->inuse_pages)) { if (!signal_pending(current)) goto retry; retval = -EINTR;
From: Qian Cai cai@lca.pw
[ Upstream commit 5644e1fbbfe15ad06785502bbfe5751223e5841d ]
pgdat->kswapd_classzone_idx could be accessed concurrently in wakeup_kswapd(). Plain writes and reads without any lock protection result in data races. Fix them by adding a pair of READ|WRITE_ONCE() as well as saving a branch (compilers might well optimize the original code in an unintentional way anyway). While at it, also take care of pgdat->kswapd_order and non-kswapd threads in allow_direct_reclaim(). The data races were reported by KCSAN,
BUG: KCSAN: data-race in wakeup_kswapd / wakeup_kswapd
write to 0xffff9f427ffff2dc of 4 bytes by task 7454 on cpu 13:
 wakeup_kswapd+0xf1/0x400
 wakeup_kswapd at mm/vmscan.c:3967
 wake_all_kswapds+0x59/0xc0
 wake_all_kswapds at mm/page_alloc.c:4241
 __alloc_pages_slowpath+0xdcc/0x1290
 __alloc_pages_slowpath at mm/page_alloc.c:4512
 __alloc_pages_nodemask+0x3bb/0x450
 alloc_pages_vma+0x8a/0x2c0
 do_anonymous_page+0x16e/0x6f0
 __handle_mm_fault+0xcd5/0xd40
 handle_mm_fault+0xfc/0x2f0
 do_page_fault+0x263/0x6f9
 page_fault+0x34/0x40

1 lock held by mtest01/7454:
 #0: ffff9f425afe8808 (&mm->mmap_sem#2){++++}, at: do_page_fault+0x143/0x6f9
     do_user_addr_fault at arch/x86/mm/fault.c:1405
     (inlined by) do_page_fault at arch/x86/mm/fault.c:1539
irq event stamp: 6944085
 count_memcg_event_mm+0x1a6/0x270
 count_memcg_event_mm+0x119/0x270
 __do_softirq+0x34c/0x57c
 irq_exit+0xa2/0xc0

read to 0xffff9f427ffff2dc of 4 bytes by task 7472 on cpu 38:
 wakeup_kswapd+0xc8/0x400
 wake_all_kswapds+0x59/0xc0
 __alloc_pages_slowpath+0xdcc/0x1290
 __alloc_pages_nodemask+0x3bb/0x450
 alloc_pages_vma+0x8a/0x2c0
 do_anonymous_page+0x16e/0x6f0
 __handle_mm_fault+0xcd5/0xd40
 handle_mm_fault+0xfc/0x2f0
 do_page_fault+0x263/0x6f9
 page_fault+0x34/0x40

1 lock held by mtest01/7472:
 #0: ffff9f425a9ac148 (&mm->mmap_sem#2){++++}, at: do_page_fault+0x143/0x6f9
irq event stamp: 6793561
 count_memcg_event_mm+0x1a6/0x270
 count_memcg_event_mm+0x119/0x270
 __do_softirq+0x34c/0x57c
 irq_exit+0xa2/0xc0
BUG: KCSAN: data-race in kswapd / wakeup_kswapd
write to 0xffff90973ffff2dc of 4 bytes by task 820 on cpu 6:
 kswapd+0x27c/0x8d0
 kthread+0x1e0/0x200
 ret_from_fork+0x27/0x50

read to 0xffff90973ffff2dc of 4 bytes by task 6299 on cpu 0:
 wakeup_kswapd+0xf3/0x450
 wake_all_kswapds+0x59/0xc0
 __alloc_pages_slowpath+0xdcc/0x1290
 __alloc_pages_nodemask+0x3bb/0x450
 alloc_pages_vma+0x8a/0x2c0
 do_anonymous_page+0x170/0x700
 __handle_mm_fault+0xc9f/0xd00
 handle_mm_fault+0xfc/0x2f0
 do_page_fault+0x263/0x6f9
 page_fault+0x34/0x40
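The shape of the fix can be reduced to a short sketch (kernel-style macros, illustrative names): a marked read decides whether a marked write is needed at all, which also replaces the unconditional read-modify-write of the old max()-based code:

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int kswapd_order;	/* racily shared between kswapd and wakers */

static void bump_kswapd_order(int order)
{
	/* Old: kswapd_order = max(kswapd_order, order); -- an unmarked
	 * read-modify-write. New: store only when the value grows, with
	 * both accesses marked as intentionally racy. */
	if (READ_ONCE(kswapd_order) < order)
		WRITE_ONCE(kswapd_order, order);
}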
Signed-off-by: Qian Cai cai@lca.pw Signed-off-by: Andrew Morton akpm@linux-foundation.org Reviewed-by: Andrew Morton akpm@linux-foundation.org Cc: Marco Elver elver@google.com Cc: Matthew Wilcox willy@infradead.org Link: http://lkml.kernel.org/r/1582749472-5171-1-git-send-email-cai@lca.pw Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/vmscan.c | 45 ++++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 19 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c index 7fde5f904c8d3..d0404d8b37254 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3160,8 +3160,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
/* kswapd must be awake if processes are being throttled */ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { - pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx, - (enum zone_type)ZONE_NORMAL); + if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL) + WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL); + wake_up_interruptible(&pgdat->kswapd_wait); }
@@ -3793,9 +3794,9 @@ out: static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, enum zone_type prev_classzone_idx) { - if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) - return prev_classzone_idx; - return pgdat->kswapd_classzone_idx; + enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); + + return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx; }
static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, @@ -3839,8 +3840,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o * the previous request that slept prematurely. */ if (remaining) { - pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); - pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order); + WRITE_ONCE(pgdat->kswapd_classzone_idx, + kswapd_classzone_idx(pgdat, classzone_idx)); + + if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) + WRITE_ONCE(pgdat->kswapd_order, reclaim_order); }
finish_wait(&pgdat->kswapd_wait, &wait); @@ -3917,12 +3921,12 @@ static int kswapd(void *p) tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; set_freezable();
- pgdat->kswapd_order = 0; - pgdat->kswapd_classzone_idx = MAX_NR_ZONES; + WRITE_ONCE(pgdat->kswapd_order, 0); + WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); for ( ; ; ) { bool ret;
- alloc_order = reclaim_order = pgdat->kswapd_order; + alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
kswapd_try_sleep: @@ -3930,10 +3934,10 @@ kswapd_try_sleep: classzone_idx);
/* Read the new order and classzone_idx */ - alloc_order = reclaim_order = pgdat->kswapd_order; + alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); - pgdat->kswapd_order = 0; - pgdat->kswapd_classzone_idx = MAX_NR_ZONES; + WRITE_ONCE(pgdat->kswapd_order, 0); + WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
ret = try_to_freeze(); if (kthread_should_stop()) @@ -3977,20 +3981,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, enum zone_type classzone_idx) { pg_data_t *pgdat; + enum zone_type curr_idx;
if (!managed_zone(zone)) return;
if (!cpuset_zone_allowed(zone, gfp_flags)) return; + pgdat = zone->zone_pgdat; + curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); + + if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx) + WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx); + + if (READ_ONCE(pgdat->kswapd_order) < order) + WRITE_ONCE(pgdat->kswapd_order, order);
- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) - pgdat->kswapd_classzone_idx = classzone_idx; - else - pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, - classzone_idx); - pgdat->kswapd_order = max(pgdat->kswapd_order, order); if (!waitqueue_active(&pgdat->kswapd_wait)) return;
From: Trond Myklebust trond.myklebust@hammerspace.com
[ Upstream commit 1fab7dc477241c12f977955aa6baea7938b6f08d ]
Move the test for whether a task is already queued to prevent corruption of the timer list in __rpc_sleep_on_priority_timeout().
Signed-off-by: Trond Myklebust trond.myklebust@hammerspace.com Signed-off-by: Sasha Levin sashal@kernel.org --- net/sunrpc/sched.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 9c79548c68474..53d8b82eda006 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -204,10 +204,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task, unsigned char queue_priority) { - WARN_ON_ONCE(RPC_IS_QUEUED(task)); - if (RPC_IS_QUEUED(task)) - return; - INIT_LIST_HEAD(&task->u.tk_wait.timer_list); if (RPC_IS_PRIORITY(queue)) __rpc_add_wait_queue_priority(queue, task, queue_priority); @@ -382,7 +378,7 @@ static void rpc_make_runnable(struct workqueue_struct *wq, * NB: An RPC task will only receive interrupt-driven events as long * as it's on a wait queue. */ -static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, +static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, unsigned char queue_priority) { @@ -395,12 +391,23 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
}
+static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, + struct rpc_task *task, + unsigned char queue_priority) +{ + if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) + return; + __rpc_do_sleep_on_priority(q, task, queue_priority); +} + static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, struct rpc_task *task, unsigned long timeout, unsigned char queue_priority) { + if (WARN_ON_ONCE(RPC_IS_QUEUED(task))) + return; if (time_is_after_jiffies(timeout)) { - __rpc_sleep_on_priority(q, task, queue_priority); + __rpc_do_sleep_on_priority(q, task, queue_priority); __rpc_add_timer(q, task, timeout); } else task->tk_status = -ETIMEDOUT;
From: Israel Rukshin israelr@mellanox.com
[ Upstream commit 21f9024355e58772ec5d7fc3534aa5e29d72a8b6 ]
In case the rdma accept fails at nvmet_rdma_queue_connect(), release work is scheduled. Later on, a new RDMA CM event may arrive since we didn't destroy the cm_id, and nvmet_rdma_queue_connect_fail() is called, which schedules another release work. This will cause nvmet_rdma_free_queue() to be called twice. To fix this we implicitly destroy the cm_id by returning a non-zero ret code, which guarantees that new rdma_cm events will not arrive afterwards. Also add a qp pointer to the nvmet_rdma_queue structure, so we can use it when the cm_id pointer is NULL or was destroyed.
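The ownership handoff that prevents the double free can be modeled generically; a toy sketch with illustrative names, not the driver code:

#include <stdlib.h>

struct cm_id { int unused; };
struct queue { struct cm_id *cm_id; };

static void destroy_id(struct cm_id *id)
{
	free(id);
}

/* Common teardown: destroy the cm_id only if this queue still owns it. */
static void free_queue(struct queue *q)
{
	if (q->cm_id)
		destroy_id(q->cm_id);
	free(q);
}

/* Accept-failure path: returning an error makes the framework destroy
 * the cm_id implicitly, so drop our reference first; free_queue() then
 * skips the second destroy instead of double-freeing. */
static int accept_failed(struct queue *q)
{
	q->cm_id = NULL;
	free_queue(q);
	return -1;
}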
Signed-off-by: Israel Rukshin israelr@mellanox.com Suggested-by: Sagi Grimberg sagi@grimberg.me Reviewed-by: Max Gurtovoy maxg@mellanox.com Reviewed-by: Sagi Grimberg sagi@grimberg.me Signed-off-by: Christoph Hellwig hch@lst.de Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/target/rdma.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 36d906a7f70d3..b5314164479e9 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -75,6 +75,7 @@ enum nvmet_rdma_queue_state {
struct nvmet_rdma_queue { struct rdma_cm_id *cm_id; + struct ib_qp *qp; struct nvmet_port *port; struct ib_cq *cq; atomic_t sq_wr_avail; @@ -464,7 +465,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, if (ndev->srq) ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL); else - ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL); + ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
if (unlikely(ret)) pr_err("post_recv cmd failed\n"); @@ -503,7 +504,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
if (rsp->n_rdma) { - rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, + rdma_rw_ctx_destroy(&rsp->rw, queue->qp, queue->cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); } @@ -587,7 +588,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(rsp->n_rdma <= 0); atomic_add(rsp->n_rdma, &queue->sq_wr_avail); - rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, + rdma_rw_ctx_destroy(&rsp->rw, queue->qp, queue->cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); rsp->n_rdma = 0; @@ -742,7 +743,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) }
if (nvmet_rdma_need_data_in(rsp)) { - if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp, + if (rdma_rw_ctx_post(&rsp->rw, queue->qp, queue->cm_id->port_num, &rsp->read_cqe, NULL)) nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); } else { @@ -1025,6 +1026,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) pr_err("failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } + queue->qp = queue->cm_id->qp;
atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
@@ -1053,11 +1055,10 @@ err_destroy_cq:
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { - struct ib_qp *qp = queue->cm_id->qp; - - ib_drain_qp(qp); - rdma_destroy_id(queue->cm_id); - ib_destroy_qp(qp); + ib_drain_qp(queue->qp); + if (queue->cm_id) + rdma_destroy_id(queue->cm_id); + ib_destroy_qp(queue->qp); ib_free_cq(queue->cq); }
@@ -1291,9 +1292,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); if (ret) { - schedule_work(&queue->release_work); - /* Destroying rdma_cm id is not needed here */ - return 0; + /* + * Don't destroy the cm_id in free path, as we implicitly + * destroy the cm_id here with non-zero ret code. + */ + queue->cm_id = NULL; + goto free_queue; }
mutex_lock(&nvmet_rdma_queue_mutex); @@ -1302,6 +1306,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
return 0;
+free_queue: + nvmet_rdma_free_queue(queue); put_device: kref_put(&ndev->ref, nvmet_rdma_free_dev);
From: Sebastian Andrzej Siewior bigeasy@linutronix.de
[ Upstream commit 62849a9612924a655c67cf6962920544aa5c20db ]
The kernel test robot triggered a warning with the following race:

   task-ctx A                     interrupt-ctx B
 worker -> process_one_work()
   -> work_item()
     -> schedule();
       -> sched_submit_work()
         -> wq_worker_sleeping()
           -> ->sleeping = 1
             atomic_dec_and_test(nr_running)
       __schedule();              *interrupt*
                                  async_page_fault()
                                  -> local_irq_enable();
                                  -> schedule();
                                    -> sched_submit_work()
                                      -> wq_worker_sleeping()
                                        -> if (WARN_ON(->sleeping)) return
                                    -> __schedule()
                                      -> sched_update_worker()
                                        -> wq_worker_running()
                                          -> atomic_inc(nr_running);
                                          -> ->sleeping = 0;

       -> sched_update_worker()
         -> wq_worker_running()
            if (!->sleeping) return
In this context the warning is pointless, everything is fine. An interrupt before wq_worker_sleeping() will perform the ->sleeping assignment (0 -> 1 > 0) twice. An interrupt after wq_worker_sleeping() will trigger the warning and nr_running will be decremented (by A) and incremented once (only by B, A will skip it). This is the case until ->sleeping is zeroed again in wq_worker_running().
Remove the WARN statement because this condition may happen. Document that preemption around wq_worker_sleeping() needs to be disabled to protect ->sleeping and not just as an optimisation.
Fixes: 6d25be5782e48 ("sched/core, workqueues: Distangle worker accounting from rq lock") Reported-by: kernel test robot lkp@intel.com Signed-off-by: Sebastian Andrzej Siewior bigeasy@linutronix.de Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Signed-off-by: Ingo Molnar mingo@kernel.org Cc: Tejun Heo tj@kernel.org Link: https://lkml.kernel.org/r/20200327074308.GY11705@shao2-debian Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/sched/core.c | 3 ++- kernel/workqueue.c | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 352239c411a44..79ce22de44095 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4199,7 +4199,8 @@ static inline void sched_submit_work(struct task_struct *tsk) * it wants to wake up a task to maintain concurrency. * As this function is called inside the schedule() context, * we disable preemption to avoid it calling schedule() again - * in the possible wakeup of a kworker. + * in the possible wakeup of a kworker and because wq_worker_sleeping() + * requires it. */ if (tsk->flags & PF_WQ_WORKER) { preempt_disable(); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1a0c224af6fb3..4aa268582a225 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -864,7 +864,8 @@ void wq_worker_running(struct task_struct *task) * @task: task going to sleep * * This function is called from schedule() when a busy worker is - * going to sleep. + * going to sleep. Preemption needs to be disabled to protect ->sleeping + * assignment. */ void wq_worker_sleeping(struct task_struct *task) { @@ -881,7 +882,8 @@ void wq_worker_sleeping(struct task_struct *task)
pool = worker->pool;
- if (WARN_ON_ONCE(worker->sleeping)) + /* Return if preempted before wq_worker_running() was reached */ + if (worker->sleeping) return;
worker->sleeping = 1;
From: Jack Zhang Jack.Zhang1@amd.com
[ Upstream commit 04bef61e5da18c2b301c629a209ccdba4d4c6fbb ]
kfd_pre_reset will free mem_objs allocated by kfd_gtt_sa_allocate
Without this change, the sriov tdr code path will never free those allocated memories, resulting in a memory leak.

v2: add a bugfix for the kiq ring test failure
Signed-off-by: Jack Zhang Jack.Zhang1@amd.com Reviewed-by: Monk Liu monk.liu@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ 3 files changed, 8 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index d10f483f5e273..ce30d4e8bf25f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -644,6 +644,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v10_compute_mqd *m = get_mqd(mqd);
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) + return 0; + #if 0 unsigned long flags; int retry; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index e262f2ac07a35..92754cfb98086 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -540,6 +540,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t temp; struct v9_mqd *m = get_mqd(mqd);
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) + return 0; + if (adev->in_gpu_reset) return -EIO;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5e1dce4241547..4105fbf571674 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3466,6 +3466,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r;
+ amdgpu_amdkfd_pre_reset(adev); + /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r)
From: Jaewon Kim jaewon31.kim@samsung.com
[ Upstream commit 09ef5283fd96ac424ef0e569626f359bf9ab86c9 ]
When passing requirements to vm_unmapped_area, arch_get_unmapped_area and arch_get_unmapped_area_topdown did not set align_offset. Internally, in both unmapped_area and unmapped_area_topdown, if info->align_mask is 0, then info->align_offset is meaningless.
But commit df529cabb7a2 ("mm: mmap: add trace point of vm_unmapped_area") always prints info->align_offset even though it is uninitialized.
Fix this uninitialized value issue by setting it to 0 explicitly.
Before:
vm_unmapped_area: addr=0x755b155000 err=0 total_vm=0x15aaf0 flags=0x1 len=0x109000 lo=0x8000 hi=0x75eed48000 mask=0x0 ofs=0x4022

After:
vm_unmapped_area: addr=0x74a4ca1000 err=0 total_vm=0x168ab1 flags=0x1 len=0x9000 lo=0x8000 hi=0x753d94b000 mask=0x0 ofs=0x0
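The underlying issue is ordinary uninitialized-field behavior in C; a small illustrative program, with the struct and field names only modeled on vm_unmapped_area_info:

#include <stdio.h>

struct area_info {
	unsigned long align_mask;
	unsigned long align_offset;
};

int main(void)
{
	struct area_info info;

	info.align_mask = 0;
	/* Without the next line, align_offset holds whatever happened to
	 * be on the stack, which is how values like ofs=0x4022 ended up
	 * in the trace output even though the offset was never used. */
	info.align_offset = 0;

	printf("mask=%#lx ofs=%#lx\n", info.align_mask, info.align_offset);
	return 0;
}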
Signed-off-by: Jaewon Kim jaewon31.kim@samsung.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Reviewed-by: Andrew Morton akpm@linux-foundation.org Cc: Matthew Wilcox (Oracle) willy@infradead.org Cc: Michel Lespinasse walken@google.com Cc: Borislav Petkov bp@suse.de Link: http://lkml.kernel.org/r/20200409094035.19457-1-jaewon31.kim@samsung.com Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/mmap.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/mm/mmap.c b/mm/mmap.c index a3584a90c55c2..ba78f1f1b1bd1 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2126,6 +2126,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.align_mask = 0; + info.align_offset = 0; return vm_unmapped_area(&info); } #endif @@ -2167,6 +2168,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; + info.align_offset = 0; addr = vm_unmapped_area(&info);
/*
From: Takashi Iwai tiwai@suse.de
[ Upstream commit c4c8dd6ef807663e42a5f04ea77cd62029eb99fa ]
The HD-audio controller does system-suspend and resume operations by directly calling its helpers __azx_runtime_suspend() and __azx_runtime_resume(). However, in general, we don't always have to resume the device fully at system resume; typically, if a device has been runtime-suspended, we can leave it to runtime resume.

Usually, to achieve this, the driver would call the pm_runtime_force_suspend() and pm_runtime_force_resume() pair in the system suspend and resume ops. Unfortunately, this doesn't work for the resume path in our case. For handling the jack detection at system resume, a child codec device may need a (literally) forcible resume even if it's been runtime-suspended, and for that, the controller device must also be resumed even if it's been suspended.

This patch is an attempt to improve the situation. It replaces the direct __azx_runtime_suspend()/_resume() calls with pm_runtime_force_suspend() and pm_runtime_force_resume(), with a slight trick as we've done for the codec side. More exactly:
- azx_has_pm_runtime() check is dropped from azx_runtime_suspend() and azx_runtime_resume(), so that it can be properly executed from the system-suspend/resume path
- The WAKEEN handling depends on the card's power state now; it's set and cleared only for the runtime-suspend
- azx_resume() checks whether any codec may need the forcible resume beforehand. If the forcible resume is required, it does temporary PM refcount up/down for actually triggering the runtime resume.
- A new helper function, hda_codec_need_resume(), is introduced for checking whether the codec needs a forcible runtime-resume, and the existing code is rewritten with that.
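The refcount trick behind the forced resume boils down to the following shape; a sketch using the standard runtime-PM helpers (the wrapper name is illustrative):

#include <linux/pm_runtime.h>

/* Holding a temporary usage reference across pm_runtime_force_resume()
 * prevents the PM core from deciding the device may stay suspended, so
 * the device is brought up fully; dropping the reference afterwards
 * allows normal runtime suspend again. */
static void resume_forcibly(struct device *dev, bool forced)
{
	if (forced)
		pm_runtime_get_noresume(dev);
	pm_runtime_force_resume(dev);
	if (forced)
		pm_runtime_put(dev);
}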
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=207043 Link: https://lore.kernel.org/r/20200413082034.25166-6-tiwai@suse.de Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- include/sound/hda_codec.h | 5 +++++ sound/pci/hda/hda_codec.c | 2 +- sound/pci/hda/hda_intel.c | 38 +++++++++++++++++++++++++++----------- 3 files changed, 33 insertions(+), 12 deletions(-)
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 9a0393cf024c2..65c056ce91128 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -494,6 +494,11 @@ void snd_hda_update_power_acct(struct hda_codec *codec); static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {} #endif
+static inline bool hda_codec_need_resume(struct hda_codec *codec) +{ + return !codec->relaxed_resume && codec->jacktbl.used; +} + #ifdef CONFIG_SND_HDA_PATCH_LOADER /* * patch firmware diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 103011e7285a3..12da263fb02ba 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2958,7 +2958,7 @@ static int hda_codec_runtime_resume(struct device *dev) static int hda_codec_force_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); - bool forced_resume = !codec->relaxed_resume && codec->jacktbl.used; + bool forced_resume = hda_codec_need_resume(codec); int ret;
/* The get/put pair below enforces the runtime resume even if the diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 7353d2ec359ae..a6e8aaa091c7d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1025,7 +1025,7 @@ static int azx_suspend(struct device *dev) chip = card->private_data; bus = azx_bus(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); - __azx_runtime_suspend(chip); + pm_runtime_force_suspend(dev); if (bus->irq >= 0) { free_irq(bus->irq, chip); bus->irq = -1; @@ -1041,7 +1041,9 @@ static int azx_suspend(struct device *dev) static int azx_resume(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); + struct hda_codec *codec; struct azx *chip; + bool forced_resume = false;
if (!azx_is_pm_ready(card)) return 0; @@ -1052,7 +1054,20 @@ static int azx_resume(struct device *dev) chip->msi = 0; if (azx_acquire_irq(chip, 1) < 0) return -EIO; - __azx_runtime_resume(chip, false); + + /* check for the forced resume */ + list_for_each_codec(codec, &chip->bus) { + if (hda_codec_need_resume(codec)) { + forced_resume = true; + break; + } + } + + if (forced_resume) + pm_runtime_get_noresume(dev); + pm_runtime_force_resume(dev); + if (forced_resume) + pm_runtime_put(dev); snd_power_change_state(card, SNDRV_CTL_POWER_D0);
trace_azx_resume(chip); @@ -1099,12 +1114,12 @@ static int azx_runtime_suspend(struct device *dev) if (!azx_is_pm_ready(card)) return 0; chip = card->private_data; - if (!azx_has_pm_runtime(chip)) - return 0;
/* enable controller wake up event */ - azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | - STATESTS_INT_MASK); + if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) { + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | + STATESTS_INT_MASK); + }
__azx_runtime_suspend(chip); trace_azx_runtime_suspend(chip); @@ -1115,17 +1130,18 @@ static int azx_runtime_resume(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); struct azx *chip; + bool from_rt = snd_power_get_state(card) == SNDRV_CTL_POWER_D0;
if (!azx_is_pm_ready(card)) return 0; chip = card->private_data; - if (!azx_has_pm_runtime(chip)) - return 0; - __azx_runtime_resume(chip, true); + __azx_runtime_resume(chip, from_rt);
/* disable controller Wake Up event*/ - azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & - ~STATESTS_INT_MASK); + if (from_rt) { + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & + ~STATESTS_INT_MASK); + }
trace_azx_runtime_resume(chip); return 0;
From: Nilesh Javali njavali@marvell.com
[ Upstream commit b9b97e6903032ec56e6dcbe137a9819b74a17fea ]
The destroy connection ramrod timed out during session logout. Fix the wait delay for graceful vs abortive termination as per the FW requirements.
Link: https://lore.kernel.org/r/20200408064332.19377-7-mrangankar@marvell.com Reviewed-by: Lee Duncan lduncan@suse.com Signed-off-by: Nilesh Javali njavali@marvell.com Signed-off-by: Manish Rangankar mrangankar@marvell.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/qedi/qedi_iscsi.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 0f57c80734061..0f2622a48311c 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -1062,6 +1062,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) break; }
+ if (!abrt_conn) + wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer; + qedi_ep->state = EP_STATE_DISCONN_START; ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); if (ret) {
From: Raviteja Narayanam raviteja.narayanam@xilinx.com
[ Upstream commit 42e11948ddf68b9f799cad8c0ddeab0a39da33e8 ]
On some platforms, the log is corrupted while the console is being registered. It is observed that when set_termios is called, there are still some bytes in the FIFO waiting to be transmitted.
So, wait for tx_empty inside cdns_uart_console_setup before calling set_termios.
Signed-off-by: Raviteja Narayanam raviteja.narayanam@xilinx.com Reviewed-by: Shubhrajyoti Datta shubhrajyoti.datta@xilinx.com Link: https://lore.kernel.org/r/1586413563-29125-2-git-send-email-raviteja.narayan... Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/tty/serial/xilinx_uartps.c | 8 ++++++++ 1 file changed, 8 insertions(+)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 8948970f795e6..9359c80fbb9f5 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1248,6 +1248,7 @@ static int cdns_uart_console_setup(struct console *co, char *options) int bits = 8; int parity = 'n'; int flow = 'n'; + unsigned long time_out;
if (!port->membase) { pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n", @@ -1258,6 +1259,13 @@ static int cdns_uart_console_setup(struct console *co, char *options) if (options) uart_parse_options(options, &baud, &parity, &bits, &flow);
+ /* Wait for tx_empty before setting up the console */ + time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT); + + while (time_before(jiffies, time_out) && + cdns_uart_tx_empty(port) != TIOCSER_TEMT) + cpu_relax(); + return uart_set_options(port, co, baud, parity, bits, flow); }
From: Josef Bacik josef@toxicpanda.com
[ Upstream commit aec7db3b13a07d515c15ada752a7287a44a79ea0 ]
I made a mistake with my previous fix: I assumed that we didn't need to mess with the reloc roots once we were out of the part of relocation where we are actually moving the extents.
The subtle thing that I missed is that btrfs_init_reloc_root() also updates the last_trans for the reloc root when we do btrfs_record_root_in_trans() for the corresponding fs_root. I've added a comment to make sure future me doesn't make this mistake again.
This showed up as a WARN_ON() in btrfs_copy_root() because our last_trans didn't == the current transid. This could happen if we snapshotted a fs root with a reloc root after we set rc->create_reloc_tree = 0, but before we actually merge the reloc root.
Worth mentioning that the regression produced the following warning when running snapshot creation and balance in parallel:
BTRFS info (device sdc): relocating block group 30408704 flags metadata|dup
------------[ cut here ]------------
WARNING: CPU: 0 PID: 12823 at fs/btrfs/ctree.c:191 btrfs_copy_root+0x26f/0x430 [btrfs]
CPU: 0 PID: 12823 Comm: btrfs Tainted: G        W         5.6.0-rc7-btrfs-next-58 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
RIP: 0010:btrfs_copy_root+0x26f/0x430 [btrfs]
RSP: 0018:ffffb96e044279b8 EFLAGS: 00010202
RAX: 0000000000000009 RBX: ffff9da70bf61000 RCX: ffffb96e04427a48
RDX: ffff9da733a770c8 RSI: ffff9da70bf61000 RDI: ffff9da694163818
RBP: ffff9da733a770c8 R08: fffffffffffffff8 R09: 0000000000000002
R10: ffffb96e044279a0 R11: 0000000000000000 R12: ffff9da694163818
R13: fffffffffffffff8 R14: ffff9da6d2512000 R15: ffff9da714cdac00
FS:  00007fdeacf328c0(0000) GS:ffff9da735e00000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000055a2a5b8a118 CR3: 00000001eed78002 CR4: 00000000003606f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 ? create_reloc_root+0x49/0x2b0 [btrfs]
 ? kmem_cache_alloc_trace+0xe5/0x200
 create_reloc_root+0x8b/0x2b0 [btrfs]
 btrfs_reloc_post_snapshot+0x96/0x5b0 [btrfs]
 create_pending_snapshot+0x610/0x1010 [btrfs]
 create_pending_snapshots+0xa8/0xd0 [btrfs]
 btrfs_commit_transaction+0x4c7/0xc50 [btrfs]
 ? btrfs_mksubvol+0x3cd/0x560 [btrfs]
 btrfs_mksubvol+0x455/0x560 [btrfs]
 __btrfs_ioctl_snap_create+0x15f/0x190 [btrfs]
 btrfs_ioctl_snap_create_v2+0xa4/0xf0 [btrfs]
 ? mem_cgroup_commit_charge+0x6e/0x540
 btrfs_ioctl+0x12d8/0x3760 [btrfs]
 ? do_raw_spin_unlock+0x49/0xc0
 ? _raw_spin_unlock+0x29/0x40
 ? __handle_mm_fault+0x11b3/0x14b0
 ? ksys_ioctl+0x92/0xb0
 ksys_ioctl+0x92/0xb0
 ? trace_hardirqs_off_thunk+0x1a/0x1c
 __x64_sys_ioctl+0x16/0x20
 do_syscall_64+0x5c/0x280
 entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x7fdeabd3bdd7
Fixes: 2abc726ab4b8 ("btrfs: do not init a reloc root if we aren't relocating") Reviewed-by: Filipe Manana fdmanana@suse.com Signed-off-by: Josef Bacik josef@toxicpanda.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/btrfs/relocation.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ece53d2f55ae3..1bc57f7b91cfa 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1468,8 +1468,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, int clear_rsv = 0; int ret;
- if (!rc || !rc->create_reloc_tree || - root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) + if (!rc) return 0;
/* @@ -1479,12 +1478,28 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, if (reloc_root_is_dead(root)) return 0;
+ /* + * This is subtle but important. We do not do + * record_root_in_transaction for reloc roots, instead we record their + * corresponding fs root, and then here we update the last trans for the + * reloc root. This means that we have to do this for the entire life + * of the reloc root, regardless of which stage of the relocation we are + * in. + */ if (root->reloc_root) { reloc_root = root->reloc_root; reloc_root->last_trans = trans->transid; return 0; }
+ /* + * We are merging reloc roots, we do not need new reloc trees. Also + * reloc trees never need their own reloc tree. + */ + if (!rc->create_reloc_tree || + root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) + return 0; + if (!trans->reloc_reserved) { rsv = trans->block_rsv; trans->block_rsv = rc->block_rsv;
From: Steve Rutherford srutherford@google.com
[ Upstream commit 7289fdb5dcdbc5155b5531529c44105868a762f2 ]
Fixes a NULL pointer dereference, caused by the PIT firing an interrupt before the interrupt table has been initialized.
SET_PIT2 can race with the creation of the IRQchip. In particular, if SET_PIT2 is called with a low PIT timer period (after the creation of the IOAPIC, but before the instantiation of the irq routes), the PIT can fire an interrupt at an uninitialized table.
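The fix is the classic check-and-use-under-one-lock pattern; a userspace model with pthreads (names illustrative):

#include <pthread.h>

static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pit { int period; } *vpit;	/* created/freed under kvm_lock */

static int set_pit(int period)
{
	int ret = -1;	/* models -ENXIO */

	pthread_mutex_lock(&kvm_lock);
	if (vpit) {
		/* The existence check and the use now share one critical
		 * section with creation, so the PIT can no longer appear
		 * or vanish between the check and the access. */
		vpit->period = period;
		ret = 0;
	}
	pthread_mutex_unlock(&kvm_lock);
	return ret;
}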
Signed-off-by: Steve Rutherford srutherford@google.com Signed-off-by: Jon Cargille jcargill@google.com Reviewed-by: Jim Mattson jmattson@google.com Message-Id: 20200416191152.259434-1-jcargill@google.com Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/kvm/x86.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8920ee7b28811..67ad417a29ca4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5050,10 +5050,13 @@ set_identity_unlock: r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(u.ps))) goto out; + mutex_lock(&kvm->lock); r = -ENXIO; if (!kvm->arch.vpit) - goto out; + goto set_pit_out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); +set_pit_out: + mutex_unlock(&kvm->lock); break; } case KVM_GET_PIT2: { @@ -5073,10 +5076,13 @@ set_identity_unlock: r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; + mutex_lock(&kvm->lock); r = -ENXIO; if (!kvm->arch.vpit) - goto out; + goto set_pit2_out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); +set_pit2_out: + mutex_unlock(&kvm->lock); break; } case KVM_REINJECT_CONTROL: {
From: Stephane Eranian eranian@google.com
[ Upstream commit bec49a9e05db3dbdca696fa07c62c52638fb6371 ]
When it is not possible for a non-privileged perf command to monitor at the kernel level (:k), the fallback code forces a :u. That works if the event was previously monitoring both levels. But if the event was already constrained to kernel only, then it does not make sense to restrict it to user only.
Given the code works by exclusion, a kernel only event would have:
attr->exclude_user = 1
The fallback code would add:
attr->exclude_kernel = 1
In the end the event would not monitor in either the user level or the kernel level. In other words, it would count nothing.
An event programmed to monitor kernel only cannot be switched to user only without seriously warning the user.
This patch forces an error in this case to make it clear the request cannot really be satisfied.
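A compact model of the new check; the struct fields are modeled on perf_event_attr and the function is illustrative:

#include <stdbool.h>

struct attr {
	bool exclude_user;
	bool exclude_kernel;
};

/* The old fallback unconditionally set exclude_kernel; for a :k event
 * (exclude_user already set) that left both bits set, i.e. an event
 * counting nothing. Refuse the fallback in that case instead. */
static bool try_paranoid_fallback(struct attr *attr)
{
	if (attr->exclude_user)
		return false;		/* kernel-only event: report an error */
	attr->exclude_kernel = true;	/* both levels -> user only */
	return true;
}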
Behavior with paranoid 1:
$ sudo bash -c "echo 1 > /proc/sys/kernel/perf_event_paranoid" $ perf stat -e cycles:k sleep 1
Performance counter stats for 'sleep 1':
1,520,413 cycles:k
1.002361664 seconds time elapsed
0.002480000 seconds user 0.000000000 seconds sys
Old behavior with paranoid 2:
$ sudo bash -c "echo 2 > /proc/sys/kernel/perf_event_paranoid" $ perf stat -e cycles:k sleep 1 Performance counter stats for 'sleep 1':
0 cycles:ku
1.002358127 seconds time elapsed
0.002384000 seconds user 0.000000000 seconds sys
New behavior with paranoid 2:
$ sudo bash -c "echo 2 > /proc/sys/kernel/perf_event_paranoid" $ perf stat -e cycles:k sleep 1 Error: You may not have permission to collect stats.
Consider tweaking /proc/sys/kernel/perf_event_paranoid, which controls use of the performance events system by unprivileged users (without CAP_PERFMON or CAP_SYS_ADMIN).
The current value is 2:
-1: Allow use of (almost) all events by all users
    Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
>= 0: Disallow ftrace function tracepoint by users without CAP_PERFMON or CAP_SYS_ADMIN
      Disallow raw tracepoint access by users without CAP_SYS_PERFMON or CAP_SYS_ADMIN
>= 1: Disallow CPU event access by users without CAP_PERFMON or CAP_SYS_ADMIN
>= 2: Disallow kernel profiling by users without CAP_PERFMON or CAP_SYS_ADMIN
To make this setting permanent, edit /etc/sysctl.conf too, e.g.:
kernel.perf_event_paranoid = -1
v2 of this patch addresses the review feedback from jolsa@redhat.com.
Signed-off-by: Stephane Eranian eranian@google.com Reviewed-by: Ian Rogers irogers@google.com Acked-by: Jiri Olsa jolsa@redhat.com Tested-by: Arnaldo Carvalho de Melo acme@redhat.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Jiri Olsa jolsa@redhat.com Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Link: http://lore.kernel.org/lkml/20200414161550.225588-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/evsel.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index dfc982baecab4..12b1755b136d3 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2358,6 +2358,10 @@ bool perf_evsel__fallback(struct evsel *evsel, int err, char *new_name; const char *sep = ":";
+ /* If event has exclude user then don't exclude kernel. */ + if (evsel->core.attr.exclude_user) + return false; + /* Is there already the separator in the name. */ if (strchr(name, '/') || strchr(name, ':'))
From: Douglas Anderson dianders@chromium.org
[ Upstream commit b849dd84b6ccfe32622988b79b7b073861fcf9f7 ]
While trying to "dd" to the block device for a USB stick, I encountered a hung task warning (blocked for > 120 seconds). I managed to come up with an easy way to reproduce this on my system (where /dev/sdb is the block device for my USB stick) with:
while true; do dd if=/dev/zero of=/dev/sdb bs=4M; done
With my reproduction here are the relevant bits from the hung task detector:
INFO: task udevd:294 blocked for more than 122 seconds.
...
udevd           D    0   294      1 0x00400008
Call trace:
 ...
 mutex_lock_nested+0x40/0x50
 __blkdev_get+0x7c/0x3d4
 blkdev_get+0x118/0x138
 blkdev_open+0x94/0xa8
 do_dentry_open+0x268/0x3a0
 vfs_open+0x34/0x40
 path_openat+0x39c/0xdf4
 do_filp_open+0x90/0x10c
 do_sys_open+0x150/0x3c8
 ...

...
Showing all locks held in the system:
...
1 lock held by dd/2798:
 #0: ffffff814ac1a3b8 (&bdev->bd_mutex){+.+.}, at: __blkdev_put+0x50/0x204
...
dd              D    0  2798   2764 0x00400208
Call trace:
 ...
 schedule+0x8c/0xbc
 io_schedule+0x1c/0x40
 wait_on_page_bit_common+0x238/0x338
 __lock_page+0x5c/0x68
 write_cache_pages+0x194/0x500
 generic_writepages+0x64/0xa4
 blkdev_writepages+0x24/0x30
 do_writepages+0x48/0xa8
 __filemap_fdatawrite_range+0xac/0xd8
 filemap_write_and_wait+0x30/0x84
 __blkdev_put+0x88/0x204
 blkdev_put+0xc4/0xe4
 blkdev_close+0x28/0x38
 __fput+0xe0/0x238
 ____fput+0x1c/0x28
 task_work_run+0xb0/0xe4
 do_notify_resume+0xfc0/0x14bc
 work_pending+0x8/0x14
The problem appears related to the fact that my USB disk is terribly slow and that I have a lot of RAM in my system to cache things. Specifically my writes seem to be happening at ~15 MB/s and I've got ~4 GB of RAM in my system that can be used for buffering. To write 4 GB of buffer to disk thus takes ~4000 MB / ~15 MB/s = ~267 seconds.
The 267 second number is a problem because in __blkdev_put() we call sync_blockdev() while holding the bd_mutex. Any other callers who want the bd_mutex will be blocked for the whole time.
The problem is made worse because I believe blkdev_put() specifically tells other tasks (namely udev) to go try to access the device at right around the same time we're going to hold the mutex for a long time.
Putting some traces around this (after disabling the hung task detector), I could confirm: dd: 437.608600: __blkdev_put() right before sync_blockdev() for sdb udevd: 437.623901: blkdev_open() right before blkdev_get() for sdb dd: 661.468451: __blkdev_put() right after sync_blockdev() for sdb udevd: 663.820426: blkdev_open() right after blkdev_get() for sdb
A simple fix for this is to realize that sync_blockdev() works fine if you're not holding the mutex. Also, it's not the end of the world if you sync a little early (though it can have performance impacts). Thus we can make a guess that we're going to need to do the sync and then do it without holding the mutex. We still do one last sync with the mutex but it should be much, much faster.
With this, my hung task warnings for my test case are gone.
Signed-off-by: Douglas Anderson dianders@chromium.org Reviewed-by: Guenter Roeck groeck@chromium.org Reviewed-by: Christoph Hellwig hch@lst.de Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sasha Levin sashal@kernel.org --- fs/block_dev.c | 10 ++++++++++ 1 file changed, 10 insertions(+)
diff --git a/fs/block_dev.c b/fs/block_dev.c index 2dc9c73a4cb29..79272cdbe8277 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1857,6 +1857,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) struct gendisk *disk = bdev->bd_disk; struct block_device *victim = NULL;
+ /* + * Sync early if it looks like we're the last one. If someone else + * opens the block device between now and the decrement of bd_openers + * then we did a sync that we didn't need to, but that's not the end + * of the world and we want to avoid long (could be several minute) + * syncs while holding the mutex. + */ + if (bdev->bd_openers == 1) + sync_blockdev(bdev); + mutex_lock_nested(&bdev->bd_mutex, for_part); if (for_part) bdev->bd_part_count--;
From: Madhuparna Bhowmik madhuparnabhowmik10@gmail.com
[ Upstream commit 44b8fb6eaa7c3fb770bf1e37619cdb3902cca1fc ]
After registering the character device, the file operation callbacks can be called. The open callback registers the interrupt handler, so the interrupt handler can execute in parallel with the rest of the init function. To avoid such a data race, initialize the telclk_interrupt variable and struct alarm_events before registering the character device.
Found by Linux Driver Verification project (linuxtesting.org).
Signed-off-by: Madhuparna Bhowmik madhuparnabhowmik10@gmail.com Link: https://lore.kernel.org/r/20200417153451.1551-1-madhuparnabhowmik10@gmail.co... Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/char/tlclk.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 6d81bb3bb503f..896a3550fba9f 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -777,17 +777,21 @@ static int __init tlclk_init(void) { int ret;
+ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); + + alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); + if (!alarm_events) { + ret = -ENOMEM; + goto out1; + } + ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); if (ret < 0) { printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); + kfree(alarm_events); return ret; } tlclk_major = ret; - alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); - if (!alarm_events) { - ret = -ENOMEM; - goto out1; - }
/* Read telecom clock IRQ number (Set by BIOS) */ if (!request_region(TLCLK_BASE, 8, "telco_clock")) { @@ -796,7 +800,6 @@ static int __init tlclk_init(void) ret = -EBUSY; goto out2; } - telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", @@ -837,8 +840,8 @@ out3: release_region(TLCLK_BASE, 8); out2: kfree(alarm_events); -out1: unregister_chrdev(tlclk_major, "telco_clock"); +out1: return ret; }
From: Zenghui Yu yuzenghui@huawei.com
[ Upstream commit 969ce8b5260d8ec01e6f1949d2927a86419663ce ]
It's likely that the vcpu fails to handle all virtual interrupts if userspace decides to destroy it, leaving the pending ones in the ap_list. If an un-handled one is an LPI, its vgic_irq structure will eventually be leaked because of an extra refcount increment in vgic_queue_irq_unlock().
This was detected by kmemleak on almost every guest destroy, the backtrace is as follows:
unreferenced object 0xffff80725aed5500 (size 128): comm "CPU 5/KVM", pid 40711, jiffies 4298024754 (age 166366.512s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 08 01 a9 73 6d 80 ff ff ...........sm... c8 61 ee a9 00 20 ff ff 28 1e 55 81 6c 80 ff ff .a... ..(.U.l... backtrace: [<000000004bcaa122>] kmem_cache_alloc_trace+0x2dc/0x418 [<0000000069c7dabb>] vgic_add_lpi+0x88/0x418 [<00000000bfefd5c5>] vgic_its_cmd_handle_mapi+0x4dc/0x588 [<00000000cf993975>] vgic_its_process_commands.part.5+0x484/0x1198 [<000000004bd3f8e3>] vgic_its_process_commands+0x50/0x80 [<00000000b9a65b2b>] vgic_mmio_write_its_cwriter+0xac/0x108 [<0000000009641ebb>] dispatch_mmio_write+0xd0/0x188 [<000000008f79d288>] __kvm_io_bus_write+0x134/0x240 [<00000000882f39ac>] kvm_io_bus_write+0xe0/0x150 [<0000000078197602>] io_mem_abort+0x484/0x7b8 [<0000000060954e3c>] kvm_handle_guest_abort+0x4cc/0xa58 [<00000000e0d0cd65>] handle_exit+0x24c/0x770 [<00000000b44a7fad>] kvm_arch_vcpu_ioctl_run+0x460/0x1988 [<0000000025fb897c>] kvm_vcpu_ioctl+0x4f8/0xee0 [<000000003271e317>] do_vfs_ioctl+0x160/0xcd8 [<00000000e7f39607>] ksys_ioctl+0x98/0xd8
Fix it by retiring all pending LPIs in the ap_list on the destroy path.
p.s. I can also reproduce it on a normal guest shutdown. It is because userspace still sends LPIs to the vcpu (through the KVM_SIGNAL_MSI ioctl) while the guest is being shut down and unable to handle them. A little strange though, and I haven't dug further...
Reviewed-by: James Morse james.morse@arm.com Signed-off-by: Zenghui Yu yuzenghui@huawei.com [maz: moved the distributor deallocation down to avoid an UAF splat] Signed-off-by: Marc Zyngier maz@kernel.org Link: https://lore.kernel.org/r/20200414030349.625-2-yuzenghui@huawei.com Signed-off-by: Sasha Levin sashal@kernel.org --- virt/kvm/arm/vgic/vgic-init.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 6d85c6d894c39..6899101538890 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -358,6 +358,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ /* + * Retire all pending LPIs on this vcpu anyway as we're + * going to destroy it. + */ + vgic_flush_pending_lpis(vcpu); + INIT_LIST_HEAD(&vgic_cpu->ap_list_head); }
@@ -369,10 +375,10 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
vgic_debug_destroy(kvm);
- kvm_vgic_dist_destroy(kvm); - kvm_for_each_vcpu(i, vcpu, kvm) kvm_vgic_vcpu_destroy(vcpu); + + kvm_vgic_dist_destroy(kvm); }
void kvm_vgic_destroy(struct kvm *kvm)
From: Zenghui Yu yuzenghui@huawei.com
[ Upstream commit 57bdb436ce869a45881d8aa4bc5dac8e072dd2b6 ]
If we're going to fail out of vgic_add_lpi(), let's make sure the allocated vgic_irq memory is also freed. Though it seems that both cases are unlikely to fail.
Signed-off-by: Zenghui Yu yuzenghui@huawei.com Signed-off-by: Marc Zyngier maz@kernel.org Link: https://lore.kernel.org/r/20200414030349.625-3-yuzenghui@huawei.com Signed-off-by: Sasha Levin sashal@kernel.org --- virt/kvm/arm/vgic/vgic-its.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index f8ad7096555d7..35be0e2a46393 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -96,14 +96,21 @@ out_unlock: * We "cache" the configuration table entries in our struct vgic_irq's. * However we only have those structs for mapped IRQs, so we read in * the respective config data from memory here upon mapping the LPI. + * + * Should any of these fail, behave as if we couldn't create the LPI + * by dropping the refcount and returning the error. */ ret = update_lpi_config(kvm, irq, NULL, false); - if (ret) + if (ret) { + vgic_put_irq(kvm, irq); return ERR_PTR(ret); + }
ret = vgic_v3_lpi_sync_pending_status(kvm, irq); - if (ret) + if (ret) { + vgic_put_irq(kvm, irq); return ERR_PTR(ret); + }
return irq; }
From: Tonghao Zhang xiangxia.m.yue@gmail.com
[ Upstream commit e57358873bb5d6caa882b9684f59140912b37dde ]
When setting the meter rate to 4+ Gbps, there is an overflow and the meters don't work as expected.
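For context, the overflow is easy to reproduce in isolation: since band->rate and band->burst_size are u32, the intermediate (burst_size + rate) * 1000 is computed in 32-bit arithmetic unless one operand is 64-bit, which is exactly what the 1000ULL suffix in the patch below forces. A minimal user-space sketch (the kbps unit is an assumption for illustration, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rate = 4300000;	/* ~4.3 Gbps in kbps (assumed unit) */
	uint32_t burst_size = 0;

	/* 4300000 * 1000 = 4.3e9 > UINT32_MAX, so the u32 product wraps. */
	uint32_t bucket32 = (burst_size + rate) * 1000;
	/* The ULL suffix promotes the multiplication to 64 bits. */
	uint64_t bucket64 = (burst_size + rate) * 1000ULL;

	printf("u32: %u\nu64: %llu\n", bucket32,
	       (unsigned long long)bucket64);
	return 0;
}

The wrapped u32 value also corrupts the band_max_delta_t computation visible in the hunk below, which divides the bucket by the rate; that is why the meters misbehave at high rates.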
Cc: Pravin B Shelar pshelar@ovn.org Cc: Andy Zhou azhou@ovn.org Signed-off-by: Tonghao Zhang xiangxia.m.yue@gmail.com Acked-by: Pravin B Shelar pshelar@ovn.org Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/openvswitch/meter.c | 2 +- net/openvswitch/meter.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index 3323b79ff548d..b10734f18bbd6 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -251,7 +251,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a) * * Start with a full bucket. */ - band->bucket = (band->burst_size + band->rate) * 1000; + band->bucket = (band->burst_size + band->rate) * 1000ULL; band_max_delta_t = band->bucket / band->rate; if (band_max_delta_t > meter->max_delta_t) meter->max_delta_t = band_max_delta_t; diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h index f645913870bd2..2e3fd6f1d7ebe 100644 --- a/net/openvswitch/meter.h +++ b/net/openvswitch/meter.h @@ -23,7 +23,7 @@ struct dp_meter_band { u32 type; u32 rate; u32 burst_size; - u32 bucket; /* 1/1000 packets, or in bits */ + u64 bucket; /* 1/1000 packets, or in bits */ struct ovs_flow_stats stats; };
From: Christophe JAILLET christophe.jaillet@wanadoo.fr
[ Upstream commit f7854c382240c1686900b2f098b36430c6f5047e ]
If 'scsi_host_alloc()' or 'kcalloc()' fails, 'error' is known to be 0. Set it explicitly to -ENOMEM before branching to the error handling path.
While at it, remove 2 useless assignments to 'error'. These values are overwritten a few lines later.
Link: https://lore.kernel.org/r/20200412094039.8822-1-christophe.jaillet@wanadoo.f... Signed-off-by: Christophe JAILLET christophe.jaillet@wanadoo.fr Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/aacraid/linit.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 514aed38b5afe..1035f947f1bcf 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1607,7 +1607,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) struct Scsi_Host *shost; struct aac_dev *aac; struct list_head *insert = &aac_devices; - int error = -ENODEV; + int error; int unique_id = 0; u64 dmamask; int mask_bits = 0; @@ -1632,7 +1632,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) error = pci_enable_device(pdev); if (error) goto out; - error = -ENODEV;
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) { error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); @@ -1664,8 +1663,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev);
shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev)); - if (!shost) + if (!shost) { + error = -ENOMEM; goto out_disable_pdev; + }
shost->irq = pdev->irq; shost->unique_id = unique_id; @@ -1690,8 +1691,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB, sizeof(struct fib), GFP_KERNEL); - if (!aac->fibs) + if (!aac->fibs) { + error = -ENOMEM; goto out_free_host; + } + spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
From: Ivan Safonov insafonov@gmail.com
[ Upstream commit 628cbd971a927abe6388d44320e351c337b331e4 ]
skb clones use the same data buffer, so the tail of one skb is corrupted by the beginning of the next skb.
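To see why cloning cannot work here: a clone is a second descriptor over the same backing buffer, so adjusting each clone's data/len window to a different subframe makes the windows alias one another in memory. A rough user-space analogy (a toy stand-in for skb views, not kernel code):

#include <stdio.h>
#include <string.h>

/* Toy stand-in for an skb: a window into a shared buffer. */
struct view {
	unsigned char *data;
	size_t len;
};

int main(void)
{
	unsigned char shared[16] = "AAAABBBB";

	/* Two "clones" pointing into the same backing buffer. */
	struct view a = { shared, 4 };
	struct view b = { shared + 4, 4 };

	/* Writing through view a past its old tail scribbles over b. */
	memcpy(a.data, "XXXXXXXX", 8);
	a.len = 8;

	printf("b now reads: %.4s\n", b.data);	/* "XXXX", not "BBBB" */
	return 0;
}

A fresh dev_alloc_skb() plus skb_put_data(), as the patch does, gives every subframe its own buffer instead.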
Signed-off-by: Ivan Safonov insafonov@gmail.com Link: https://lore.kernel.org/r/20200423191404.12028-1-insafonov@gmail.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/staging/rtl8188eu/core/rtw_recv.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c index d4278361e0028..a036ef104198e 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -1525,21 +1525,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
/* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); - if (sub_skb) { - skb_reserve(sub_skb, 12); - skb_put_data(sub_skb, pdata, nSubframe_Length); - } else { - sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC); - if (sub_skb) { - sub_skb->data = pdata; - sub_skb->len = nSubframe_Length; - skb_set_tail_pointer(sub_skb, nSubframe_Length); - } else { - DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes); - break; - } + if (!sub_skb) { + DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes); + break; }
+ skb_reserve(sub_skb, 12); + skb_put_data(sub_skb, pdata, nSubframe_Length); + subframes[nr_subframes++] = sub_skb;
if (nr_subframes >= MAX_SUBFRAME_COUNT) {
From: Wei Yongjun weiyongjun1@huawei.com
[ Upstream commit ff62255a2a5c1228a28f2bb063646f948115a309 ]
Fix to return negative error code -ENOMEM from the error handling case instead of 0, as done elsewhere in this function.
Signed-off-by: Wei Yongjun weiyongjun1@huawei.com Link: https://lore.kernel.org/r/20200427122415.47416-1-weiyongjun1@huawei.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/tty/vcc.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index d2a1e1228c82d..9ffd42e333b83 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -605,6 +605,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) port->index = vcc_table_add(port); if (port->index == -1) { pr_err("VCC: no more TTY indices left for allocation\n"); + rv = -ENOMEM; goto free_ldc; }
From: Will Deacon will@kernel.org
[ Upstream commit 98448cdfe7060dd5491bfbd3f7214ffe1395d58e ]
We don't need to be quite as strict about mismatched AArch32 support, which is good because the friendly hardware folks have been busy mismatching this to their hearts' content.
* We don't care about EL2 or EL3 (there are silly comments concerning the latter, so remove those)
* EL1 support is gated by the ARM64_HAS_32BIT_EL1 capability and handled gracefully when a mismatch occurs
* EL0 support is gated by the ARM64_HAS_32BIT_EL0 capability and handled gracefully when a mismatch occurs
Relax the AArch32 checks to FTR_NONSTRICT.
Tested-by: Sai Prakash Ranjan saiprakash.ranjan@codeaurora.org Reviewed-by: Suzuki K Poulose suzuki.poulose@arm.com Link: https://lore.kernel.org/r/20200421142922.18950-8-will@kernel.org Signed-off-by: Will Deacon will@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- arch/arm64/kernel/cpufeature.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f400cb29b811a..1df57ffc9314d 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -160,11 +160,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), - /* Linux doesn't care about the EL3 */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), ARM64_FTR_END, };
@@ -719,9 +718,6 @@ void update_cpu_features(int cpu, taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
- /* - * EL3 is not our concern. - */ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
From: Paul Turner pjt@google.com
[ Upstream commit e98fa02c4f2ea4991dae422ac7e34d102d2f0599 ]
There is a race window in which an entity begins throttling before quota is added to the pool, but does not finish throttling until after we have finished with distribute_cfs_runtime(). This entity is not observed by distribute_cfs_runtime() because it was not on the throttled list at the time that distribution was running. This race manifests as rare period-length stalls for such entities.
Rather than heavy-weight the synchronization with the progress of distribution, we can fix this by aborting throttling if bandwidth has become available. Otherwise, we immediately add the entity to the throttled list so that it can be observed by a subsequent distribution.
Additionally, we can remove the case of adding the throttled entity to the head of the throttled list, and simply always add to the tail. Thanks to 26a8b12747c97, distribute_cfs_runtime() no longer holds onto its own pool of runtime. This means that if we do hit the !assign and distribute_running case, we know that distribution is about to end.
Signed-off-by: Paul Turner pjt@google.com Signed-off-by: Ben Segall bsegall@google.com Signed-off-by: Josh Don joshdon@google.com Signed-off-by: Peter Zijlstra (Intel) peterz@infradead.org Reviewed-by: Phil Auld pauld@redhat.com Link: https://lkml.kernel.org/r/20200410225208.109717-2-joshdon@google.com Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/sched/fair.c | 79 +++++++++++++++++++++++++++------------------ 1 file changed, 47 insertions(+), 32 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 20bf1f66733ac..b02a83ff40687 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4383,16 +4383,16 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) }
/* returns 0 on failure to allocate runtime */ -static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) +static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, + struct cfs_rq *cfs_rq, u64 target_runtime) { - struct task_group *tg = cfs_rq->tg; - struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); - u64 amount = 0, min_amount; + u64 min_amount, amount = 0; + + lockdep_assert_held(&cfs_b->lock);
/* note: this is a positive sum as runtime_remaining <= 0 */ - min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; + min_amount = target_runtime - cfs_rq->runtime_remaining;
- raw_spin_lock(&cfs_b->lock); if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { @@ -4404,13 +4404,25 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) cfs_b->idle = 0; } } - raw_spin_unlock(&cfs_b->lock);
cfs_rq->runtime_remaining += amount;
return cfs_rq->runtime_remaining > 0; }
+/* returns 0 on failure to allocate runtime */ +static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + int ret; + + raw_spin_lock(&cfs_b->lock); + ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); + raw_spin_unlock(&cfs_b->lock); + + return ret; +} + static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) { /* dock delta_exec before expiring quota (as it could span periods) */ @@ -4499,13 +4511,33 @@ static int tg_throttle_down(struct task_group *tg, void *data) return 0; }
-static void throttle_cfs_rq(struct cfs_rq *cfs_rq) +static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; long task_delta, idle_task_delta, dequeue = 1; - bool empty; + + raw_spin_lock(&cfs_b->lock); + /* This will start the period timer if necessary */ + if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { + /* + * We have raced with bandwidth becoming available, and if we + * actually throttled the timer might not unthrottle us for an + * entire period. We additionally needed to make sure that any + * subsequent check_cfs_rq_runtime calls agree not to throttle + * us, as we may commit to do cfs put_prev+pick_next, so we ask + * for 1ns of runtime rather than just check cfs_b. + */ + dequeue = 0; + } else { + list_add_tail_rcu(&cfs_rq->throttled_list, + &cfs_b->throttled_cfs_rq); + } + raw_spin_unlock(&cfs_b->lock); + + if (!dequeue) + return false; /* Throttle no longer required. */
se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
@@ -4534,29 +4566,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) sub_nr_running(rq, task_delta);
- cfs_rq->throttled = 1; - cfs_rq->throttled_clock = rq_clock(rq); - raw_spin_lock(&cfs_b->lock); - empty = list_empty(&cfs_b->throttled_cfs_rq); - - /* - * Add to the _head_ of the list, so that an already-started - * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is - * not running add to the tail so that later runqueues don't get starved. - */ - if (cfs_b->distribute_running) - list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); - else - list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); - /* - * If we're the first throttled task, make sure the bandwidth - * timer is running. + * Note: distribution will already see us throttled via the + * throttled-list. rq->lock protects completion. */ - if (empty) - start_cfs_bandwidth(cfs_b); - - raw_spin_unlock(&cfs_b->lock); + cfs_rq->throttled = 1; + cfs_rq->throttled_clock = rq_clock(rq); + return true; }
void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) @@ -4915,8 +4931,7 @@ static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) if (cfs_rq_throttled(cfs_rq)) return true;
- throttle_cfs_rq(cfs_rq); - return true; + return throttle_cfs_rq(cfs_rq); }
static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
From: Wei Yongjun weiyongjun1@huawei.com
[ Upstream commit 97fff7c8de1e54e5326dfeb66085796864bceb64 ]
Fix to return negative error code -ENOMEM from the error handling case instead of 0, as done elsewhere in this function.
Signed-off-by: Wei Yongjun weiyongjun1@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 7a248cc1055a3..7af7cc7c8669a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2654,8 +2654,10 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * dpaa2_eth_fs_count(priv), GFP_KERNEL); - if (!priv->cls_rules) + if (!priv->cls_rules) { + err = -ENOMEM; goto close; + }
return 0;
From: Krzysztof Kozlowski krzk@kernel.org
[ Upstream commit 8c149b7d75e53be47648742f40fc90d9fc6fa63a ]
The required supplies in the bindings did not match the implementation, making the bindings incorrect and misleading. The Linux kernel driver requires all supplies to be present. Also, wlf,wm8994 uses just DBVDD-supply instead of DBVDDn-supply (n: <1,3>).
Reported-by: Jonathan Bakker xc-racer2@live.ca Signed-off-by: Krzysztof Kozlowski krzk@kernel.org Link: https://lore.kernel.org/r/20200501133534.6706-1-krzk@kernel.org Signed-off-by: Mark Brown broonie@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- .../devicetree/bindings/sound/wm8994.txt | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt index 68cccc4653ba3..367b58ce1bb92 100644 --- a/Documentation/devicetree/bindings/sound/wm8994.txt +++ b/Documentation/devicetree/bindings/sound/wm8994.txt @@ -14,9 +14,15 @@ Required properties: - #gpio-cells : Must be 2. The first cell is the pin number and the second cell is used to specify optional parameters (currently unused).
- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply, - SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered - in Documentation/devicetree/bindings/regulator/regulator.txt + - power supplies for the device, as covered in + Documentation/devicetree/bindings/regulator/regulator.txt, depending + on compatible: + - for wlf,wm1811 and wlf,wm8958: + AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, + DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply + - for wlf,wm8994: + AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply, + SPKVDD1-supply, SPKVDD2-supply
Optional properties:
@@ -73,11 +79,11 @@ wm8994: codec@1a {
lineout1-se;
+ AVDD1-supply = <®ulator>; AVDD2-supply = <®ulator>; CPVDD-supply = <®ulator>; - DBVDD1-supply = <®ulator>; - DBVDD2-supply = <®ulator>; - DBVDD3-supply = <®ulator>; + DBVDD-supply = <®ulator>; + DCVDD-supply = <®ulator>; SPKVDD1-supply = <®ulator>; SPKVDD2-supply = <®ulator>; };
From: Aya Levin ayal@mellanox.com
[ Upstream commit bea0c5c942d3b4e9fb6ed45f6a7de74c6b112437 ]
The devlink health core conditions the reporter's recovery on the expiration of the grace period. This is not relevant for the first recovery. Explicitly demand that the grace period only applies to recoveries other than the first.
Fixes: c8e1da0bf923 ("devlink: Add health report functionality") Signed-off-by: Aya Levin ayal@mellanox.com Reviewed-by: Moshe Shemesh moshe@mellanox.com Reviewed-by: Jiri Pirko jiri@mellanox.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/core/devlink.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/core/devlink.c b/net/core/devlink.c index 5667cae57072f..26c8993a17ae0 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4823,6 +4823,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter, { enum devlink_health_reporter_state prev_health_state; struct devlink *devlink = reporter->devlink; + unsigned long recover_ts_threshold;
/* write a log message of the current error */ WARN_ON(!msg); @@ -4832,10 +4833,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter, reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
/* abort if the previous error wasn't recovered */ + recover_ts_threshold = reporter->last_recovery_ts + + msecs_to_jiffies(reporter->graceful_period); if (reporter->auto_recover && (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY || - jiffies - reporter->last_recovery_ts < - msecs_to_jiffies(reporter->graceful_period))) { + (reporter->last_recovery_ts && reporter->recovery_count && + time_is_after_jiffies(recover_ts_threshold)))) { trace_devlink_health_recover_aborted(devlink, reporter->ops->name, reporter->health_state,
From: Cong Wang xiyou.wangcong@gmail.com
[ Upstream commit 8d9f73c0ad2f20e9fed5380de0a3097825859d03 ]
In lec_arp_clear_vccs() only entry->vcc is freed, but vcc could be installed on entry->recv_vcc too in lec_vcc_added().
This fixes the following memory leak:
unreferenced object 0xffff8880d9266b90 (size 16): comm "atm2", pid 425, jiffies 4294907980 (age 23.488s) hex dump (first 16 bytes): 00 00 00 00 00 00 00 00 00 00 00 00 6b 6b 6b a5 ............kkk. backtrace: [<(____ptrval____)>] kmem_cache_alloc_trace+0x10e/0x151 [<(____ptrval____)>] lane_ioctl+0x4b3/0x569 [<(____ptrval____)>] do_vcc_ioctl+0x1ea/0x236 [<(____ptrval____)>] svc_ioctl+0x17d/0x198 [<(____ptrval____)>] sock_do_ioctl+0x47/0x12f [<(____ptrval____)>] sock_ioctl+0x2f9/0x322 [<(____ptrval____)>] vfs_ioctl+0x1e/0x2b [<(____ptrval____)>] ksys_ioctl+0x61/0x80 [<(____ptrval____)>] __x64_sys_ioctl+0x16/0x19 [<(____ptrval____)>] do_syscall_64+0x57/0x65 [<(____ptrval____)>] entry_SYSCALL_64_after_hwframe+0x49/0xb3
Cc: Gengming Liu l.dmxcsnsbh@gmail.com Signed-off-by: Cong Wang xiyou.wangcong@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/atm/lec.c | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/net/atm/lec.c b/net/atm/lec.c index 5a77c235a212f..3625a04a6c701 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -1269,6 +1269,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry) entry->vcc = NULL; } if (entry->recv_vcc) { + struct atm_vcc *vcc = entry->recv_vcc; + struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); + + kfree(vpriv); + vcc->user_back = NULL; + entry->recv_vcc->push = entry->old_recv_push; vcc_release_async(entry->recv_vcc, -EPIPE); entry->recv_vcc = NULL;
From: Stanimir Varbanov stanimir.varbanov@linaro.org
[ Upstream commit bc3d870e414b42d72cd386aa20a4fc3612e4feb7 ]
Presently the list initialization is done only in the dynamic-resolution-change state, which leads to list corruption and use-after-free. Initialize the list_head unconditionally in vdec_stop_capture, which is called by the vb2 stop_streaming, without taking the current codec state into account.
Signed-off-by: Stanimir Varbanov stanimir.varbanov@linaro.org Signed-off-by: Mauro Carvalho Chehab mchehab+huawei@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/media/platform/qcom/venus/vdec.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index 59ae7a1e63bc2..05b80a66e80ed 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -987,13 +987,14 @@ static int vdec_stop_capture(struct venus_inst *inst) ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT); vdec_cancel_dst_buffers(inst); inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP; - INIT_LIST_HEAD(&inst->registeredbufs); venus_helper_free_dpb_bufs(inst); break; default: - return 0; + break; }
+ INIT_LIST_HEAD(&inst->registeredbufs); + return ret; }
From: Ian Rogers irogers@google.com
[ Upstream commit e8dfb81838b14f82521968343884665b996646ef ]
Fix a memory leak found by applying LLVM's libfuzzer on parse_events().
Signed-off-by: Ian Rogers irogers@google.com Acked-by: Jiri Olsa jolsa@redhat.com Cc: Adrian Hunter adrian.hunter@intel.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Andi Kleen ak@linux.intel.com Cc: Leo Yan leo.yan@linaro.org Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Stephane Eranian eranian@google.com Cc: clang-built-linux@googlegroups.com Link: http://lore.kernel.org/lkml/20200319023101.82458-1-irogers@google.com [ split from a larger patch, use zfree() ] Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/parse-events.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 2a97a5e3aa91e..5fadad158db59 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1370,6 +1370,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
list_for_each_entry_safe(pos, tmp, &config_terms, list) { list_del_init(&pos->list); + zfree(&pos->val.str); free(pos); } return -EINVAL;
From: Ian Rogers irogers@google.com
[ Upstream commit 266150c94c69429cf6d18e130237224a047f5061 ]
Realloc of size zero is a free, not an error; avoid this causing a double free. Caught by clang's address sanitizer:
==2634==ERROR: AddressSanitizer: attempting double-free on 0x6020000015f0 in thread T0: #0 0x5649659297fd in free llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:123:3 #1 0x5649659e9251 in __zfree tools/lib/zalloc.c:13:2 #2 0x564965c0f92c in mem2node__exit tools/perf/util/mem2node.c:114:2 #3 0x564965a08b4c in perf_c2c__report tools/perf/builtin-c2c.c:2867:2 #4 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10 #5 0x564965944348 in run_builtin tools/perf/perf.c:312:11 #6 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8 #7 0x5649659440c4 in run_argv tools/perf/perf.c:408:2 #8 0x564965942e41 in main tools/perf/perf.c:538:3
0x6020000015f0 is located 0 bytes inside of 1-byte region [0x6020000015f0,0x6020000015f1) freed by thread T0 here: #0 0x564965929da3 in realloc third_party/llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:164:3 #1 0x564965c0f55e in mem2node__init tools/perf/util/mem2node.c:97:16 #2 0x564965a08956 in perf_c2c__report tools/perf/builtin-c2c.c:2803:8 #3 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10 #4 0x564965944348 in run_builtin tools/perf/perf.c:312:11 #5 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8 #6 0x5649659440c4 in run_argv tools/perf/perf.c:408:2 #7 0x564965942e41 in main tools/perf/perf.c:538:3
previously allocated by thread T0 here: #0 0x564965929c42 in calloc third_party/llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:154:3 #1 0x5649659e9220 in zalloc tools/lib/zalloc.c:8:9 #2 0x564965c0f32d in mem2node__init tools/perf/util/mem2node.c:61:12 #3 0x564965a08956 in perf_c2c__report tools/perf/builtin-c2c.c:2803:8 #4 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10 #5 0x564965944348 in run_builtin tools/perf/perf.c:312:11 #6 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8 #7 0x5649659440c4 in run_argv tools/perf/perf.c:408:2 #8 0x564965942e41 in main tools/perf/perf.c:538:3
v2: add a WARN_ON_ONCE when the free condition arises.
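The underlying C semantics can be shown in isolation: realloc(ptr, 0) is allowed to free the buffer and return NULL (glibc does exactly that), so keeping the old pointer whenever realloc returns NULL leaves a dangling pointer that gets freed a second time later. A minimal user-space sketch of the fixed pattern, assuming glibc behavior:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t j = 0;	/* all entries were merged away */
	int *entries = calloc(4, sizeof(*entries));
	if (!entries)
		return 1;

	/* With j == 0 this is realloc(ptr, 0): glibc frees the buffer
	 * and returns NULL. Keeping the old pointer on NULL would leave
	 * 'entries' dangling, and a later free(entries) double-frees. */
	int *tmp = realloc(entries, sizeof(*entries) * j);
	if (tmp || j == 0)	/* mirrors the WARN_ON_ONCE(j == 0) guard */
		entries = tmp;

	free(entries);	/* free(NULL) is safe */
	return 0;
}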
Signed-off-by: Ian Rogers irogers@google.com Acked-by: Jiri Olsa jolsa@redhat.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Stephane Eranian eranian@google.com Cc: clang-built-linux@googlegroups.com Link: http://lore.kernel.org/lkml/20200320182347.87675-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/mem2node.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c index 797d86a1ab095..c84f5841c7abd 100644 --- a/tools/perf/util/mem2node.c +++ b/tools/perf/util/mem2node.c @@ -1,5 +1,6 @@ #include <errno.h> #include <inttypes.h> +#include <asm/bug.h> #include <linux/bitmap.h> #include <linux/kernel.h> #include <linux/zalloc.h> @@ -95,7 +96,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env)
/* Cut unused entries, due to merging. */ tmp_entries = realloc(entries, sizeof(*entries) * j); - if (tmp_entries) + if (tmp_entries || WARN_ON_ONCE(j == 0)) entries = tmp_entries;
for (i = 0; i < j; i++) {
From: Waiman Long longman@redhat.com
[ Upstream commit cbfc35a48609ceac978791e3ab9dde0c01f8cb20 ]
In a couple of places in the slub memory allocator, the code uses "s->offset" as a check to see if the free pointer is put right after the object. That check is no longer true with commit 3202fa62fb43 ("slub: relocate freelist pointer to middle of object").
As a result, echoing "1" into the validate sysfs file, e.g. of dentry, may cause a bunch of "Freepointer corrupt" error reports like the following to appear with the system in panic afterwards.
============================================================================= BUG dentry(666:pmcd.service) (Tainted: G B): Freepointer corrupt -----------------------------------------------------------------------------
To fix it, use the check "s->offset == s->inuse" in the new helper function freeptr_outside_object() instead. Also add another helper function get_info_end() to return the end of info block (inuse + free pointer if not overlapping with object).
Fixes: 3202fa62fb43 ("slub: relocate freelist pointer to middle of object") Signed-off-by: Waiman Long longman@redhat.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Reviewed-by: Matthew Wilcox (Oracle) willy@infradead.org Reviewed-by: Kees Cook keescook@chromium.org Acked-by: Rafael Aquini aquini@redhat.com Cc: Christoph Lameter cl@linux.com Cc: Vitaly Nikolenko vnik@duasynt.com Cc: Silvio Cesare silvio.cesare@gmail.com Cc: Pekka Enberg penberg@kernel.org Cc: David Rientjes rientjes@google.com Cc: Joonsoo Kim iamjoonsoo.kim@lge.com Cc: Markus Elfring Markus.Elfring@web.de Cc: Changbin Du changbin.du@gmail.com Link: http://lkml.kernel.org/r/20200429135328.26976-1-longman@redhat.com Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/slub.c | 45 ++++++++++++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 15 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c index 822ba07245291..d69934eac9e94 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -533,15 +533,32 @@ static void print_section(char *level, char *text, u8 *addr, metadata_access_disable(); }
+/* + * See comment in calculate_sizes(). + */ +static inline bool freeptr_outside_object(struct kmem_cache *s) +{ + return s->offset >= s->inuse; +} + +/* + * Return offset of the end of info block which is inuse + free pointer if + * not overlapping with object. + */ +static inline unsigned int get_info_end(struct kmem_cache *s) +{ + if (freeptr_outside_object(s)) + return s->inuse + sizeof(void *); + else + return s->inuse; +} + static struct track *get_track(struct kmem_cache *s, void *object, enum track_item alloc) { struct track *p;
- if (s->offset) - p = object + s->offset + sizeof(void *); - else - p = object + s->inuse; + p = object + get_info_end(s);
return p + alloc; } @@ -682,10 +699,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) print_section(KERN_ERR, "Redzone ", p + s->object_size, s->inuse - s->object_size);
- if (s->offset) - off = s->offset + sizeof(void *); - else - off = s->inuse; + off = get_info_end(s);
if (s->flags & SLAB_STORE_USER) off += 2 * sizeof(struct track); @@ -776,7 +790,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page, * object address * Bytes of the object to be managed. * If the freepointer may overlay the object then the free - * pointer is the first word of the object. + * pointer is at the middle of the object. * * Poisoning uses 0x6b (POISON_FREE) and the last byte is * 0xa5 (POISON_END) @@ -810,11 +824,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) { - unsigned long off = s->inuse; /* The end of info */ - - if (s->offset) - /* Freepointer is placed after the object. */ - off += sizeof(void *); + unsigned long off = get_info_end(s); /* The end of info */
if (s->flags & SLAB_STORE_USER) /* We also have user information there */ @@ -900,7 +910,7 @@ static int check_object(struct kmem_cache *s, struct page *page, check_pad_bytes(s, page, p); }
- if (!s->offset && val == SLUB_RED_ACTIVE) + if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) /* * Object and freepointer overlap. Cannot check * freepointer while object is allocated. @@ -3585,6 +3595,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) * * This is the case if we do RCU, have a constructor or * destructor or are poisoning the objects. + * + * The assumption that s->offset >= s->inuse means free + * pointer is outside of the object is used in the + * freeptr_outside_object() function. If that is no + * longer true, the function needs to be modified. */ s->offset = size; size += sizeof(void *);
From: Thierry Reding treding@nvidia.com
[ Upstream commit 44c99904cf61f945d02ac9976ab10dd5ccaea393 ]
Depending on the board design, the I2C controllers found on Tegra SoCs may require pinmuxing in order to function. This is done as part of the driver's runtime suspend/resume operations. However, the PM core does not allow devices to go into runtime suspend during system sleep to avoid potential races with the suspend/resume of their parents.
As a result of this, when Tegra SoCs resume from system suspend, their I2C controllers may have lost the pinmux state in hardware, whereas the pinctrl subsystem is not aware of this. To fix this, make sure that if the I2C controller is not runtime suspended, the runtime suspend code is still executed in order to disable the module clock (which we don't need to be enabled during sleep) and set the pinmux to the idle state.
Conversely, make sure that the I2C controller is properly resumed when waking up from sleep so that pinmux settings are properly restored.
This fixes a bug seen with DDC transactions to an HDMI monitor timing out when resuming from system suspend.
Signed-off-by: Thierry Reding treding@nvidia.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/i2c/busses/i2c-tegra.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 5ca72fb0b406c..db94e96aed77e 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1721,10 +1721,14 @@ static int tegra_i2c_remove(struct platform_device *pdev) static int __maybe_unused tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); + int err = 0;
i2c_mark_adapter_suspended(&i2c_dev->adapter);
- return 0; + if (!pm_runtime_status_suspended(dev)) + err = tegra_i2c_runtime_suspend(dev); + + return err; }
static int __maybe_unused tegra_i2c_resume(struct device *dev) @@ -1732,6 +1736,10 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); int err;
+ /* + * We need to ensure that clocks are enabled so that registers can be + * restored in tegra_i2c_init(). + */ err = tegra_i2c_runtime_resume(dev); if (err) return err; @@ -1740,9 +1748,16 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev) if (err) return err;
- err = tegra_i2c_runtime_suspend(dev); - if (err) - return err; + /* + * In case we are runtime suspended, disable clocks again so that we + * don't unbalance the clock reference counts during the next runtime + * resume transition. + */ + if (pm_runtime_status_suspended(dev)) { + err = tegra_i2c_runtime_suspend(dev); + if (err) + return err; + }
i2c_mark_adapter_resumed(&i2c_dev->adapter);
From: Jonathan Bakker xc-racer2@live.ca
[ Upstream commit 0383024f811aa469df258039807810fc3793a105 ]
According to the datasheet available at (1), the bottom four bits are always zero and the actual voltage is 1.25x this value in mV. Since the kernel API specifies that voltages should be in uV, it should report 1250x the shifted value.
1) https://datasheets.maximintegrated.com/en/ds/MAX17040-MAX17041.pdf
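As a sanity check of the conversion, here is a small stand-alone sketch of the datasheet formula; the raw register value below is hypothetical, chosen only for illustration:

#include <stdint.h>
#include <stdio.h>

/* The low four bits of VCELL are always zero; each step of the
 * remaining 12-bit value is 1.25 mV, i.e. 1250 uV. */
static unsigned int max17040_vcell_to_uv(uint16_t vcell)
{
	return (vcell >> 4) * 1250;
}

int main(void)
{
	uint16_t raw = 0xCE40;	/* hypothetical reading */

	/* (0xCE40 >> 4) * 1250 = 3300 * 1250 = 4125000 uV = 4.125 V */
	printf("%u uV\n", max17040_vcell_to_uv(raw));
	return 0;
}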
Signed-off-by: Jonathan Bakker xc-racer2@live.ca Signed-off-by: Sebastian Reichel sebastian.reichel@collabora.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/power/supply/max17040_battery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index 62499018e68bf..2e845045a3fc0 100644 --- a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -105,7 +105,7 @@ static void max17040_get_vcell(struct i2c_client *client)
vcell = max17040_read_reg(client, MAX17040_VCELL);
- chip->vcell = vcell; + chip->vcell = (vcell >> 4) * 1250; }
static void max17040_get_soc(struct i2c_client *client)
From: Jonathan Bakker xc-racer2@live.ca
[ Upstream commit 05942b8c36c7eb5d3fc5e375d4b0d0c49562e85d ]
The USB phy takes some time to reset, so make sure we give it the time it needs. The delay length was taken from the 4x12 phy driver.
This manifested in issues with the DWC2 driver since commit fe369e1826b3 ("usb: dwc2: Make dwc2_readl/writel functions endianness-agnostic.") where the endianness check would read the DWC ID as 0 due to the phy still resetting, resulting in the wrong endian mode being chosen.
Signed-off-by: Jonathan Bakker xc-racer2@live.ca Link: https://lore.kernel.org/r/BN6PR04MB06605D52502816E500683553A3D10@BN6PR04MB06... Signed-off-by: Kishon Vijay Abraham I kishon@ti.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/phy/samsung/phy-s5pv210-usb2.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c index 56a5083fe6f94..32be62e498047 100644 --- a/drivers/phy/samsung/phy-s5pv210-usb2.c +++ b/drivers/phy/samsung/phy-s5pv210-usb2.c @@ -139,6 +139,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) udelay(10); rst &= ~rstbits; writel(rst, drv->reg_phy + S5PV210_UPHYRST); + /* The following delay is necessary for the reset sequence to be + * completed + */ + udelay(80); } else { pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); pwr |= phypwr;
From: Sonny Sasaka sonnysasaka@chromium.org
[ Upstream commit adf1d6926444029396861413aba8a0f2a805742a ]
After sending the Inquiry Cancel command to the controller, it is possible that the Inquiry Complete event comes before the Inquiry Cancel command complete event. In this case the Inquiry Cancel command will have a status of Command Disallowed, since there is no Inquiry session to be cancelled. This case should not be treated as an error; otherwise we can reach an inconsistent state.
Example of a btmon trace when this happened:
< HCI Command: Inquiry Cancel (0x01|0x0002) plen 0
> HCI Event: Inquiry Complete (0x01) plen 1
Status: Success (0x00)
> HCI Event: Command Complete (0x0e) plen 4
      Inquiry Cancel (0x01|0x0002) ncmd 1
        Status: Command Disallowed (0x0c)
Signed-off-by: Sonny Sasaka sonnysasaka@chromium.org Signed-off-by: Marcel Holtmann marcel@holtmann.org Signed-off-by: Sasha Levin sashal@kernel.org --- net/bluetooth/hci_event.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 1bbeb14b8b64e..fd436e5d7b542 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -41,12 +41,27 @@
/* Handle HCI Event packets */
-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb, + u8 *new_status) { __u8 status = *((__u8 *) skb->data);
BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ /* It is possible that we receive Inquiry Complete event right + * before we receive Inquiry Cancel Command Complete event, in + * which case the latter event should have status of Command + * Disallowed (0x0c). This should not be treated as error, since + * we actually achieve what Inquiry Cancel wants to achieve, + * which is to end the last Inquiry session. + */ + if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { + bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); + status = 0x00; + } + + *new_status = status; + if (status) return;
@@ -3142,7 +3157,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
switch (*opcode) { case HCI_OP_INQUIRY_CANCEL: - hci_cc_inquiry_cancel(hdev, skb); + hci_cc_inquiry_cancel(hdev, skb, status); break;
case HCI_OP_PERIODIC_INQ:
From: Tang Bin tangbin@cmss.chinamobile.com
[ Upstream commit c856b4b0fdb5044bca4c0acf9a66f3b5cc01a37a ]
If platform_get_irq() fails, the negative value returned will not be detected here. So fix the error handling in mv_ehci_probe(). And since platform_get_irq() already logs an error message when it fails, remove the redundant message here.
Signed-off-by: Zhang Shengju zhangshengju@cmss.chinamobile.com Signed-off-by: Tang Bin tangbin@cmss.chinamobile.com Link: https://lore.kernel.org/r/20200508114305.15740-1-tangbin@cmss.chinamobile.co... Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/usb/host/ehci-mv.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index 66ec1fdf9fe7d..15b2e8910e9b7 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -157,9 +157,8 @@ static int mv_ehci_probe(struct platform_device *pdev) hcd->regs = ehci_mv->op_regs;
hcd->irq = platform_get_irq(pdev, 0); - if (!hcd->irq) { - dev_err(&pdev->dev, "Cannot get irq."); - retval = -ENODEV; + if (hcd->irq < 0) { + retval = hcd->irq; goto err_disable_clk; }
From: Paolo Bonzini pbonzini@redhat.com
[ Upstream commit fede8076aab4c2280c673492f8f7a2e87712e8b4 ]
KVM is not handling the case where EIP wraps around the 32-bit address space (that is, outside long mode). This is needed both in vmx.c and in emulate.c. SVM with NRIPS is okay, but it can still print an error to dmesg due to integer overflow.
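The interesting detail in the vmx.c hunk below is the carry test ((rip ^ orig_rip) >> 31) == 3: when orig_rip fits in 32 bits, a carry out of bit 31 is the only case in which bits 31 and 32 of the XOR both differ, making the shifted value exactly 3. A stand-alone sketch of that check (values chosen for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t orig_rip = 0xFFFFFFFEULL;	/* near the top of the 32-bit space */
	uint64_t rip = orig_rip + 4;		/* instruction length 4 */

	/* rip ^ orig_rip = 0x1FFFFFFFC; shifting right by 31 yields
	 * 0b11 == 3, signalling a carry out of bit 31. */
	if (((rip ^ orig_rip) >> 31) == 3) {
		/* Outside long mode, RIP wraps within 32 bits. */
		rip = (uint32_t)rip;
	}

	printf("wrapped rip = 0x%llx\n", (unsigned long long)rip);	/* 0x2 */
	return 0;
}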
Reported-by: Nick Peterson everdox@gmail.com Signed-off-by: Paolo Bonzini pbonzini@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/kvm/emulate.c | 2 ++ arch/x86/kvm/svm.c | 3 --- arch/x86/kvm/vmx/vmx.c | 15 ++++++++++++--- 3 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 128d3ad46e965..cc7823e7ef96c 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -5836,6 +5836,8 @@ writeback: }
ctxt->eip = ctxt->_eip; + if (ctxt->mode != X86EMUL_MODE_PROT64) + ctxt->eip = (u32)ctxt->_eip;
done: if (rc == X86EMUL_PROPAGATE_FAULT) { diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 3243a80ea32c0..802b5f9ab7446 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -787,9 +787,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP)) return 0; } else { - if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) - pr_err("%s: ip 0x%lx next 0x%llx\n", - __func__, kvm_rip_read(vcpu), svm->next_rip); kvm_rip_write(vcpu, svm->next_rip); } svm_set_interrupt_shadow(vcpu, 0); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index a071eab3bab74..14b973990d5a8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1541,7 +1541,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
static int skip_emulated_instruction(struct kvm_vcpu *vcpu) { - unsigned long rip; + unsigned long rip, orig_rip;
/* * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on @@ -1553,8 +1553,17 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) */ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) { - rip = kvm_rip_read(vcpu); - rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + orig_rip = kvm_rip_read(vcpu); + rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); +#ifdef CONFIG_X86_64 + /* + * We need to mask out the high 32 bits of RIP if not in 64-bit + * mode, but just finding out that we are in 64-bit mode is + * quite expensive. Only do it if there was a carry. + */ + if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu)) + rip = (u32)rip; +#endif kvm_rip_write(vcpu, rip); } else { if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
From: Tuong Lien tuong.t.lien@dektech.com.au
[ Upstream commit 0771d7df819284d46cf5cfb57698621b503ec17f ]
Upon receipt of a service subscription request from user via a topology connection, one 'sub' object will be allocated in kernel, so it will be able to send an event of the service if any to the user correspondingly then. Also, in case of any failure, the connection will be shutdown and all the pertaining 'sub' objects will be freed.
However, there is a race condition as follows resulting in memory leak:
receive-work        connection         send-work
     |                   |                  |
  sub-1 |<------//-------|                  |
  sub-2 |<------//-------|                  |
     |                   |<---------------| evt for sub-x
  sub-3 |<------//-------|                  |
     :                   :                  :
     :                   :                  :
     |          /--------|                  |
     |          |        * peer closed      |
     |          |                           |
     |          |                           |
     |          |<-------X-------| evt for sub-y
     |          |<===============|
  sub-n |<------/        X shutdown
    -> orphan   |                           |
That is, the 'receive-work' may get the last subscription request while the 'send-work' is shutting down the connection due to peer close.
We had a 'lock' on the connection, so the two actions cannot be carried out simultaneously. If the last subscription, e.g. 'sub-n', is allocated before the 'send-work' closes the connection, there is no issue at all; the 'sub' objects will be freed. In contrast, the last subscription becomes an orphan, since the connection was closed and we released all references.
This commit fixes the issue by simply adding one test: if the connection remains in 'connected' state right after we obtain the connection lock, then a subscription object can be created as usual; otherwise we ignore it.
Acked-by: Ying Xue ying.xue@windriver.com Acked-by: Jon Maloy jmaloy@redhat.com Reported-by: Thang Ngo thang.h.ngo@dektech.com.au Signed-off-by: Tuong Lien tuong.t.lien@dektech.com.au Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/tipc/topsrv.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 73dbed0c4b6b8..931c426673c02 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -400,7 +400,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) return -EWOULDBLOCK; if (ret == sizeof(s)) { read_lock_bh(&sk->sk_callback_lock); - ret = tipc_conn_rcv_sub(srv, con, &s); + /* RACE: the connection can be closed in the meantime */ + if (likely(connected(con))) + ret = tipc_conn_rcv_sub(srv, con, &s); read_unlock_bh(&sk->sk_callback_lock); if (!ret) return 0;
From: Evan Quan evan.quan@amd.com
[ Upstream commit 9495220577416632675959caf122e968469ffd16 ]
Normally this (SW CTF) should not happen. And by doing a graceful shutdown we can prevent further damage.
Signed-off-by: Evan Quan evan.quan@amd.com Reviewed-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- .../gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 21 +++++++++++++++---- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 7 +++++++ 2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index d09690fca4520..414added3d02c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -22,6 +22,7 @@ */
#include <linux/pci.h> +#include <linux/reboot.h>
#include "hwmgr.h" #include "pp_debug.h" @@ -593,12 +594,18 @@ int phm_irq_process(struct amdgpu_device *adev, uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) { - if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) + if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) { pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), PCI_FUNC(adev->pdev->devfn)); - else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) + /* + * SW CTF just occurred. + * Try to do a graceful shutdown to prevent further damage. + */ + dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n"); + orderly_poweroff(true); + } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), @@ -609,12 +616,18 @@ int phm_irq_process(struct amdgpu_device *adev, PCI_SLOT(adev->pdev->devfn), PCI_FUNC(adev->pdev->devfn)); } else if (client_id == SOC15_IH_CLIENTID_THM) { - if (src_id == 0) + if (src_id == 0) { pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), PCI_FUNC(adev->pdev->devfn)); - else + /* + * SW CTF just occurred. + * Try to do a graceful shutdown to prevent further damage. + */ + dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n"); + orderly_poweroff(true); + } else pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index c4d8c52c6b9ca..6c4405622c9bb 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -23,6 +23,7 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/reboot.h>
#include "pp_debug.h" #include "amdgpu.h" @@ -1538,6 +1539,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, PCI_BUS_NUM(adev->pdev->devfn), PCI_SLOT(adev->pdev->devfn), PCI_FUNC(adev->pdev->devfn)); + /* + * SW CTF just occurred. + * Try to do a graceful shutdown to prevent further damage. + */ + dev_emerg(adev->dev, "System is going to shutdown due to SW CTF!\n"); + orderly_poweroff(true); break; case THM_11_0__SRCID__THM_DIG_THERM_H2L: pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
Hi @Sasha Levin @Deucher, Alexander,
The following changes need to be applied as well. Otherwise, you may see unexpected shutdowns under GPU stress loads on Vega10.
drm/amd/pm: avoid false alarm due to confusing softwareshutdowntemp setting drm/amd/pm: correct the thermal alert temperature limit settings drm/amd/pm: correct Vega20 swctf limit setting drm/amd/pm: correct Vega12 swctf limit setting drm/amd/pm: correct Vega10 swctf limit setting
BR, Evan
On Fri, Sep 18, 2020 at 3:17 AM Quan, Evan Evan.Quan@amd.com wrote:
The following changes need to be applied as well. Otherwise, you may see unexpected shutdowns under GPU stress loads on Vega10.
I would suggest we just drop this patch for kernels prior to 5.8 (where it was introduced).
Alex
On Fri, Sep 18, 2020 at 09:57:37AM -0400, Alex Deucher wrote:
I would suggest we just drop this patch for kernels prior to 5.8 (where it was introduced).
Will do, thanks.
From: Jonathan Bakker xc-racer2@live.ca
[ Upstream commit 7d31676a8d91dd18e08853efd1cb26961a38c6a6 ]
Some variants of the samsung tty driver can pick which clock to use for their baud rate generation. In the DT conversion, a default clock was selected if a specific one wasn't assigned, and the clock rates were then compared to find the best fit. Unfortunately, the comparison was implemented in such a way that only the default clock was ever actually compared. Fix this by iterating through all possible clocks, except when a specific clock has already been picked via clk_sel (which is only possible via board files).
Signed-off-by: Jonathan Bakker xc-racer2@live.ca Reviewed-by: Krzysztof Kozlowski krzk@kernel.org Link: https://lore.kernel.org/r/BN6PR04MB06604E63833EA41837EBF77BA3A30@BN6PR04MB06... Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/tty/serial/samsung.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 71f99e9217592..c7683beb3412a 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, struct s3c24xx_uart_info *info = ourport->info; struct clk *clk; unsigned long rate; - unsigned int cnt, baud, quot, clk_sel, best_quot = 0; + unsigned int cnt, baud, quot, best_quot = 0; char clkname[MAX_CLK_NAME_LENGTH]; int calc_deviation, deviation = (1 << 30) - 1;
- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel : - ourport->info->def_clk_sel; for (cnt = 0; cnt < info->num_clks; cnt++) { - if (!(clk_sel & (1 << cnt))) + /* Keep selected clock if provided */ + if (ourport->cfg->clk_sel && + !(ourport->cfg->clk_sel & (1 << cnt))) continue;
sprintf(clkname, "clk_uart_baud%d", cnt);
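The fixed loop boils down to: try every candidate clock, skip entries only when a board-supplied clk_sel mask excludes them, and keep the clock whose divided rate deviates least from the requested baud rate. A standalone sketch of that selection logic, with made-up clock rates standing in for clk_get_rate():

#include <limits.h>
#include <stdio.h>

/* Hypothetical candidate clock rates in Hz; stand-ins for clk_get_rate(). */
static const unsigned long clk_rates[] = { 66000000, 100000000, 24000000 };

/*
 * Pick the clock whose divided-down rate lands closest to the target baud
 * rate.  With clk_sel == 0 (no board-file preference) every clock is tried;
 * otherwise only clocks whose bit is set in clk_sel are considered.
 */
static int pick_best_clk(unsigned int clk_sel, unsigned long baud)
{
	int best = -1;
	long deviation = LONG_MAX;
	unsigned int cnt;

	for (cnt = 0; cnt < sizeof(clk_rates) / sizeof(clk_rates[0]); cnt++) {
		unsigned long quot;
		long calc;

		if (clk_sel && !(clk_sel & (1U << cnt)))
			continue;	/* keep selected clock if provided */

		quot = clk_rates[cnt] / (16UL * baud);
		if (!quot)
			continue;
		calc = (long)(clk_rates[cnt] / (16UL * quot)) - (long)baud;
		if (calc < 0)
			calc = -calc;
		if (calc < deviation) {
			deviation = calc;
			best = (int)cnt;
		}
	}
	return best;
}

int main(void)
{
	printf("best clk, no mask:  %d\n", pick_best_clk(0, 115200));
	printf("best clk, mask 0x1: %d\n", pick_best_clk(0x1, 115200));
	return 0;
}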
From: Takashi Iwai tiwai@suse.de
[ Upstream commit c637fa151259c0f74665fde7cba5b7eac1417ae5 ]
The unsol event handling code has a loop retrieving the read/write indices and the arrays without locking, while appends to the array may happen concurrently. This may lead to some inconsistency. Although there hasn't been any proof of bad results from this, it's still safer to protect the racy accesses.
This patch adds the spinlock protection around the unsol handling loop for addressing it. Here we take bus->reg_lock as the writer side snd_hdac_bus_queue_event() is also protected by that lock.
Link: https://lore.kernel.org/r/20200516062556.30951-1-tiwai@suse.de Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- sound/hda/hdac_bus.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c index 8f19876244ebe..53be2cac98e7c 100644 --- a/sound/hda/hdac_bus.c +++ b/sound/hda/hdac_bus.c @@ -158,6 +158,7 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work) struct hdac_driver *drv; unsigned int rp, caddr, res;
+ spin_lock_irq(&bus->reg_lock); while (bus->unsol_rp != bus->unsol_wp) { rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE; bus->unsol_rp = rp; @@ -169,10 +170,13 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work) codec = bus->caddr_tbl[caddr & 0x0f]; if (!codec || !codec->dev.driver) continue; + spin_unlock_irq(&bus->reg_lock); drv = drv_to_hdac_driver(codec->dev.driver); if (drv->unsol_event) drv->unsol_event(codec, res); + spin_lock_irq(&bus->reg_lock); } + spin_unlock_irq(&bus->reg_lock); }
/**
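The locking scheme added above is the classic "consume a shared queue under a lock, but drop the lock around the driver callback" pattern, so the callback can run for a long time without blocking the producer. A minimal userspace sketch of the same dance, with a pthread mutex standing in for bus->reg_lock:

#include <pthread.h>
#include <stdio.h>

#define QUEUE_SIZE 8

/* Illustrative unsolicited-event queue; stand-in for the hdac_bus fields. */
static unsigned int q[QUEUE_SIZE];
static unsigned int rp, wp;		/* read/write positions */
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static void unsol_event(unsigned int res)
{
	printf("event %u\n", res);	/* may take a while; must run unlocked */
}

/*
 * Consume queued events.  The indices and slots are only touched with the
 * lock held; the lock is dropped around the driver callback, exactly like
 * the reg_lock dance in snd_hdac_bus_process_unsol_events().
 */
static void process_unsol_events(void)
{
	pthread_mutex_lock(&reg_lock);
	while (rp != wp) {
		unsigned int res;

		rp = (rp + 1) % QUEUE_SIZE;
		res = q[rp];

		pthread_mutex_unlock(&reg_lock);
		unsol_event(res);
		pthread_mutex_lock(&reg_lock);
	}
	pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
	/* Producer side (normally snd_hdac_bus_queue_event(), same lock). */
	pthread_mutex_lock(&reg_lock);
	wp = (wp + 1) % QUEUE_SIZE;
	q[wp] = 42;
	pthread_mutex_unlock(&reg_lock);

	process_unsol_events();
	return 0;
}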
From: Christophe JAILLET christophe.jaillet@wanadoo.fr
[ Upstream commit 547a7348633b1f9923551f94ac3157a613d2c9f2 ]
'exynos_dsi_parse_dt()' takes a reference to 'dsi->in_bridge_node'. This must be released in the error handling path.
In order to do that, add an error handling path and move the 'exynos_dsi_parse_dt()' call from the beginning to the end of the probe function to ease the error handling path. This function only sets some variables which are used only in the 'transfer' function.
The call chain is: .transfer --> exynos_dsi_host_transfer --> exynos_dsi_init --> exynos_dsi_enable_clock (use burst_clk_rate and esc_clk_rate) --> exynos_dsi_set_pll (use pll_clk_rate)
While at it, also handle cases where 'component_add()' fails.
This patch is similar to commit 70505c2ef94b ("drm/exynos: dsi: Remove bridge node reference in removal") which fixed the issue in the remove function.
Signed-off-by: Christophe JAILLET christophe.jaillet@wanadoo.fr Signed-off-by: Inki Dae inki.dae@samsung.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 8ed94c9948008..b83acd696774b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1741,10 +1741,6 @@ static int exynos_dsi_probe(struct platform_device *pdev) dsi->dev = dev; dsi->driver_data = of_device_get_match_data(dev);
- ret = exynos_dsi_parse_dt(dsi); - if (ret) - return ret; - dsi->supplies[0].supply = "vddcore"; dsi->supplies[1].supply = "vddio"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), @@ -1805,11 +1801,25 @@ static int exynos_dsi_probe(struct platform_device *pdev) return ret; }
+ ret = exynos_dsi_parse_dt(dsi); + if (ret) + return ret; + platform_set_drvdata(pdev, &dsi->encoder);
pm_runtime_enable(dev);
- return component_add(dev, &exynos_dsi_component_ops); + ret = component_add(dev, &exynos_dsi_component_ops); + if (ret) + goto err_disable_runtime; + + return 0; + +err_disable_runtime: + pm_runtime_disable(dev); + of_node_put(dsi->in_bridge_node); + + return ret; }
static int exynos_dsi_remove(struct platform_device *pdev)
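The reordering works because probe-style code is easiest to unwind when each resource is acquired as late as possible and released in reverse order through goto labels. A tiny standalone sketch of that shape (the helper names are invented for illustration):

#include <stdio.h>

/* Invented helpers mirroring the probe's resources. */
static int get_ref(void)	{ return 0; }	/* like parse_dt() taking a node ref */
static void put_ref(void)	{ printf("ref dropped\n"); }	/* like of_node_put() */
static int add_component(void)	{ return -19; }	/* simulate component_add() failing */

static int probe(void)
{
	int ret;

	ret = get_ref();		/* taken as late as possible */
	if (ret)
		return ret;

	ret = add_component();
	if (ret)
		goto err_put_ref;

	return 0;

err_put_ref:
	put_ref();			/* unwind in reverse order */
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}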
From: Tang Bin tangbin@cmss.chinamobile.com
[ Upstream commit 49826937e7c7917140515aaf10c17bedcc4acaad ]
If platform_get_irq() fails, the negative value it returns is not detected here, so fix the error handling in bt_bmc_config_irq(). In bt_bmc_probe(), platform_get_irq() prints an error message when it fails, so use platform_get_irq_optional() to simplify the code. Finally, bt_bmc_remove() should perform the right status check when getting the IRQ failed.
Signed-off-by: Shengju Zhang zhangshengju@cmss.chinamobile.com Signed-off-by: Tang Bin tangbin@cmss.chinamobile.com Message-Id: 20200505102906.17196-1-tangbin@cmss.chinamobile.com [Also set bt_bmc->irq to a negative value if devm_request_irq() fails.] Signed-off-by: Corey Minyard cminyard@mvista.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/char/ipmi/bt-bmc.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index 40b9927c072c9..89a8faa9b6cfa 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -399,15 +399,15 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc, struct device *dev = &pdev->dev; int rc;
- bt_bmc->irq = platform_get_irq(pdev, 0); - if (!bt_bmc->irq) - return -ENODEV; + bt_bmc->irq = platform_get_irq_optional(pdev, 0); + if (bt_bmc->irq < 0) + return bt_bmc->irq;
rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED, DEVICE_NAME, bt_bmc); if (rc < 0) { dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq); - bt_bmc->irq = 0; + bt_bmc->irq = rc; return rc; }
@@ -479,7 +479,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
bt_bmc_config_irq(bt_bmc, pdev);
- if (bt_bmc->irq) { + if (bt_bmc->irq >= 0) { dev_info(dev, "Using IRQ %d\n", bt_bmc->irq); } else { dev_info(dev, "No IRQ; using timer\n"); @@ -505,7 +505,7 @@ static int bt_bmc_remove(struct platform_device *pdev) struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
misc_deregister(&bt_bmc->miscdev); - if (!bt_bmc->irq) + if (bt_bmc->irq < 0) del_timer_sync(&bt_bmc->poll_timer); return 0; }
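The underlying convention: platform_get_irq() and friends return the IRQ number on success and a negative errno on failure, never 0 to mean "no IRQ", so storing the error code in the field and testing irq >= 0 is the reliable check. A small standalone sketch (fake_get_irq() is a stand-in, not a kernel API):

#include <stdio.h>

/* Stand-in for platform_get_irq_optional(): >= 0 on success, a negative
 * error code when the IRQ is absent -- it never returns 0 for "no IRQ". */
static int fake_get_irq(int present)
{
	return present ? 35 : -6;	/* -ENXIO */
}

struct bmc {
	int irq;	/* holds the IRQ number, or a negative error code */
};

static void config_irq(struct bmc *b, int present)
{
	b->irq = fake_get_irq(present);
}

int main(void)
{
	struct bmc b;

	config_irq(&b, 0);
	/* The old "if (!b.irq)" test would wrongly treat -ENXIO as valid. */
	if (b.irq >= 0)
		printf("using IRQ %d\n", b.irq);
	else
		printf("no IRQ; using timer (err %d)\n", b.irq);
	return 0;
}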
From: Nicholas Piggin npiggin@gmail.com
[ Upstream commit 265d6e588d87194c2fe2d6c240247f0264e0c19b ]
System Reset and Machine Check interrupts that are not recoverable, due to being nested or interrupting when RI=0, currently panic. This is not necessary; the kernel can often just kill the current context and recover.
Signed-off-by: Nicholas Piggin npiggin@gmail.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au Reviewed-by: Christophe Leroy christophe.leroy@c-s.fr Link: https://lore.kernel.org/r/20200508043408.886394-16-npiggin@gmail.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/powerpc/kernel/traps.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 014ff0701f245..9432fc6af28a5 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -510,11 +510,11 @@ out: #ifdef CONFIG_PPC_BOOK3S_64 BUG_ON(get_paca()->in_nmi == 0); if (get_paca()->in_nmi > 1) - nmi_panic(regs, "Unrecoverable nested System Reset"); + die("Unrecoverable nested System Reset", regs, SIGABRT); #endif /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable System Reset"); + die("Unrecoverable System Reset", regs, SIGABRT);
if (saved_hsrrs) { mtspr(SPRN_HSRR0, hsrr0); @@ -858,7 +858,7 @@ void machine_check_exception(struct pt_regs *regs)
/* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable Machine check"); + die("Unrecoverable Machine check", regs, SIGBUS);
return;
From: Chuck Lever chuck.lever@oracle.com
[ Upstream commit ea740bd5f58e2912e74f401fd01a9d6aa985ca05 ]
Way back when I was writing the RPC/RDMA server-side backchannel code, I misread the TCP backchannel reply handler logic. When svc_tcp_recvfrom() successfully receives a backchannel reply, it does not return -EAGAIN. It sets XPT_DATA and returns zero.
Update svc_rdma_recvfrom() to return zero. Here, XPT_DATA doesn't need to be set again: it is set whenever a new message is received, behind a spin lock in a single threaded context.
Also, if handling the cb reply is not successful, the message is simply dropped. There's no special message framing to deal with as there is in the TCP case.
Now that the handle_bc_reply() return value is ignored, I've removed the dprintk call sites in the error exit of handle_bc_reply() in favor of trace points in other areas that already report the error cases.
Signed-off-by: Chuck Lever chuck.lever@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- include/linux/sunrpc/svc_rdma.h | 5 ++- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 38 ++++++---------------- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 11 +++---- 3 files changed, 17 insertions(+), 37 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 26f282e5e0822..77589ed787f5c 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -154,9 +154,8 @@ struct svc_rdma_send_ctxt { };
/* svc_rdma_backchannel.c */ -extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, - __be32 *rdma_resp, - struct xdr_buf *rcvbuf); +extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt);
/* svc_rdma_recvfrom.c */ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 325eef1f85824..68d2dcf0a1be1 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -15,26 +15,25 @@ #undef SVCRDMA_BACKCHANNEL_DEBUG
/** - * svc_rdma_handle_bc_reply - Process incoming backchannel reply - * @xprt: controlling backchannel transport - * @rdma_resp: pointer to incoming transport header - * @rcvbuf: XDR buffer into which to decode the reply + * svc_rdma_handle_bc_reply - Process incoming backchannel Reply + * @rqstp: resources for handling the Reply + * @rctxt: Received message * - * Returns: - * %0 if @rcvbuf is filled in, xprt_complete_rqst called, - * %-EAGAIN if server should call ->recvfrom again. */ -int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, - struct xdr_buf *rcvbuf) +void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt) { + struct svc_xprt *sxprt = rqstp->rq_xprt; + struct rpc_xprt *xprt = sxprt->xpt_bc_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); + struct xdr_buf *rcvbuf = &rqstp->rq_arg; struct kvec *dst, *src = &rcvbuf->head[0]; + __be32 *rdma_resp = rctxt->rc_recv_buf; struct rpc_rqst *req; u32 credits; size_t len; __be32 xid; __be32 *p; - int ret;
p = (__be32 *)src->iov_base; len = src->iov_len; @@ -49,14 +48,10 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, __func__, (int)len, p); #endif
- ret = -EAGAIN; - if (src->iov_len < 24) - goto out_shortreply; - spin_lock(&xprt->queue_lock); req = xprt_lookup_rqst(xprt, xid); if (!req) - goto out_notfound; + goto out_unlock;
dst = &req->rq_private_buf.head[0]; memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); @@ -77,25 +72,12 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, spin_unlock(&xprt->transport_lock);
spin_lock(&xprt->queue_lock); - ret = 0; xprt_complete_rqst(req->rq_task, rcvbuf->len); xprt_unpin_rqst(req); rcvbuf->len = 0;
out_unlock: spin_unlock(&xprt->queue_lock); -out: - return ret; - -out_shortreply: - dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n", - xprt, src->iov_len); - goto out; - -out_notfound: - dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n", - xprt, be32_to_cpu(xid)); - goto out_unlock; }
/* Send a backwards direction RPC call. diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index d803d814a03ad..fd5c1f1bb9885 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -817,12 +817,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) goto out_drop; rqstp->rq_xprt_hlen = ret;
- if (svc_rdma_is_backchannel_reply(xprt, p)) { - ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, - &rqstp->rq_arg); - svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); - return ret; - } + if (svc_rdma_is_backchannel_reply(xprt, p)) + goto out_backchannel; + svc_rdma_get_inv_rkey(rdma_xprt, ctxt);
p += rpcrdma_fixed_maxsz; @@ -852,6 +849,8 @@ out_postfail: svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); return ret;
+out_backchannel: + svc_rdma_handle_bc_reply(rqstp, ctxt); out_drop: svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); return 0;
From: Miklos Szeredi mszeredi@redhat.com
[ Upstream commit 32f98877c57bee6bc27f443a96f49678a2cd6a50 ]
page_count() is unstable. Unless there has been an RCU grace period between when the page was removed from the page cache and now, a speculative reference may exist from the page cache.
Reported-by: Matthew Wilcox willy@infradead.org Signed-off-by: Miklos Szeredi mszeredi@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/fuse/dev.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 06dd38e76c62a..f9022b7028754 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -764,7 +764,6 @@ static int fuse_check_page(struct page *page) { if (page_mapcount(page) || page->mapping != NULL || - page_count(page) != 1 || (page->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced |
From: Miklos Szeredi mszeredi@redhat.com
[ Upstream commit 5ddd9ced9aef6cfa76af27d384c17c9e2d610ce8 ]
A GETATTR request can race with FUSE_NOTIFY_INVAL_INODE, resulting in the attribute cache being updated with stale information after the invalidation.
Fix this by bumping the attribute version in fuse_reverse_inval_inode().
Reported-by: Krzysztof Rusek rusek@9livesdata.com Signed-off-by: Miklos Szeredi mszeredi@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/fuse/inode.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 5dca643a257c9..f58ab84b09fb3 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -323,6 +323,8 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, loff_t offset, loff_t len) { + struct fuse_conn *fc = get_fuse_conn_super(sb); + struct fuse_inode *fi; struct inode *inode; pgoff_t pg_start; pgoff_t pg_end; @@ -331,6 +333,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, if (!inode) return -ENOENT;
+ fi = get_fuse_inode(inode); + spin_lock(&fi->lock); + fi->attr_version = atomic64_inc_return(&fc->attr_version); + spin_unlock(&fi->lock); + fuse_invalidate_attr(inode); forget_all_cached_acls(inode); if (offset >= 0) {
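The fix relies on a version-stamp scheme: every attribute update carries a counter value, and a reply is only applied if nothing invalidated the cache after the request was stamped. A simplified single-threaded sketch of that idea (the staleness rule here is condensed from FUSE's actual logic, not a copy of it):

#include <stdio.h>

/* A monotonically increasing counter stamps every attribute change. */
static unsigned long long attr_version;		/* connection-wide counter */
static unsigned long long inode_version;	/* version of the cached attrs */

/* Apply a GETATTR reply only if nothing invalidated the cache after the
 * request was stamped. */
static void store_attrs(int size, unsigned long long stamp)
{
	if (stamp < inode_version) {
		printf("stale reply dropped (stamp %llu < %llu)\n",
		       stamp, inode_version);
		return;
	}
	inode_version = stamp;
	printf("attrs updated: size=%d\n", size);
}

/* The invalidation path bumps the version, as the fix does under fi->lock. */
static void reverse_inval(void)
{
	inode_version = ++attr_version;
}

int main(void)
{
	unsigned long long stamp = ++attr_version;	/* GETATTR issued */

	reverse_inval();		/* FUSE_NOTIFY_INVAL_INODE races in */
	store_attrs(4096, stamp);	/* late reply is now rejected */
	return 0;
}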
From: Colin Ian King colin.king@canonical.com
[ Upstream commit a7f40c233a6b0540d28743267560df9cfb571ca9 ]
The comparison of hcd->irq against less than zero for an error check will never be true because hcd->irq is an unsigned int. Fix this by assigning the int retval to the return value of platform_get_irq(), checking that for the negative error condition, and then assigning hcd->irq from retval.
Addresses-Coverity: ("Unsigned compared against 0") Fixes: c856b4b0fdb5 ("USB: EHCI: ehci-mv: fix error handling in mv_ehci_probe()") Signed-off-by: Colin Ian King colin.king@canonical.com Link: https://lore.kernel.org/r/20200515165453.104028-1-colin.king@canonical.com Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/usb/host/ehci-mv.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index 15b2e8910e9b7..b6f196f5e252e 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -156,11 +156,10 @@ static int mv_ehci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(r); hcd->regs = ehci_mv->op_regs;
- hcd->irq = platform_get_irq(pdev, 0); - if (hcd->irq < 0) { - retval = hcd->irq; + retval = platform_get_irq(pdev, 0); + if (retval < 0) goto err_disable_clk; - } + hcd->irq = retval;
ehci = hcd_to_ehci(hcd); ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
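The bug class is worth spelling out: comparing an unsigned variable against "less than zero" is always false (and compilers flag it as a tautology), so a negative errno stored into it silently becomes a huge positive number. A standalone demonstration (get_irq() is a stand-in for platform_get_irq()):

#include <stdio.h>

/* Stand-in for platform_get_irq(): negative errno on failure. */
static int get_irq(void)
{
	return -6;	/* -ENXIO */
}

int main(void)
{
	unsigned int irq = get_irq();	/* -6 silently becomes 4294967290 */
	int retval = get_irq();

	if (irq < 0)			/* always false: irq is unsigned */
		printf("never reached\n");

	if (retval < 0)			/* correct: test the signed value */
		printf("probe fails with %d\n", retval);
	else
		irq = retval;		/* assign only once known-valid */

	return 0;
}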
From: Suzuki K Poulose suzuki.poulose@arm.com
[ Upstream commit 3f4943d422c5febbb3c764670011a00eb2a86238 ]
The etm probe could be deferred due to a dependency in the trace path chain, and may be retried. We need to clear the per-CPU etmdrvdata entry for the etm in case of a failure, to avoid use-after-free cases as reported below:
KASAN use-after-free bug in etm4_cpu_pm_notify():
[ 8.574566] coresight etm0: CPU0: ETM v4.2 initialized [ 8.581920] BUG: KASAN: use-after-free in etm4_cpu_pm_notify+0x580/0x2024 [ 8.581925] Read of size 8 at addr ffffff813304f8c8 by task swapper/3/0 [ 8.581927] [ 8.581934] CPU: 3 PID: 0 Comm: swapper/3 Tainted: G S W 5.4.28 #314 [ 8.587775] coresight etm1: CPU1: ETM v4.2 initialized [ 8.594195] Call trace: [ 8.594205] dump_backtrace+0x0/0x188 [ 8.594209] show_stack+0x20/0x2c [ 8.594216] dump_stack+0xdc/0x144 [ 8.594227] print_address_description+0x3c/0x494 [ 8.594232] __kasan_report+0x144/0x168 [ 8.601598] coresight etm2: CPU2: ETM v4.2 initialized [ 8.602563] kasan_report+0x10/0x18 [ 8.602568] check_memory_region+0x1a4/0x1b4 [ 8.602572] __kasan_check_read+0x18/0x24 [ 8.602577] etm4_cpu_pm_notify+0x580/0x2024 [ 8.665945] notifier_call_chain+0x5c/0x90 [ 8.670166] __atomic_notifier_call_chain+0x90/0xf8 [ 8.675182] cpu_pm_notify+0x40/0x6c [ 8.678858] cpu_pm_enter+0x38/0x80 [ 8.682451] psci_enter_idle_state+0x34/0x70 [ 8.686844] cpuidle_enter_state+0xb8/0x20c [ 8.691143] cpuidle_enter+0x38/0x4c [ 8.694820] call_cpuidle+0x3c/0x68 [ 8.698408] do_idle+0x1a0/0x280 [ 8.701729] cpu_startup_entry+0x24/0x28 [ 8.705768] secondary_start_kernel+0x15c/0x170 [ 8.710423] [ 8.711972] Allocated by task 242: [ 8.715473] __kasan_kmalloc+0xf0/0x1ac [ 8.719426] kasan_slab_alloc+0x14/0x1c [ 8.723375] __kmalloc_track_caller+0x23c/0x388 [ 8.728040] devm_kmalloc+0x38/0x94 [ 8.731632] etm4_probe+0x48/0x3c8 [ 8.735140] amba_probe+0xbc/0x158 [ 8.738645] really_probe+0x144/0x408 [ 8.742412] driver_probe_device+0x70/0x140 [ 8.746716] __device_attach_driver+0x9c/0x110 [ 8.751287] bus_for_each_drv+0x90/0xd8 [ 8.755236] __device_attach+0xb4/0x164 [ 8.759188] device_initial_probe+0x20/0x2c [ 8.763490] bus_probe_device+0x34/0x94 [ 8.767436] device_add+0x34c/0x3e0 [ 8.771029] amba_device_try_add+0x68/0x440 [ 8.775332] amba_deferred_retry_func+0x48/0xc8 [ 8.779997] process_one_work+0x344/0x648 [ 8.784127] worker_thread+0x2ac/0x47c [ 8.787987] kthread+0x128/0x138 [ 8.791313] ret_from_fork+0x10/0x18 [ 8.794993] [ 8.796532] Freed by task 242: [ 8.799684] __kasan_slab_free+0x15c/0x22c [ 8.803897] kasan_slab_free+0x10/0x1c [ 8.807761] kfree+0x25c/0x4bc [ 8.810913] release_nodes+0x240/0x2b0 [ 8.814767] devres_release_all+0x3c/0x54 [ 8.818887] really_probe+0x178/0x408 [ 8.822661] driver_probe_device+0x70/0x140 [ 8.826963] __device_attach_driver+0x9c/0x110 [ 8.831539] bus_for_each_drv+0x90/0xd8 [ 8.835487] __device_attach+0xb4/0x164 [ 8.839431] device_initial_probe+0x20/0x2c [ 8.843732] bus_probe_device+0x34/0x94 [ 8.847678] device_add+0x34c/0x3e0 [ 8.851274] amba_device_try_add+0x68/0x440 [ 8.855576] amba_deferred_retry_func+0x48/0xc8 [ 8.860240] process_one_work+0x344/0x648 [ 8.864366] worker_thread+0x2ac/0x47c [ 8.868228] kthread+0x128/0x138 [ 8.871557] ret_from_fork+0x10/0x18 [ 8.875231] [ 8.876782] The buggy address belongs to the object at ffffff813304f800 [ 8.876782] which belongs to the cache kmalloc-1k of size 1024 [ 8.889632] The buggy address is located 200 bytes inside of [ 8.889632] 1024-byte region [ffffff813304f800, ffffff813304fc00) [ 8.901761] The buggy address belongs to the page: [ 8.906695] page:ffffffff04ac1200 refcount:1 mapcount:0 mapping:ffffff8146c03800 index:0x0 compound_mapcount: 0 [ 8.917047] flags: 0x4000000000010200(slab|head) [ 8.921799] raw: 4000000000010200 dead000000000100 dead000000000122 ffffff8146c03800 [ 8.929753] raw: 0000000000000000 0000000000100010 00000001ffffffff 0000000000000000 [ 8.937703] page dumped because: kasan: bad access detected [ 
8.943433] [ 8.944974] Memory state around the buggy address: [ 8.949903] ffffff813304f780: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 8.957320] ffffff813304f800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 8.964742] >ffffff813304f880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 8.972157] ^ [ 8.977886] ffffff813304f900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 8.985298] ffffff813304f980: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 8.992713] ==================================================================
Fixes: f188b5e76aae ("coresight: etm4x: Save/restore state across CPU low power states") Reported-by: Sai Prakash Ranjan saiprakash.ranjan@codeaurora.org Tested-by: Sai Prakash Ranjan saiprakash.ranjan@codeaurora.org Cc: Mathieu Poirier mathieu.poirier@linaro.org Cc: Mike Leach mike.leach@linaro.org Signed-off-by: Suzuki K Poulose suzuki.poulose@arm.com Signed-off-by: Mathieu Poirier mathieu.poirier@linaro.org Link: https://lore.kernel.org/r/20200518180242.7916-22-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman gregkh@linuxfoundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/hwtracing/coresight/coresight-etm4x.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index a128b5063f46c..83dccdeef9069 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1184,6 +1184,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) return 0;
err_arch_supported: + etmdrvdata[drvdata->cpu] = NULL; if (--etm4_count == 0) { cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); if (hp_online)
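The pattern being fixed: an object is published in a global per-CPU registry before probe can still fail, and devres then frees it behind the registry's back. Unpublishing on the failure path keeps later readers from dereferencing freed memory. A standalone sketch, with malloc/free standing in for devm allocations:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct drvdata { int cpu; };

/* Global registry, like the etmdrvdata[] per-CPU array. */
static struct drvdata *etmdrvdata[NR_CPUS];

static int probe(int cpu, int fail)
{
	struct drvdata *d = calloc(1, sizeof(*d));

	if (!d)
		return -12;		/* -ENOMEM */
	d->cpu = cpu;
	etmdrvdata[cpu] = d;		/* published while probe can still fail */

	if (fail) {
		etmdrvdata[cpu] = NULL;	/* the fix: unpublish before freeing */
		free(d);		/* in the driver, devres frees this */
		return -22;		/* -EINVAL */
	}
	return 0;
}

int main(void)
{
	probe(0, 1);	/* a failed/deferred probe attempt */

	/* A notifier walking the registry now sees NULL, not freed memory. */
	if (!etmdrvdata[0])
		printf("cpu0 entry safely cleared\n");
	return 0;
}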
From: James Morse james.morse@arm.com
[ Upstream commit 8fcc4ae6faf8b455eeef00bc9ae70744e3b0f462 ]
APEI is unable to do all of its error handling work in nmi-context, so it defers non-fatal work onto the irq_work queue. arch_irq_work_raise() sends an IPI to the calling cpu, but this is not guaranteed to be taken before returning to user-space.
Unless the exception interrupted a context with irqs-masked, irq_work_run() can run immediately. Otherwise return -EINPROGRESS to indicate ghes_notify_sea() found some work to do, but it hasn't finished yet.
With this apei_claim_sea() returning '0' means this external-abort was also notification of a firmware-first RAS error, and that APEI has processed the CPER records.
Signed-off-by: James Morse james.morse@arm.com Tested-by: Tyler Baicar baicar@os.amperecomputing.com Acked-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Rafael J. Wysocki rafael.j.wysocki@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- arch/arm64/kernel/acpi.c | 25 +++++++++++++++++++++++++ arch/arm64/mm/fault.c | 12 +++++++----- 2 files changed, 32 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index a100483b47c42..46ec402e97edc 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/irqdomain.h> +#include <linux/irq_work.h> #include <linux/memblock.h> #include <linux/of_fdt.h> #include <linux/smp.h> @@ -269,6 +270,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) int apei_claim_sea(struct pt_regs *regs) { int err = -ENOENT; + bool return_to_irqs_enabled; unsigned long current_flags;
if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES)) @@ -276,6 +278,12 @@ int apei_claim_sea(struct pt_regs *regs)
current_flags = local_daif_save_flags();
+ /* current_flags isn't useful here as daif doesn't tell us about pNMI */ + return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags()); + + if (regs) + return_to_irqs_enabled = interrupts_enabled(regs); + /* * SEA can interrupt SError, mask it and describe this as an NMI so * that APEI defers the handling. @@ -284,6 +292,23 @@ int apei_claim_sea(struct pt_regs *regs) nmi_enter(); err = ghes_notify_sea(); nmi_exit(); + + /* + * APEI NMI-like notifications are deferred to irq_work. Unless + * we interrupted irqs-masked code, we can do that now. + */ + if (!err) { + if (return_to_irqs_enabled) { + local_daif_restore(DAIF_PROCCTX_NOIRQ); + __irq_enter(); + irq_work_run(); + __irq_exit(); + } else { + pr_warn_ratelimited("APEI work queued but not completed"); + err = -EINPROGRESS; + } + } + local_daif_restore(current_flags);
return err; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index d26e6cd289539..2a7339aeb1ad4 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -654,11 +654,13 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
inf = esr_to_fault_info(esr);
- /* - * Return value ignored as we rely on signal merging. - * Future patches will make this more robust. - */ - apei_claim_sea(regs); + if (user_mode(regs) && apei_claim_sea(regs) == 0) { + /* + * APEI claimed this as a firmware-first notification. + * Some processing deferred to task_work before ret_to_user(). + */ + return 0; + }
if (esr & ESR_ELx_FnV) siaddr = NULL;
From: Wei Yongjun weiyongjun1@huawei.com
[ Upstream commit d0b1e4a638d670a09f42017a3e567dc846931ba8 ]
Fix the create_afu error handling case to return the negative error code -ENOMEM instead of 0, as done elsewhere in this function.
Link: https://lore.kernel.org/r/20200428141855.88704-1-weiyongjun1@huawei.com Acked-by: Matthew R. Ochs mrochs@linux.ibm.com Signed-off-by: Wei Yongjun weiyongjun1@huawei.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/cxlflash/main.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 93ef97af22df4..67d681c53c295 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3746,6 +3746,7 @@ static int cxlflash_probe(struct pci_dev *pdev, cfg->afu_cookie = cfg->ops->create_afu(pdev); if (unlikely(!cfg->afu_cookie)) { dev_err(dev, "%s: create_afu failed\n", __func__); + rc = -ENOMEM; goto out_remove; }
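This is the usual goto-cleanup pitfall: rc still holds 0 from an earlier success when a pointer-returning helper fails, so the function reports success. A minimal standalone illustration (create_afu() here is a stub, not the driver's):

#include <stdio.h>

/* Stand-in for create_afu(): returns NULL on allocation failure. */
static void *create_afu(void)
{
	return NULL;
}

static int probe(void)
{
	int rc = 0;
	void *afu;

	afu = create_afu();
	if (!afu) {
		rc = -12;	/* -ENOMEM: without this, probe returns 0 */
		goto out_remove;
	}
	return 0;

out_remove:
	/* ...undo earlier setup here... */
	return rc;
}

int main(void)
{
	printf("probe: %d\n", probe());	/* -12, not a silent success */
	return 0;
}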
From: Anshuman Khandual anshuman.khandual@arm.com
[ Upstream commit 1ed1b90a0594c8c9d31e8bb8be25a2b37717dc9e ]
The ID_DFR0-based TraceFilt feature should not be exposed to guests. Hence let's drop it.
Cc: Catalin Marinas catalin.marinas@arm.com Cc: Will Deacon will@kernel.org Cc: Marc Zyngier maz@kernel.org Cc: Mark Rutland mark.rutland@arm.com Cc: James Morse james.morse@arm.com Cc: Suzuki K Poulose suzuki.poulose@arm.com Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org
Suggested-by: Mark Rutland mark.rutland@arm.com Signed-off-by: Anshuman Khandual anshuman.khandual@arm.com Reviewed-by: Suzuki K Poulose suzuki.poulose@arm.com Link: https://lore.kernel.org/r/1589881254-10082-3-git-send-email-anshuman.khandua... Signed-off-by: Will Deacon will@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 1df57ffc9314d..f2ec845404149 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -319,7 +319,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = { };
static const struct arm64_ftr_bits ftr_id_dfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), + /* [31:28] TraceFilt */ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
From: Philip Yang Philip.Yang@amd.com
[ Upstream commit f7646585a30ed8ef5ab300d4dc3b0c1d6afbe71d ]
In the free-memory-of-GPU path, remove the BO from the validate_list to make sure the restore worker doesn't access the BO any more, then unregister the BO's MMU interval notifier. Otherwise, the restore worker will crash in the middle of validating BO user pages if the MMU interval notifier is gone.
Signed-off-by: Philip Yang Philip.Yang@amd.com Reviewed-by: Felix Kuehling Felix.Kuehling@amd.com Signed-off-by: Alex Deucher alexander.deucher@amd.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index edb561baf8b90..f3fa271e3394c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1247,15 +1247,15 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( * be freed anyway */
- /* No more MMU notifiers */ - amdgpu_mn_unregister(mem->bo); - /* Make sure restore workers don't access the BO any more */ bo_list_entry = &mem->validate_list; mutex_lock(&process_info->lock); list_del(&bo_list_entry->head); mutex_unlock(&process_info->lock);
+ /* No more MMU notifiers */ + amdgpu_mn_unregister(mem->bo); + ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); if (unlikely(ret)) return ret;
From: Alexander Duyck alexander.h.duyck@linux.intel.com
[ Upstream commit 49ee3c2ab5234757bfb56a0b3a3cb422f427e3a3 ]
We are seeing a deadlock in e1000 down when NAPI is being disabled. Looking over the kernel function trace of the system, it appears that the interface is being closed and then a reset hits, which deadlocks the interface as NAPI is already disabled.
To prevent this from happening I am disabling the reset task when __E1000_DOWN is already set. In addition, code has been added so that we set __E1000_DOWN while holding the __E1000_RESETTING flag in e1000_close in order to guarantee that the reset task will not run after we have started the close call.
Signed-off-by: Alexander Duyck alexander.h.duyck@linux.intel.com Tested-by: Maxim Zhukov mussitantesmortem@gmail.com Signed-off-by: Jeff Kirsher jeffrey.t.kirsher@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/ethernet/intel/e1000/e1000_main.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index f93ed70709c65..a2ee28e487a6f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) WARN_ON(in_interrupt()); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); - e1000_down(adapter); - e1000_up(adapter); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); }
@@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; int count = E1000_CHECK_RESET_COUNT;
- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) usleep_range(10000, 20000);
- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + e1000_down(adapter); e1000_power_down_phy(adapter); e1000_free_irq(adapter);
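The coordination relies on two atomic flag bits: close claims RESETTING, publishes DOWN, then releases RESETTING, so any reset task that wins the flag afterwards observes DOWN and backs off. A compact C11-atomics sketch of the handshake (spinning where the driver sleeps):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;
static atomic_bool down;

/* Reset task: only run the down/up cycle if not already down. */
static void reinit_locked(void)
{
	while (atomic_flag_test_and_set(&resetting))
		;	/* the driver sleeps (msleep(1)) between attempts */

	if (!atomic_load(&down))
		printf("down + up cycle\n");
	else
		printf("reset skipped: already down\n");

	atomic_flag_clear(&resetting);
}

/* Close path: claim RESETTING, publish DOWN before releasing it, so any
 * reset task that wins the flag afterwards sees DOWN and backs off. */
static void do_close(void)
{
	while (atomic_flag_test_and_set(&resetting))
		;
	atomic_store(&down, true);
	atomic_flag_clear(&resetting);
	printf("interface closed\n");
}

int main(void)
{
	do_close();
	reinit_locked();	/* before the fix this could deadlock NAPI */
	return 0;
}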
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit 00583fbe8031f69bba8b0a9a861efb75fb7131af ]
pm_runtime_get_sync() increments the runtime PM usage counter even when the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Signed-off-by: Ben Skeggs bskeggs@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 5c314f135dd10..3b13feca970f7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -183,8 +183,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, }
ret = pm_runtime_get_sync(drm->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(drm->dev); return ret; + } + ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); if (ret < 0)
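The pm_runtime contract that motivates this and the identical nouveau and gpio-rcar fixes below: pm_runtime_get_sync() bumps the usage counter even when the resume fails, so every error return must be paired with a put. A toy standalone model of the counter balance:

#include <stdio.h>

static int usage_count;

/* Stand-in for pm_runtime_get_sync(): the counter is bumped even when the
 * resume itself fails and an error code is returned. */
static int runtime_get_sync(int fail)
{
	usage_count++;
	return fail ? -13 : 0;	/* -EACCES */
}

static void runtime_put(void)
{
	usage_count--;
}

static int do_ioctl(int fail)
{
	int ret = runtime_get_sync(fail);

	if (ret < 0) {
		runtime_put();	/* the fix: rebalance before bailing out */
		return ret;
	}
	/* ...do work... */
	runtime_put();
	return 0;
}

int main(void)
{
	do_ioctl(1);
	printf("usage_count=%d (balanced)\n", usage_count);
	return 0;
}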
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit d7372dfb3f7f1602b87e0663e8b8646da23ebca7 ]
pm_runtime_get_sync() increments the runtime PM usage counter even when the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Signed-off-by: Ben Skeggs bskeggs@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index fbfe254227740..7d39d4949ee77 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -78,8 +78,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) return ret;
ret = pm_runtime_get_sync(dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev); goto out; + }
ret = nouveau_vma_new(nvbo, vmm, &vma); pm_runtime_mark_last_busy(dev);
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit dc455f4c888365595c0a13da445e092422d55b8d ]
pm_runtime_get_sync() increments the runtime PM usage counter even when the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Signed-off-by: Ben Skeggs bskeggs@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 419a02260bfa7..ee2b1e1199e09 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1032,8 +1032,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) return connector_status_disconnected;
ret = pm_runtime_get_sync(connector->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + }
conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
From: Shreyas Joshi shreyas.joshi@biamp.com
[ Upstream commit 48021f98130880dd74286459a1ef48b5e9bc374f ]
If U-Boot passes a blank string to console_setup(), it results in trashed memory. Ultimately, the kernel crashes while freeing that memory.
This fix checks whether a blank parameter is being passed to console_setup() from U-Boot. If the console parameter is blank, it doesn't set up the serial device and gracefully exits.
Link: https://lore.kernel.org/r/20200522065306.83-1-shreyas.joshi@biamp.com Signed-off-by: Shreyas Joshi shreyas.joshi@biamp.com Acked-by: Sergey Senozhatsky sergey.senozhatsky@gmail.com [pmladek@suse.com: Better format the commit message and code, remove unnecessary brackets.] Signed-off-by: Petr Mladek pmladek@suse.com Signed-off-by: Sasha Levin sashal@kernel.org --- kernel/printk/printk.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 971197f5d8ee5..5569ef6bc1839 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2193,6 +2193,9 @@ static int __init console_setup(char *str) char *s, *options, *brl_options = NULL; int idx;
+ if (str[0] == 0) + return 1; + if (_braille_console_setup(&str, &brl_options)) return 1;
From: Yu Chen chenyu56@huawei.com
[ Upstream commit 1c0e69ae1b9f9004fd72978612ae3463791edc56 ]
If the SS PHY is in P3, there is no pipe_clk and the HW may run from suspend_clk. As suspend_clk is slow, EP commands need more time to complete; e.g., on i.MX8M suspend_clk is 32 kHz, and setting the EP configuration takes about 380us per the trace timestamps below (44.286278 - 44.285897 = 0.000381):
configfs_acm.sh-822 [000] d..1 44.285896: dwc3_writel: addr 000000006d59aae1 value 00000401 configfs_acm.sh-822 [000] d..1 44.285897: dwc3_readl: addr 000000006d59aae1 value 00000401 ... ... configfs_acm.sh-822 [000] d..1 44.286278: dwc3_readl: addr 000000006d59aae1 value 00000001 configfs_acm.sh-822 [000] d..1 44.286279: dwc3_gadget_ep_cmd: ep0out: cmd 'Set Endpoint Configuration' [401] params 00001000 00000500 00000000 --> status: Successful
This was originally found on the HiSilicon Kirin SoC, which needs more time for the device controller to clear the CmdAct bit of DEPCMD.
Signed-off-by: Yu Chen chenyu56@huawei.com Signed-off-by: John Stultz john.stultz@linaro.org Signed-off-by: Li Jun jun.li@nxp.com Signed-off-by: Felipe Balbi balbi@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/usb/dwc3/gadget.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4225544342519..809103254fc64 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -270,7 +270,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; - u32 timeout = 1000; + u32 timeout = 5000; u32 saved_config = 0; u32 reg;
From: David Sterba dsterba@suse.com
[ Upstream commit 7c09c03091ac562ddca2b393e5d65c1d37da79f1 ]
Deleting a subvolume on a full filesystem leads to ENOSPC followed by a forced read-only. This is not a transaction abort and the filesystem is otherwise ok, so the error should be just propagated to the callers.
This is caused by an unnecessary call to btrfs_handle_fs_error for all errors except EAGAIN. This does not make sense, as the standard transaction abort mechanism is in btrfs_drop_snapshot, so all relevant failures are handled there.
Originally in commit cb1b69f4508a ("Btrfs: forced readonly when btrfs_drop_snapshot() fails") there was no return value at all, so the btrfs_std_error made some sense but once the error handling and propagation has been implemented we don't need it anymore.
Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/btrfs/extent-tree.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 541497036cc24..60c3a03203fae 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5429,8 +5429,6 @@ out: */ if (!for_reloc && !root_dropped) btrfs_add_dead_root(root); - if (err && err != -EAGAIN) - btrfs_handle_fs_error(fs_info, err, NULL); return err; }
From: Omar Sandoval osandov@fb.com
[ Upstream commit c36cac28cb94e58f7e21ff43bdc6064346dab32c ]
In btrfs_submit_direct(), if we fail to allocate the btrfs_dio_private, we complete the ordered extent range. However, we don't mark that the range doesn't need to be cleaned up from btrfs_direct_IO() until later. Therefore, if we fail to allocate the btrfs_dio_private, we complete the ordered extent range twice. We could fix this by updating unsubmitted_oe_range earlier, but it's cleaner to reorganize the code so that creating the btrfs_dio_private and submitting the bios are separate, and once the btrfs_dio_private is created, cleanup always happens through the btrfs_dio_private.
The logic around unsubmitted_oe_range_end and unsubmitted_oe_range_start is really subtle. We have the following:
1. btrfs_direct_IO sets those two to the same value.
2. When we call __blockdev_direct_IO, cleanup won't happen unless btrfs_get_blocks_direct->btrfs_get_blocks_direct_write is called to modify unsubmitted_oe_range_start so that start < end.
3. We come into btrfs_submit_direct - if the dip allocation fails, we return with oe_range_end now modified, so cleanup will happen.
4. If we manage to allocate the dip we reset the unsubmitted range members to be equal so that cleanup happens from btrfs_endio_direct_write.
This 4-step logic is not really obvious, especially given it's scattered across 3 functions.
Fixes: f28a49287817 ("Btrfs: fix leaking of ordered extents after direct IO write error") Reviewed-by: Johannes Thumshirn johannes.thumshirn@wdc.com Reviewed-by: Nikolay Borisov nborisov@suse.com Signed-off-by: Omar Sandoval osandov@fb.com [ add range start/end logic explanation from Nikolay ] Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/btrfs/inode.c | 178 +++++++++++++++++++---------------------------- 1 file changed, 70 insertions(+), 108 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9ac40991a6405..e9787b7b943a2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8586,14 +8586,64 @@ err: return ret; }
-static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) +/* + * If this succeeds, the btrfs_dio_private is responsible for cleaning up locked + * or ordered extents whether or not we submit any bios. + */ +static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio, + struct inode *inode, + loff_t file_offset) { - struct inode *inode = dip->inode; + const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); + struct btrfs_dio_private *dip; + struct bio *bio; + + dip = kzalloc(sizeof(*dip), GFP_NOFS); + if (!dip) + return NULL; + + bio = btrfs_bio_clone(dio_bio); + bio->bi_private = dip; + btrfs_io_bio(bio)->logical = file_offset; + + dip->private = dio_bio->bi_private; + dip->inode = inode; + dip->logical_offset = file_offset; + dip->bytes = dio_bio->bi_iter.bi_size; + dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; + dip->orig_bio = bio; + dip->dio_bio = dio_bio; + atomic_set(&dip->pending_bios, 1); + + if (write) { + struct btrfs_dio_data *dio_data = current->journal_info; + + /* + * Setting range start and end to the same value means that + * no cleanup will happen in btrfs_direct_IO + */ + dio_data->unsubmitted_oe_range_end = dip->logical_offset + + dip->bytes; + dio_data->unsubmitted_oe_range_start = + dio_data->unsubmitted_oe_range_end; + + bio->bi_end_io = btrfs_endio_direct_write; + } else { + bio->bi_end_io = btrfs_endio_direct_read; + dip->subio_endio = btrfs_subio_endio_read; + } + return dip; +} + +static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, + loff_t file_offset) +{ + const bool write = (bio_op(dio_bio) == REQ_OP_WRITE); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_dio_private *dip; struct bio *bio; - struct bio *orig_bio = dip->orig_bio; - u64 start_sector = orig_bio->bi_iter.bi_sector; - u64 file_offset = dip->logical_offset; + struct bio *orig_bio; + u64 start_sector; int async_submit = 0; u64 submit_len; int clone_offset = 0; @@ -8602,11 +8652,24 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) blk_status_t status; struct btrfs_io_geometry geom;
+ dip = btrfs_create_dio_private(dio_bio, inode, file_offset); + if (!dip) { + if (!write) { + unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, + file_offset + dio_bio->bi_iter.bi_size - 1); + } + dio_bio->bi_status = BLK_STS_RESOURCE; + dio_end_io(dio_bio); + return; + } + + orig_bio = dip->orig_bio; + start_sector = orig_bio->bi_iter.bi_sector; submit_len = orig_bio->bi_iter.bi_size; ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio), start_sector << 9, submit_len, &geom); if (ret) - return -EIO; + goto out_err;
if (geom.len >= submit_len) { bio = orig_bio; @@ -8669,7 +8732,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) submit: status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit); if (!status) - return 0; + return;
if (bio != orig_bio) bio_put(bio); @@ -8683,107 +8746,6 @@ out_err: */ if (atomic_dec_and_test(&dip->pending_bios)) bio_io_error(dip->orig_bio); - - /* bio_end_io() will handle error, so we needn't return it */ - return 0; -} - -static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, - loff_t file_offset) -{ - struct btrfs_dio_private *dip = NULL; - struct bio *bio = NULL; - struct btrfs_io_bio *io_bio; - bool write = (bio_op(dio_bio) == REQ_OP_WRITE); - int ret = 0; - - bio = btrfs_bio_clone(dio_bio); - - dip = kzalloc(sizeof(*dip), GFP_NOFS); - if (!dip) { - ret = -ENOMEM; - goto free_ordered; - } - - dip->private = dio_bio->bi_private; - dip->inode = inode; - dip->logical_offset = file_offset; - dip->bytes = dio_bio->bi_iter.bi_size; - dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; - bio->bi_private = dip; - dip->orig_bio = bio; - dip->dio_bio = dio_bio; - atomic_set(&dip->pending_bios, 1); - io_bio = btrfs_io_bio(bio); - io_bio->logical = file_offset; - - if (write) { - bio->bi_end_io = btrfs_endio_direct_write; - } else { - bio->bi_end_io = btrfs_endio_direct_read; - dip->subio_endio = btrfs_subio_endio_read; - } - - /* - * Reset the range for unsubmitted ordered extents (to a 0 length range) - * even if we fail to submit a bio, because in such case we do the - * corresponding error handling below and it must not be done a second - * time by btrfs_direct_IO(). - */ - if (write) { - struct btrfs_dio_data *dio_data = current->journal_info; - - dio_data->unsubmitted_oe_range_end = dip->logical_offset + - dip->bytes; - dio_data->unsubmitted_oe_range_start = - dio_data->unsubmitted_oe_range_end; - } - - ret = btrfs_submit_direct_hook(dip); - if (!ret) - return; - - btrfs_io_bio_free_csum(io_bio); - -free_ordered: - /* - * If we arrived here it means either we failed to submit the dip - * or we either failed to clone the dio_bio or failed to allocate the - * dip. If we cloned the dio_bio and allocated the dip, we can just - * call bio_endio against our io_bio so that we get proper resource - * cleanup if we fail to submit the dip, otherwise, we must do the - * same as btrfs_endio_direct_[write|read] because we can't call these - * callbacks - they require an allocated dip and a clone of dio_bio. - */ - if (bio && dip) { - bio_io_error(bio); - /* - * The end io callbacks free our dip, do the final put on bio - * and all the cleanup and final put for dio_bio (through - * dio_end_io()). - */ - dip = NULL; - bio = NULL; - } else { - if (write) - __endio_write_update_ordered(inode, - file_offset, - dio_bio->bi_iter.bi_size, - false); - else - unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, - file_offset + dio_bio->bi_iter.bi_size - 1); - - dio_bio->bi_status = BLK_STS_IOERR; - /* - * Releases and cleans up our dio_bio, no need to bio_put() - * nor bio_endio()/bio_io_error() against dio_bio. - */ - dio_end_io(dio_bio); - } - if (bio) - bio_put(bio); - kfree(dip); }
static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit 6f8cd246411575703d9312888b70705c396b53a9 ]
pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
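Reduced to its core, the pattern this series of fixes keeps restoring looks like the following sketch; the function and surrounding driver code are illustrative, only the pm_runtime_* calls are the real API:

#include <linux/pm_runtime.h>

/*
 * pm_runtime_get_sync() bumps the usage counter even when it fails, so
 * an early return after a failed get must still be paired with a put.
 */
static int example_runtime_op(struct device *dev)
{
        int error = pm_runtime_get_sync(dev);

        if (error < 0) {
                pm_runtime_put(dev);            /* rebalance the counter */
                return error;
        }

        /* ... work that needs the device powered ... */

        pm_runtime_put(dev);                    /* matching put on success */
        return 0;
}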
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Link: https://lore.kernel.org/r/20200522080839.32612-1-dinghao.liu@zju.edu.cn Signed-off-by: Linus Walleij linus.walleij@linaro.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/gpio/gpio-rcar.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 187984d26f47a..f0b6c68e848e3 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -250,8 +250,10 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) int error;
error = pm_runtime_get_sync(p->dev); - if (error < 0) + if (error < 0) { + pm_runtime_put(p->dev); return error; + }
error = pinctrl_gpio_request(chip->base + offset); if (error)
From: Qian Cai cai@lca.pw
[ Upstream commit 1518ac272e789cae8c555d69951b032a275b7602 ]
Finishing a qemu-kvm guest (-device vfio-pci,host=0001:01:00.0) triggers a few memory leaks after a while because vfio_pci_set_ctx_trigger_single() calls eventfd_ctx_fdget() without the matching eventfd_ctx_put() later. Fix it by calling eventfd_ctx_put() on those contexts in vfio_pci_release() before vfio_device_release().
unreferenced object 0xebff008981cc2b00 (size 128): comm "qemu-kvm", pid 4043, jiffies 4294994816 (age 9796.310s) hex dump (first 32 bytes): 01 00 00 00 6b 6b 6b 6b 00 00 00 00 ad 4e ad de ....kkkk.....N.. ff ff ff ff 6b 6b 6b 6b ff ff ff ff ff ff ff ff ....kkkk........ backtrace: [<00000000917e8f8d>] slab_post_alloc_hook+0x74/0x9c [<00000000df0f2aa2>] kmem_cache_alloc_trace+0x2b4/0x3d4 [<000000005fcec025>] do_eventfd+0x54/0x1ac [<0000000082791a69>] __arm64_sys_eventfd2+0x34/0x44 [<00000000b819758c>] do_el0_svc+0x128/0x1dc [<00000000b244e810>] el0_sync_handler+0xd0/0x268 [<00000000d495ef94>] el0_sync+0x164/0x180 unreferenced object 0x29ff008981cc4180 (size 128): comm "qemu-kvm", pid 4043, jiffies 4294994818 (age 9796.290s) hex dump (first 32 bytes): 01 00 00 00 6b 6b 6b 6b 00 00 00 00 ad 4e ad de ....kkkk.....N.. ff ff ff ff 6b 6b 6b 6b ff ff ff ff ff ff ff ff ....kkkk........ backtrace: [<00000000917e8f8d>] slab_post_alloc_hook+0x74/0x9c [<00000000df0f2aa2>] kmem_cache_alloc_trace+0x2b4/0x3d4 [<000000005fcec025>] do_eventfd+0x54/0x1ac [<0000000082791a69>] __arm64_sys_eventfd2+0x34/0x44 [<00000000b819758c>] do_el0_svc+0x128/0x1dc [<00000000b244e810>] el0_sync_handler+0xd0/0x268 [<00000000d495ef94>] el0_sync+0x164/0x180
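A hedged sketch of the reference rule behind the fix; struct example_dev and the two helpers are illustrative, only the eventfd_ctx_* calls are the real API:

#include <linux/err.h>
#include <linux/eventfd.h>

struct example_dev {
        struct eventfd_ctx *err_trigger;        /* illustrative field */
};

static int example_set_trigger(struct example_dev *d, int fd)
{
        struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
        d->err_trigger = ctx;   /* the reference is now owned by d */
        return 0;
}

static void example_release(struct example_dev *d)
{
        /* without this put, the ctx from eventfd_ctx_fdget() leaks */
        if (d->err_trigger)
                eventfd_ctx_put(d->err_trigger);
}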
Signed-off-by: Qian Cai cai@lca.pw Signed-off-by: Alex Williamson alex.williamson@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/vfio/pci/vfio_pci.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 0d16f9806655f..12f7691e8b6ca 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -474,6 +474,10 @@ static void vfio_pci_release(void *device_data) if (!(--vdev->refcnt)) { vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + if (vdev->err_trigger) + eventfd_ctx_put(vdev->err_trigger); + if (vdev->req_trigger) + eventfd_ctx_put(vdev->req_trigger); }
mutex_unlock(&vdev->reflck->lock);
From: Paul Mackerras paulus@ozlabs.org
[ Upstream commit 11362b1befeadaae4d159a8cddcdaf6b8afe08f9 ]
There is a potential race condition between hypervisor page faults and flushing a memslot. It is possible for a page fault to read the memslot before a memslot is updated and then write a PTE to the partition-scoped page tables after kvmppc_radix_flush_memslot has completed. (Note that this race has never been explicitly observed.)
To close this race, it is sufficient to increment the MMU sequence number while the kvm->mmu_lock is held. That will cause mmu_notifier_retry() to return true, and the page fault will then return to the guest without inserting a PTE.
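A sketch of both sides of that protocol, following the generic KVM pattern (mmu_notifier_retry() and the powerpc RESUME_GUEST constant are real; the two functions and the elided steps are illustrative):

#include <linux/kvm_host.h>

/* invalidation side: runs with kvm->mmu_lock held */
static void example_flush_memslot(struct kvm *kvm)
{
        spin_lock(&kvm->mmu_lock);
        /* ... tear down the partition-scoped PTEs for the memslot ... */
        kvm->mmu_notifier_seq++;        /* makes racing faults retry */
        spin_unlock(&kvm->mmu_lock);
}

/* fault side: sample the sequence, re-check before inserting a PTE */
static int example_page_fault(struct kvm *kvm)
{
        unsigned long mmu_seq = kvm->mmu_notifier_seq;

        smp_rmb();
        /* ... translate the guest address, possibly sleeping ... */
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                spin_unlock(&kvm->mmu_lock);
                return RESUME_GUEST;    /* insert nothing, guest retries */
        }
        /* ... now it is safe to write the PTE ... */
        spin_unlock(&kvm->mmu_lock);
        return RESUME_GUEST;
}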
Signed-off-by: Paul Mackerras paulus@ozlabs.org Signed-off-by: Sasha Levin sashal@kernel.org --- arch/powerpc/kvm/book3s_64_mmu_radix.c | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index da8375437d161..9d73448354698 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -1104,6 +1104,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm, kvm->arch.lpid); gpa += PAGE_SIZE; } + /* + * Increase the mmu notifier sequence number to prevent any page + * fault that read the memslot earlier from writing a PTE. + */ + kvm->mmu_notifier_seq++; spin_unlock(&kvm->mmu_lock); }
From: Arnaldo Carvalho de Melo acme@redhat.com
[ Upstream commit 7fcdccd4237724931d9773d1e3039bfe053a6f52 ]
When applying a patch by Ian I incorrectly converted to zfree() an expression that involved testing some other struct member, not the one being freed, which led to bugs reproducible by:
$ perf stat -e i/bs,tsc,L2/o sleep 1 WARNING: multiple event parsing errors Segmentation fault (core dumped) $
Fix it by restoring the test for pos->free_str before freeing pos->val.str, but continue using zfree(&pos->val.str) to set that member to NULL after freeing it.
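A reduced userspace illustration of why the guard matters; struct term is a simplified stand-in for the real parse-events term, not its actual layout:

#include <stdlib.h>

struct term {
        union {
                long num;
                char *str;
        } val;
        int free_str;           /* set only when val.str owns memory */
};

static void term_delete(struct term *pos)
{
        /*
         * val is a union: without the free_str check, this would free
         * a reinterpreted integer value, which is exactly what the
         * broken zfree() conversion did.
         */
        if (pos->free_str) {
                free(pos->val.str);
                pos->val.str = NULL;    /* what zfree() also does */
        }
        free(pos);
}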
Reported-by: Ian Rogers irogers@google.com Fixes: e8dfb81838b1 ("perf parse-events: Fix memory leaks found on parse_events") Cc: Adrian Hunter adrian.hunter@intel.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Andi Kleen ak@linux.intel.com Cc: clang-built-linux@googlegroups.com Cc: Jiri Olsa jolsa@kernel.org Cc: Leo Yan leo.yan@linaro.org Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Stephane Eranian eranian@google.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/parse-events.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 5fadad158db59..f16748cfcb262 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1370,7 +1370,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
list_for_each_entry_safe(pos, tmp, &config_terms, list) { list_del_init(&pos->list); - zfree(&pos->val.str); + if (pos->free_str) + zfree(&pos->val.str); free(pos); } return -EINVAL;
From: Ian Rogers irogers@google.com
[ Upstream commit 3efc899d9afb3d03604f191a0be9669eabbfc4aa ]
If allocated, perf_pkg_mask and metric_events need freeing.
Signed-off-by: Ian Rogers irogers@google.com Reviewed-by: Andi Kleen ak@linux.intel.com Cc: Adrian Hunter adrian.hunter@intel.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Jiri Olsa jolsa@redhat.com Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Stephane Eranian eranian@google.com Link: http://lore.kernel.org/lkml/20200512235918.10732-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/evsel.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 12b1755b136d3..9dd9e3f4ef591 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1255,6 +1255,8 @@ void perf_evsel__exit(struct evsel *evsel) zfree(&evsel->group_name); zfree(&evsel->name); zfree(&evsel->pmu_name); + zfree(&evsel->per_pkg_mask); + zfree(&evsel->metric_events); perf_evsel__object.fini(evsel); }
From: Ian Rogers irogers@google.com
[ Upstream commit 7597ce89b3ed239f7a3408b930d2a6c7a4c938a1 ]
Make the architecture test directory agree with the code comment.
Committer notes:
This was split from a larger patch.
The code was assuming the developer always worked from tools/perf/, so make sure the test -d checks $toolsdir/perf/arch/$arch, to match the intent expressed in the comment just above that loop.
Signed-off-by: Ian Rogers irogers@google.com Cc: Adrian Hunter adrian.hunter@intel.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Alexios Zavras alexios.zavras@intel.com Cc: Andi Kleen ak@linux.intel.com Cc: Greg Kroah-Hartman gregkh@linuxfoundation.org Cc: Igor Lubashev ilubashe@akamai.com Cc: Jiri Olsa jolsa@redhat.com Cc: Kan Liang kan.liang@linux.intel.com Cc: Mark Rutland mark.rutland@arm.com Cc: Mathieu Poirier mathieu.poirier@linaro.org Cc: Namhyung Kim namhyung@kernel.org Cc: Nick Desaulniers ndesaulniers@google.com Cc: Peter Zijlstra peterz@infradead.org Cc: Stephane Eranian eranian@google.com Cc: Thomas Gleixner tglx@linutronix.de Cc: Wei Li liwei391@huawei.com Link: http://lore.kernel.org/lkml/20200306071110.130202-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/trace/beauty/arch_errno_names.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh index 22c9fc900c847..f8c44a85650be 100755 --- a/tools/perf/trace/beauty/arch_errno_names.sh +++ b/tools/perf/trace/beauty/arch_errno_names.sh @@ -91,7 +91,7 @@ EoHEADER # in tools/perf/arch archlist="" for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do - test -d arch/$arch && archlist="$archlist $arch" + test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch" done
for arch in x86 $archlist generic; do
From: Jiri Olsa jolsa@kernel.org
[ Upstream commit ea9eb1f456a08c18feb485894185f7a4e31cc8a4 ]
Joakim reported wrong duration_time value for interval bigger than 4000 [1].
The problem is in the interval value we pass to the update_stats() function, which is typed as 'unsigned int' and overflows when we get over 2^32 (which happens between intervals of 4000 and 5000 ms).

Retype the passed value as unsigned long long.
[1] https://www.spinics.net/lists/linux-perf-users/msg11777.html
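Since the bug is pure integer arithmetic, a minimal userspace illustration (not perf code) shows the wraparound:

#include <stdio.h>

int main(void)
{
        unsigned int interval = 5000;   /* ms, as in perf stat -I 5000 */

        /* the 32-bit multiply wraps: 5000 * 1000000 = 5e9 > UINT_MAX */
        printf("%u\n", interval * 1000000);      /* 705032704: wrong  */
        printf("%llu\n", interval * 1000000ULL); /* 5000000000: right */
        return 0;
}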
Fixes: b90f1333ef08 ("perf stat: Update walltime_nsecs_stats in interval mode") Reported-by: Joakim Zhang qiangqing.zhang@nxp.com Signed-off-by: Jiri Olsa jolsa@kernel.org Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Andi Kleen ak@linux.intel.com Cc: Michael Petlan mpetlan@redhat.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Link: http://lore.kernel.org/lkml/20200518131445.3745083-1-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/builtin-stat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 468fc49420ce1..ac2feddc75fdd 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -351,7 +351,7 @@ static void process_interval(void) }
init_stats(&walltime_nsecs_stats); - update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000); + update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); print_counters(&rs, 0, NULL); }
From: Xie XiuQi xiexiuqi@huawei.com
[ Upstream commit 07e9a6f538cbeecaf5c55b6f2991416f873cdcbd ]
We need to free "str" before returning when asprintf() fails, to avoid a memory leak.
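A userspace sketch of the fixed pattern in prefix_if_not_in(); the helper name mirrors the real function, the body is a reduction:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static char *prefix_str(const char *pre, char *str)
{
        char *n;

        /* on failure, fall through with n == NULL so str is still freed */
        if (asprintf(&n, "%s,%s", pre, str) < 0)
                n = NULL;

        free(str);
        return n;
}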
Signed-off-by: Xie XiuQi xiexiuqi@huawei.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Hongbo Yao yaohongbo@huawei.com Cc: Jiri Olsa jolsa@redhat.com Cc: Li Bin huawei.libin@huawei.com Cc: Mark Rutland mark.rutland@arm.com Cc: Namhyung Kim namhyung@kernel.org Link: http://lore.kernel.org/lkml/20200521133218.30150-4-liwei391@huawei.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/sort.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 43d1d410854a3..4027906fd3e38 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2788,7 +2788,7 @@ static char *prefix_if_not_in(const char *pre, char *str) return str;
if (asprintf(&n, "%s,%s", pre, str) < 0) - return NULL; + n = NULL;
free(str); return n;
From: Ian Rogers irogers@google.com
[ Upstream commit a159e2fe89b4d1f9fb54b0ae418b961e239bf617 ]
Avoid a simple memory leak.
Signed-off-by: Ian Rogers irogers@google.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Alexei Starovoitov ast@kernel.org Cc: Andi Kleen ak@linux.intel.com Cc: Andrii Nakryiko andriin@fb.com Cc: Cong Wang xiyou.wangcong@gmail.com Cc: Daniel Borkmann daniel@iogearbox.net Cc: Jin Yao yao.jin@linux.intel.com Cc: Jiri Olsa jolsa@redhat.com Cc: John Fastabend john.fastabend@gmail.com Cc: John Garry john.garry@huawei.com Cc: Kajol Jain kjain@linux.ibm.com Cc: Kan Liang kan.liang@linux.intel.com Cc: Kim Phillips kim.phillips@amd.com Cc: Mark Rutland mark.rutland@arm.com Cc: Martin KaFai Lau kafai@fb.com Cc: Namhyung Kim namhyung@kernel.org Cc: Peter Zijlstra peterz@infradead.org Cc: Song Liu songliubraving@fb.com Cc: Stephane Eranian eranian@google.com Cc: Vince Weaver vincent.weaver@maine.edu Cc: Yonghong Song yhs@fb.com Cc: bpf@vger.kernel.org Cc: kp singh kpsingh@chromium.org Cc: netdev@vger.kernel.org Link: http://lore.kernel.org/lkml/20200508053629.210324-10-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/metricgroup.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 940a6e7a68549..7753c3091478a 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -174,6 +174,7 @@ static int metricgroup__setup_events(struct list_head *groups, if (!evsel) { pr_debug("Cannot resolve %s: %s\n", eg->metric_name, eg->metric_expr); + free(metric_events); continue; } for (i = 0; i < eg->idnum; i++) @@ -181,11 +182,13 @@ static int metricgroup__setup_events(struct list_head *groups, me = metricgroup__lookup(metric_events_list, evsel, true); if (!me) { ret = -ENOMEM; + free(metric_events); break; } expr = malloc(sizeof(struct metric_expr)); if (!expr) { ret = -ENOMEM; + free(metric_events); break; } expr->metric_expr = eg->metric_expr;
From: Adrian Hunter adrian.hunter@intel.com
[ Upstream commit 61f82e3fb697a8e85f22fdec786528af73dc36d1 ]
In the absence of any modules, no "modules" map is created, but there are other executable pages to map, due to eBPF JIT, kprobe or ftrace. Map them by recognizing that the first "module" symbol is not necessarily from a module, and adjust the map accordingly.
Signed-off-by: Adrian Hunter adrian.hunter@intel.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Borislav Petkov bp@alien8.de Cc: H. Peter Anvin hpa@zytor.com Cc: Jiri Olsa jolsa@redhat.com Cc: Leo Yan leo.yan@linaro.org Cc: Mark Rutland mark.rutland@arm.com Cc: Masami Hiramatsu mhiramat@kernel.org Cc: Mathieu Poirier mathieu.poirier@linaro.org Cc: Peter Zijlstra peterz@infradead.org Cc: Steven Rostedt (VMware) rostedt@goodmis.org Cc: x86@kernel.org Link: http://lore.kernel.org/lkml/20200512121922.8997-10-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/symbol-elf.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 66f4be1df573e..2ec0a32da5793 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1449,6 +1449,7 @@ struct kcore_copy_info { u64 first_symbol; u64 last_symbol; u64 first_module; + u64 first_module_symbol; u64 last_module_symbol; size_t phnum; struct list_head phdrs; @@ -1525,6 +1526,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, return 0;
if (strchr(name, '[')) { + if (!kci->first_module_symbol || start < kci->first_module_symbol) + kci->first_module_symbol = start; if (start > kci->last_module_symbol) kci->last_module_symbol = start; return 0; @@ -1722,6 +1725,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, kci->etext += page_size; }
+ if (kci->first_module_symbol && + (!kci->first_module || kci->first_module_symbol < kci->first_module)) + kci->first_module = kci->first_module_symbol; + kci->first_module = round_down(kci->first_module, page_size);
if (kci->last_module_symbol) {
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit 1c1dbb2c02623db18a50c61b175f19aead800b4e ]
pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Link: https://lore.kernel.org/r/20200521031355.7022-1-dinghao.liu@zju.edu.cn Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Signed-off-by: Lorenzo Pieralisi lorenzo.pieralisi@arm.com Acked-by: Thierry Reding treding@nvidia.com Acked-by: Vidya Sagar vidyas@nvidia.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/pci/controller/dwc/pcie-tegra194.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index f89f5acee72d4..c06b05ab9f787 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -1395,7 +1395,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) ret = pinctrl_pm_select_default_state(dev); if (ret < 0) { dev_err(dev, "Failed to configure sideband pins: %d\n", ret); - goto fail_pinctrl; + goto fail_pm_get_sync; }
tegra_pcie_init_controller(pcie); @@ -1422,9 +1422,8 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
fail_host_init: tegra_pcie_deinit_controller(pcie); -fail_pinctrl: - pm_runtime_put_sync(dev); fail_pm_get_sync: + pm_runtime_put_sync(dev); pm_runtime_disable(dev); return ret; }
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit 65bd91dd6957390c42a0491b9622cf31a2cdb140 ]
pm_runtime_get_sync() increments the runtime PM usage counter even when the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Link: https://lore.kernel.org/r/20200529012230.5863-1-dinghao.liu@zju.edu.cn Signed-off-by: Mark Brown broonie@kernel.org Signed-off-by: Sasha Levin sashal@kernel.org --- sound/soc/img/img-i2s-out.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c index 4b18534096336..9c4212f2f7269 100644 --- a/sound/soc/img/img-i2s-out.c +++ b/sound/soc/img/img-i2s-out.c @@ -347,8 +347,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
ret = pm_runtime_get_sync(i2s->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(i2s->dev); return ret; + }
img_i2s_out_disable(i2s);
@@ -488,8 +490,10 @@ static int img_i2s_out_probe(struct platform_device *pdev) goto err_pm_disable; } ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto err_suspend; + }
reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK; img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit 9604617e998b49f7695fea1479ed82421ef8c9f0 ]
There are two error handling paths in this function. When wlcore_tx_work_locked() returns an error code, we should decrease the runtime PM usage counter in the same way as the error handling path beginning at pm_runtime_get_sync().
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Acked-by: Tony Lindgren tony@atomide.com Signed-off-by: Kalle Valo kvalo@codeaurora.org Link: https://lore.kernel.org/r/20200520124241.9931-1-dinghao.liu@zju.edu.cn Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/ti/wlcore/tx.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 90e56d4c3df3b..e20e18cd04aed 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -863,6 +863,7 @@ void wl1271_tx_work(struct work_struct *work)
ret = wlcore_tx_work_locked(wl); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); wl12xx_queue_recovery_work(wl); goto out; }
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit 282a04bf1d8029eb98585cb5db3fd70fe8bc91f7 ]
pm_runtime_get_sync() increments the runtime PM usage counter even when the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Acked-by: Tony Lindgren tony@atomide.com Signed-off-by: Kalle Valo kvalo@codeaurora.org Link: https://lore.kernel.org/r/20200520124649.10848-1-dinghao.liu@zju.edu.cn Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/ti/wlcore/main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 547ad538d8b66..5f74cf821068d 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -3658,8 +3658,10 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out;
ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(wl->dev); goto out; + }
ret = wlcore_cmd_regdomain_config_locked(wl); if (ret < 0) {
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit 550e68ea36a6671a96576c0531685ce6e6c0d19d ]
pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Signed-off-by: Miquel Raynal miquel.raynal@bootlin.com Link: https://lore.kernel.org/linux-mtd/20200522095139.19653-1-dinghao.liu@zju.edu... Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index b9d5d55a5edb9..ef89947ee3191 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -540,8 +540,10 @@ static int bch_set_geometry(struct gpmi_nand_data *this) return ret;
ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(this->dev); return ret; + }
/* * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit 37f7212148cf1d796135cdf8d0c7fee13067674b ]
pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Signed-off-by: Miquel Raynal miquel.raynal@bootlin.com Link: https://lore.kernel.org/linux-mtd/20200522104008.28340-1-dinghao.liu@zju.edu... Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/mtd/nand/raw/omap_elm.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 5502ffbdd1e6d..6e0e31eab7cce 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -411,6 +411,7 @@ static int elm_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (pm_runtime_get_sync(&pdev->dev) < 0) { ret = -EINVAL; + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); dev_err(&pdev->dev, "can't enable clock\n"); return ret;
From: Dinghao Liu dinghao.liu@zju.edu.cn
[ Upstream commit fcee90cdf6f3a3a371add04d41528d5ba9c3b411 ]
pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced.
Also, call pm_runtime_disable() when pm_runtime_get_sync() returns an error code.
Link: https://lore.kernel.org/r/20200521024709.2368-1-dinghao.liu@zju.edu.cn Signed-off-by: Dinghao Liu dinghao.liu@zju.edu.cn Signed-off-by: Lorenzo Pieralisi lorenzo.pieralisi@arm.com Acked-by: Thierry Reding treding@nvidia.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/pci/controller/pci-tegra.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index b71e753419c2d..cfa3c83d6cc74 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2768,7 +2768,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) err = pm_runtime_get_sync(pcie->dev); if (err < 0) { dev_err(dev, "fail to enable pcie controller: %d\n", err); - goto teardown_msi; + goto pm_runtime_put; }
err = tegra_pcie_request_resources(pcie); @@ -2808,7 +2808,6 @@ free_resources: pm_runtime_put: pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); -teardown_msi: tegra_pcie_msi_teardown(pcie); put_resources: tegra_pcie_put_resources(pcie);
From: Jeff Layton jlayton@kernel.org
[ Upstream commit dc3da0461cc4b76f2d0c5b12247fcb3b520edbbf ]
Nothing ensures that session will still be valid by the time we dereference the pointer. Take and put a reference.
In principle, we should always be able to get a reference here, but throw a warning if that's ever not the case.
Signed-off-by: Jeff Layton jlayton@kernel.org Signed-off-by: Ilya Dryomov idryomov@gmail.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/ceph/caps.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index b2695919435e8..af563d73d252c 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2013,12 +2013,24 @@ ack: if (mutex_trylock(&session->s_mutex) == 0) { dout("inverting session/ino locks on %p\n", session); + session = ceph_get_mds_session(session); spin_unlock(&ci->i_ceph_lock); if (took_snap_rwsem) { up_read(&mdsc->snap_rwsem); took_snap_rwsem = 0; } - mutex_lock(&session->s_mutex); + if (session) { + mutex_lock(&session->s_mutex); + ceph_put_mds_session(session); + } else { + /* + * Because we take the reference while + * holding the i_ceph_lock, it should + * never be NULL. Throw a warning if it + * ever is. + */ + WARN_ON_ONCE(true); + } goto retry; } }
From: Qian Cai cai@lca.pw
[ Upstream commit d6c1f098f2a7ba62627c9bc17cda28f534ef9e4a ]
"prev_offset" is a static variable in swapin_nr_pages() that can be accessed concurrently with only mmap_sem held in read mode as noticed by KCSAN,
BUG: KCSAN: data-race in swap_cluster_readahead / swap_cluster_readahead
write to 0xffffffff92763830 of 8 bytes by task 14795 on cpu 17: swap_cluster_readahead+0x2a6/0x5e0 swapin_readahead+0x92/0x8dc do_swap_page+0x49b/0xf20 __handle_mm_fault+0xcfb/0xd70 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x715 page_fault+0x34/0x40
1 lock held by (dnf)/14795: #0: ffff897bd2e98858 (&mm->mmap_sem#2){++++}-{3:3}, at: do_page_fault+0x143/0x715 do_user_addr_fault at arch/x86/mm/fault.c:1405 (inlined by) do_page_fault at arch/x86/mm/fault.c:1535 irq event stamp: 83493 count_memcg_event_mm+0x1a6/0x270 count_memcg_event_mm+0x119/0x270 __do_softirq+0x365/0x589 irq_exit+0xa2/0xc0
read to 0xffffffff92763830 of 8 bytes by task 1 on cpu 22: swap_cluster_readahead+0xfd/0x5e0 swapin_readahead+0x92/0x8dc do_swap_page+0x49b/0xf20 __handle_mm_fault+0xcfb/0xd70 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x715 page_fault+0x34/0x40
1 lock held by systemd/1: #0: ffff897c38f14858 (&mm->mmap_sem#2){++++}-{3:3}, at: do_page_fault+0x143/0x715 irq event stamp: 43530289 count_memcg_event_mm+0x1a6/0x270 count_memcg_event_mm+0x119/0x270 __do_softirq+0x365/0x589 irq_exit+0xa2/0xc0
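A hedged sketch of the annotation pattern applied by the patch; the function is illustrative, not the mm/swap_state.c code:

#include <linux/compiler.h>

static unsigned long prev_offset;       /* shared, intentionally unlocked */

static unsigned long example_readahead(unsigned long offset, int hits)
{
        /* one plain racy load, annotated so the compiler can neither
         * tear nor refetch it, and so KCSAN knows the race is intended */
        unsigned long prev = READ_ONCE(prev_offset);

        /* ... compute the readahead window from prev and offset ... */

        if (!hits)
                WRITE_ONCE(prev_offset, offset);        /* one racy store */
        return prev;
}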
Signed-off-by: Qian Cai cai@lca.pw Signed-off-by: Andrew Morton akpm@linux-foundation.org Cc: Marco Elver elver@google.com Cc: Hugh Dickins hughd@google.com Link: http://lkml.kernel.org/r/20200402213748.2237-1-cai@lca.pw Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/swap_state.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c index 4ce014dc4571a..7c434fcfff0dd 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -511,10 +511,11 @@ static unsigned long swapin_nr_pages(unsigned long offset) return 1;
hits = atomic_xchg(&swapin_readahead_hits, 0); - pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages, + pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits, + max_pages, atomic_read(&last_readahead_pages)); if (!hits) - prev_offset = offset; + WRITE_ONCE(prev_offset, offset); atomic_set(&last_readahead_pages, pages);
return pages;
From: Johannes Weiner hannes@cmpxchg.org
[ Upstream commit abb242f57196dbaa108271575353a0453f6834ef ]
The move_lock is a per-memcg lock, but the VM accounting code that needs to acquire it comes from the page and follows page->mem_cgroup under RCU protection. That means that the page becomes unlocked not when we drop the move_lock, but when we update page->mem_cgroup. And that assignment doesn't imply any memory ordering. If that pointer write gets reordered against the reads of the page state - page_mapped, PageDirty etc. - the state may change while we rely on it being stable, and we can end up corrupting the counters.
Place an SMP memory barrier to make sure we're done with all page state by the time the new page->mem_cgroup becomes visible.
Also replace the open-coded move_lock with a lock_page_memcg() to make it more obvious what we're serializing against.
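In compressed form, the ordering the patch establishes; this is a sketch of the move path, not the complete function:

#include <linux/memcontrol.h>

static void example_move_account(struct page *page,
                                 struct mem_cgroup *from,
                                 struct mem_cgroup *to)
{
        lock_page_memcg(page);  /* serializes against stat updaters */

        /* ... migrate NR_FILE_MAPPED, NR_FILE_DIRTY etc. from -> to ... */

        smp_mb();               /* finish all state writes first ... */
        page->mem_cgroup = to;  /* ... then publish the new memcg */

        __unlock_page_memcg(from);
}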
Signed-off-by: Johannes Weiner hannes@cmpxchg.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Reviewed-by: Joonsoo Kim iamjoonsoo.kim@lge.com Reviewed-by: Shakeel Butt shakeelb@google.com Cc: Alex Shi alex.shi@linux.alibaba.com Cc: Hugh Dickins hughd@google.com Cc: "Kirill A. Shutemov" kirill@shutemov.name Cc: Michal Hocko mhocko@suse.com Cc: Roman Gushchin guro@fb.com Cc: Balbir Singh bsingharora@gmail.com Link: http://lkml.kernel.org/r/20200508183105.225460-3-hannes@cmpxchg.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- mm/memcontrol.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 402c8bc65e08d..ca1632850fb76 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5489,7 +5489,6 @@ static int mem_cgroup_move_account(struct page *page, { struct lruvec *from_vec, *to_vec; struct pglist_data *pgdat; - unsigned long flags; unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1; int ret; bool anon; @@ -5516,18 +5515,13 @@ static int mem_cgroup_move_account(struct page *page, from_vec = mem_cgroup_lruvec(pgdat, from); to_vec = mem_cgroup_lruvec(pgdat, to);
- spin_lock_irqsave(&from->move_lock, flags); + lock_page_memcg(page);
if (!anon && page_mapped(page)) { __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages); }
- /* - * move_lock grabbed above and caller set from->moving_account, so - * mod_memcg_page_state will serialize updates to PageDirty. - * So mapping should be stable for dirty pages. - */ if (!anon && PageDirty(page)) { struct address_space *mapping = page_mapping(page);
@@ -5543,15 +5537,23 @@ static int mem_cgroup_move_account(struct page *page, }
/* + * All state has been migrated, let's switch to the new memcg. + * * It is safe to change page->mem_cgroup here because the page - * is referenced, charged, and isolated - we can't race with - * uncharging, charging, migration, or LRU putback. + * is referenced, charged, isolated, and locked: we can't race + * with (un)charging, migration, LRU putback, or anything else + * that would rely on a stable page->mem_cgroup. + * + * Note that lock_page_memcg is a memcg lock, not a page lock, + * to save space. As soon as we switch page->mem_cgroup to a + * new memcg that isn't locked, the above state can change + * concurrently again. Make sure we're truly done with it. */ + smp_mb();
- /* caller should have done css_get */ - page->mem_cgroup = to; + page->mem_cgroup = to; /* caller should have done css_get */
- spin_unlock_irqrestore(&from->move_lock, flags); + __unlock_page_memcg(from);
ret = 0;
From: Madhuparna Bhowmik madhuparnabhowmik10@gmail.com
[ Upstream commit e1c3cdb26ab881b77486dc50370356a349077c74 ]
Fields of md (mport_dev) are set after cdev_device_add(). However, the file operation callbacks can be called as soon as cdev_device_add() returns, so accesses to fields of md in the callbacks can race with the rest of the mport_cdev_add() function.

One such example is INIT_LIST_HEAD(&md->portwrites) in mport_cdev_add(): the list is initialised after cdev_device_add(). This can race with list_add_tail(&pw_filter->md_node, &md->portwrites) in rio_mport_add_pw_filter(), which is called via unlocked_ioctl.

To avoid such data races, call cdev_device_add() only after initializing md.
Found by Linux Driver Verification project (linuxtesting.org).
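A hedged sketch of the ordering rule; struct example_dev, its fields and example_fops are placeholders, not the rio_mport_cdev types:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/spinlock.h>

static const struct file_operations example_fops;

struct example_dev {
        struct cdev cdev;
        struct device dev;
        struct list_head portwrites;
        spinlock_t pw_lock;
};

/* fops can run the moment cdev_device_add() succeeds, so everything
 * they touch must be initialized before the call */
static int example_cdev_add(struct example_dev *md)
{
        cdev_init(&md->cdev, &example_fops);
        md->cdev.owner = THIS_MODULE;
        INIT_LIST_HEAD(&md->portwrites);        /* init everything ... */
        spin_lock_init(&md->pw_lock);

        return cdev_device_add(&md->cdev, &md->dev); /* ... then go live */
}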
Signed-off-by: Madhuparna Bhowmik madhuparnabhowmik10@gmail.com Signed-off-by: Andrew Morton akpm@linux-foundation.org Acked-by: Alexandre Bounine alex.bou9@gmail.com Cc: Matt Porter mporter@kernel.crashing.org Cc: Dan Carpenter dan.carpenter@oracle.com Cc: Mike Marshall hubcap@omnibond.com Cc: Thomas Gleixner tglx@linutronix.de Cc: Ira Weiny ira.weiny@intel.com Cc: Allison Randal allison@lohutok.net Cc: Pavel Andrianov andrianov@ispras.ru Link: http://lkml.kernel.org/r/20200426112950.1803-1-madhuparnabhowmik10@gmail.com Signed-off-by: Linus Torvalds torvalds@linux-foundation.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/rapidio/devices/rio_mport_cdev.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 10af330153b5e..0b85a80ae7ef6 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -2384,13 +2384,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) cdev_init(&md->cdev, &mport_fops); md->cdev.owner = THIS_MODULE;
- ret = cdev_device_add(&md->cdev, &md->dev); - if (ret) { - rmcd_error("Failed to register mport %d (err=%d)", - mport->id, ret); - goto err_cdev; - } - INIT_LIST_HEAD(&md->doorbells); spin_lock_init(&md->db_lock); INIT_LIST_HEAD(&md->portwrites); @@ -2410,6 +2403,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) #else md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; #endif + + ret = cdev_device_add(&md->cdev, &md->dev); + if (ret) { + rmcd_error("Failed to register mport %d (err=%d)", + mport->id, ret); + goto err_cdev; + } ret = rio_query_mport(mport, &attr); if (!ret) { md->properties.flags = attr.flags;
From: Boris Brezillon boris.brezillon@collabora.com
[ Upstream commit eb13fa0227417e84aecc3bd9c029d376e33474d3 ]
Looks like some drivers define MTD names with a colon in them, thus making mtdpart= parsing impossible. Let's fix the parser to gracefully handle that case: the last ':' in a partition definition sequence is considered instead of the first one.
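A userspace reduction of the parsing trick (the sample mtdparts string, with "soc:nand" as an MTD name containing a colon, is hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char s[] = "soc:nand:1M(boot),-(rest);second:2M(a)";
        char *semicol = strchr(s, ';');
        char *p;

        /* NUL the first ';' so strrchr() only sees this definition */
        if (semicol)
                *semicol = '\0';
        p = strrchr(s, ':');    /* last ':' is the <mtd-id> separator */
        if (semicol)
                *semicol = ';'; /* restore the ';' for later parsing */

        if (p)
                printf("mtd-id: %.*s\n", (int)(p - s), s); /* soc:nand */
        return 0;
}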
Signed-off-by: Boris Brezillon boris.brezillon@collabora.com Signed-off-by: Ron Minnich rminnich@google.com Tested-by: Ron Minnich rminnich@google.com Signed-off-by: Richard Weinberger richard@nod.at Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/mtd/parsers/cmdlinepart.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c index c86f2db8c882d..0625b25620ca7 100644 --- a/drivers/mtd/parsers/cmdlinepart.c +++ b/drivers/mtd/parsers/cmdlinepart.c @@ -218,12 +218,29 @@ static int mtdpart_setup_real(char *s) struct cmdline_mtd_partition *this_mtd; struct mtd_partition *parts; int mtd_id_len, num_parts; - char *p, *mtd_id; + char *p, *mtd_id, *semicol; + + /* + * Replace the first ';' by a NULL char so strrchr can work + * properly. + */ + semicol = strchr(s, ';'); + if (semicol) + *semicol = '\0';
mtd_id = s;
- /* fetch <mtd-id> */ - p = strchr(s, ':'); + /* + * fetch <mtd-id>. We use strrchr to ignore all ':' that could + * be present in the MTD name, only the last one is interpreted + * as an <mtd-id>/<part-definition> separator. + */ + p = strrchr(s, ':'); + + /* Restore the ';' now. */ + if (semicol) + *semicol = ';'; + if (!p) { pr_err("no mtd-id\n"); return -EINVAL;
From: Thomas Gleixner tglx@linutronix.de
[ Upstream commit a7ef9ba986b5fae9d80f8a7b31db0423687efe4e ]
Prevent the compiler from uninlining and creating traceable/probe-able functions, as this is invoked _after_ context tracking has switched to CONTEXT_USER and RCU idle.
Signed-off-by: Thomas Gleixner tglx@linutronix.de Reviewed-by: Alexandre Chartre alexandre.chartre@oracle.com Acked-by: Peter Zijlstra peterz@infradead.org Link: https://lkml.kernel.org/r/20200505134340.902709267@linutronix.de Signed-off-by: Sasha Levin sashal@kernel.org --- arch/x86/include/asm/nospec-branch.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 5c24a7b351665..b222a35959467 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -320,7 +320,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear); * combination with microcode which triggers a CPU buffer flush when the * instruction is executed. */ -static inline void mds_clear_cpu_buffers(void) +static __always_inline void mds_clear_cpu_buffers(void) { static const u16 ds = __KERNEL_DS;
@@ -341,7 +341,7 @@ static inline void mds_clear_cpu_buffers(void) * * Clear CPU buffers if the corresponding static key is enabled */ -static inline void mds_user_clear_cpu_buffers(void) +static __always_inline void mds_user_clear_cpu_buffers(void) { if (static_branch_likely(&mds_user_clear)) mds_clear_cpu_buffers();
From: Chuck Lever chuck.lever@oracle.com
[ Upstream commit 5be5945864ea143fda628e8179c8474457af1f43 ]
When sunrpc trace points are not enabled, the recorded task ID information alone is not helpful.
Signed-off-by: Chuck Lever chuck.lever@oracle.com Signed-off-by: Anna Schumaker Anna.Schumaker@Netapp.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/nfs/nfstrace.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 361cc10d6f95d..c8081d2b4166a 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -1147,7 +1147,12 @@ TRACE_EVENT(nfs_xdr_status, __field(unsigned int, task_id) __field(unsigned int, client_id) __field(u32, xid) + __field(int, version) __field(unsigned long, error) + __string(program, + xdr->rqst->rq_task->tk_client->cl_program->name) + __string(procedure, + xdr->rqst->rq_task->tk_msg.rpc_proc->p_name) ),
TP_fast_assign( @@ -1157,13 +1162,19 @@ TRACE_EVENT(nfs_xdr_status, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->xid = be32_to_cpu(rqstp->rq_xid); + __entry->version = task->tk_client->cl_vers; __entry->error = error; + __assign_str(program, + task->tk_client->cl_program->name) + __assign_str(procedure, task->tk_msg.rpc_proc->p_name) ),
TP_printk( - "task:%u@%d xid=0x%08x error=%ld (%s)", + "task:%u@%d xid=0x%08x %sv%d %s error=%ld (%s)", __entry->task_id, __entry->client_id, __entry->xid, - -__entry->error, nfs_show_status(__entry->error) + __get_str(program), __entry->version, + __get_str(procedure), -__entry->error, + nfs_show_status(__entry->error) ) );
From: Alex Williamson alex.williamson@redhat.com
[ Upstream commit 5c5866c593bbd444d0339ede6a8fb5f14ff66d72 ]
The next use of the device will generate an underflow from the stale reference.
Cc: Qian Cai cai@lca.pw Fixes: 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx") Reported-by: Daniel Wagner dwagner@suse.de Reviewed-by: Cornelia Huck cohuck@redhat.com Tested-by: Daniel Wagner dwagner@suse.de Signed-off-by: Alex Williamson alex.williamson@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/vfio/pci/vfio_pci.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 12f7691e8b6ca..b669be5a20066 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -474,10 +474,14 @@ static void vfio_pci_release(void *device_data) if (!(--vdev->refcnt)) { vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); - if (vdev->err_trigger) + if (vdev->err_trigger) { eventfd_ctx_put(vdev->err_trigger); - if (vdev->req_trigger) + vdev->err_trigger = NULL; + } + if (vdev->req_trigger) { eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } }
mutex_unlock(&vdev->reflck->lock);
From: Zhang Xiaoxu zhangxiaoxu5@huawei.com
[ Upstream commit 95a3d8f3af9b0d63b43f221b630beaab9739d13a ]
When running xfstests generic/451, there is a BUG at mm/memcontrol.c:

page:ffffea000560f2c0 refcount:2 mapcount:0 mapping:000000008544e0ea index:0xf
mapping->aops:cifs_addr_ops dentry name:"tst-aio-dio-cycle-write.451"
flags: 0x2fffff80000001(locked)
raw: 002fffff80000001 ffffc90002023c50 ffffea0005280088 ffff88815cda0210
raw: 000000000000000f 0000000000000000 00000002ffffffff ffff88817287d000
page dumped because: VM_BUG_ON_PAGE(page->mem_cgroup)
page->mem_cgroup:ffff88817287d000
------------[ cut here ]------------
kernel BUG at mm/memcontrol.c:2659!
invalid opcode: 0000 [#1] SMP
CPU: 2 PID: 2038 Comm: xfs_io Not tainted 5.8.0-rc1 #44
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20190727_073836-buildvm-ppc64le-16.ppc.4
RIP: 0010:commit_charge+0x35/0x50
Code: 0d 48 83 05 54 b2 02 05 01 48 89 77 38 c3 48 c7 c6 78 4a ea ba 48 83 05 38 b2 02 05 01 e8 63 0d9
RSP: 0018:ffffc90002023a50 EFLAGS: 00010202
RAX: 0000000000000000 RBX: ffff88817287d000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffff88817ac97ea0 RDI: ffff88817ac97ea0
RBP: ffffea000560f2c0 R08: 0000000000000203 R09: 0000000000000005
R10: 0000000000000030 R11: ffffc900020237a8 R12: 0000000000000000
R13: 0000000000000001 R14: 0000000000000001 R15: ffff88815a1272c0
FS: 00007f5071ab0800(0000) GS:ffff88817ac80000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000055efcd5ca000 CR3: 000000015d312000 CR4: 00000000000006e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 mem_cgroup_charge+0x166/0x4f0
 __add_to_page_cache_locked+0x4a9/0x710
 add_to_page_cache_locked+0x15/0x20
 cifs_readpages+0x217/0x1270
 read_pages+0x29a/0x670
 page_cache_readahead_unbounded+0x24f/0x390
 __do_page_cache_readahead+0x3f/0x60
 ondemand_readahead+0x1f1/0x470
 page_cache_async_readahead+0x14c/0x170
 generic_file_buffered_read+0x5df/0x1100
 generic_file_read_iter+0x10c/0x1d0
 cifs_strict_readv+0x139/0x170
 new_sync_read+0x164/0x250
 __vfs_read+0x39/0x60
 vfs_read+0xb5/0x1e0
 ksys_pread64+0x85/0xf0
 __x64_sys_pread64+0x22/0x30
 do_syscall_64+0x69/0x150
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7f5071fcb1af
Code: Bad RIP value.
RSP: 002b:00007ffde2cdb8e0 EFLAGS: 00000293 ORIG_RAX: 0000000000000011
RAX: ffffffffffffffda RBX: 00007ffde2cdb990 RCX: 00007f5071fcb1af
RDX: 0000000000001000 RSI: 000055efcd5ca000 RDI: 0000000000000003
RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000001000 R11: 0000000000000293 R12: 0000000000000001
R13: 000000000009f000 R14: 0000000000000000 R15: 0000000000001000
Modules linked in:
---[ end trace 725fa14a3e1af65c ]---
Since commit 3fea5a499d57 ("mm: memcontrol: convert page cache to a new mem_cgroup_charge() API") no longer cancels the page charge on failure, the same page can be added to the page cache twice:

thread1                                   | thread2
cifs_readpages
readpages_get_pages
add_to_page_cache_locked(head, index=n)=0
                                          | readpages_get_pages
                                          | add_to_page_cache_locked(head, index=n+1)=0
add_to_page_cache_locked(head, index=n+1)=-EEXIST

The next loop iteration then runs with the list head page's index=n+1 and page->mapping not NULL:

readpages_get_pages
add_to_page_cache_locked(head, index=n+1)
commit_charge
VM_BUG_ON_PAGE
So we should not start the next loop iteration once adding any page to the page cache has failed.
Reported-by: Hulk Robot hulkci@huawei.com Signed-off-by: Zhang Xiaoxu zhangxiaoxu5@huawei.com Signed-off-by: Steve French stfrench@microsoft.com Acked-by: Ronnie Sahlberg lsahlber@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/cifs/file.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 14ae341755d47..31d578739341b 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4269,7 +4269,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break;
__SetPageLocked(page); - if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { + rc = add_to_page_cache_locked(page, mapping, page->index, gfp); + if (rc) { __ClearPageLocked(page); break; } @@ -4285,6 +4286,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned num_pages) { int rc; + int err = 0; struct list_head tmplist; struct cifsFileInfo *open_file = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); @@ -4329,7 +4331,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, * the order of declining indexes. When we put the pages in * the rdata->pages, then we want them in increasing order. */ - while (!list_empty(page_list)) { + while (!list_empty(page_list) && !err) { unsigned int i, nr_pages, bytes, rsize; loff_t offset; struct page *page, *tpage; @@ -4362,9 +4364,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, return 0; }
- rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, + nr_pages = 0; + err = readpages_get_pages(mapping, page_list, rsize, &tmplist, &nr_pages, &offset, &bytes); - if (rc) { + if (!nr_pages) { add_credits_and_wake_if(server, credits, 0); break; }
From: Sagi Grimberg sagi@grimberg.me
[ Upstream commit 3b4b19721ec652ad2c4fe51dfbe5124212b5f581 ]
Revert fab7772bfbcf ("nvme-multipath: revalidate nvme_ns_head gendisk in nvme_validate_ns")
When adding a new namespace to the head disk (via nvme_mpath_set_live) we will see a partition scan, which triggers I/O on the mpath device node. This process will usually be triggered from the scan_work, which holds the scan_lock. If that I/O blocks (for example, after an ANA change we may have only paths that are present but not accessible), this can deadlock on the head disk bd_mutex, as both the partition scan I/O takes it and the head disk revalidation takes it to check for resize (also triggered from scan_work on a different path). See trace [1].
The mpath disk revalidation was originally added to detect online disk size changes, but this is no longer needed since commit cb224c3af4df ("nvme: Convert to use set_capacity_revalidate_and_notify") which already updates resize info without unnecessarily revalidating the disk (the mpath disk doesn't even implement a .revalidate_disk fop).
[1]: -- kernel: INFO: task kworker/u65:9:494 blocked for more than 241 seconds. kernel: Tainted: G OE 5.3.5-050305-generic #201910071830 kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. kernel: kworker/u65:9 D 0 494 2 0x80004000 kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core] kernel: Call Trace: kernel: __schedule+0x2b9/0x6c0 kernel: schedule+0x42/0xb0 kernel: schedule_preempt_disabled+0xe/0x10 kernel: __mutex_lock.isra.0+0x182/0x4f0 kernel: __mutex_lock_slowpath+0x13/0x20 kernel: mutex_lock+0x2e/0x40 kernel: revalidate_disk+0x63/0xa0 kernel: __nvme_revalidate_disk+0xfe/0x110 [nvme_core] kernel: nvme_revalidate_disk+0xa4/0x160 [nvme_core] kernel: ? evict+0x14c/0x1b0 kernel: revalidate_disk+0x2b/0xa0 kernel: nvme_validate_ns+0x49/0x940 [nvme_core] kernel: ? blk_mq_free_request+0xd2/0x100 kernel: ? __nvme_submit_sync_cmd+0xbe/0x1e0 [nvme_core] kernel: nvme_scan_work+0x24f/0x380 [nvme_core] kernel: process_one_work+0x1db/0x380 kernel: worker_thread+0x249/0x400 kernel: kthread+0x104/0x140 kernel: ? process_one_work+0x380/0x380 kernel: ? kthread_park+0x80/0x80 kernel: ret_from_fork+0x1f/0x40 ... kernel: INFO: task kworker/u65:1:2630 blocked for more than 241 seconds. kernel: Tainted: G OE 5.3.5-050305-generic #201910071830 kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. kernel: kworker/u65:1 D 0 2630 2 0x80004000 kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core] kernel: Call Trace: kernel: __schedule+0x2b9/0x6c0 kernel: schedule+0x42/0xb0 kernel: io_schedule+0x16/0x40 kernel: do_read_cache_page+0x438/0x830 kernel: ? __switch_to_asm+0x34/0x70 kernel: ? file_fdatawait_range+0x30/0x30 kernel: read_cache_page+0x12/0x20 kernel: read_dev_sector+0x27/0xc0 kernel: read_lba+0xc1/0x220 kernel: ? kmem_cache_alloc_trace+0x19c/0x230 kernel: efi_partition+0x1e6/0x708 kernel: ? vsnprintf+0x39e/0x4e0 kernel: ? snprintf+0x49/0x60 kernel: check_partition+0x154/0x244 kernel: rescan_partitions+0xae/0x280 kernel: __blkdev_get+0x40f/0x560 kernel: blkdev_get+0x3d/0x140 kernel: __device_add_disk+0x388/0x480 kernel: device_add_disk+0x13/0x20 kernel: nvme_mpath_set_live+0x119/0x140 [nvme_core] kernel: nvme_update_ns_ana_state+0x5c/0x60 [nvme_core] kernel: nvme_set_ns_ana_state+0x1e/0x30 [nvme_core] kernel: nvme_parse_ana_log+0xa1/0x180 [nvme_core] kernel: ? nvme_update_ns_ana_state+0x60/0x60 [nvme_core] kernel: nvme_mpath_add_disk+0x47/0x90 [nvme_core] kernel: nvme_validate_ns+0x396/0x940 [nvme_core] kernel: ? blk_mq_free_request+0xd2/0x100 kernel: nvme_scan_work+0x24f/0x380 [nvme_core] kernel: process_one_work+0x1db/0x380 kernel: worker_thread+0x249/0x400 kernel: kthread+0x104/0x140 kernel: ? process_one_work+0x380/0x380 kernel: ? kthread_park+0x80/0x80 kernel: ret_from_fork+0x1f/0x40 --
Fixes: fab7772bfbcf ("nvme-multipath: revalidate nvme_ns_head gendisk in nvme_validate_ns") Signed-off-by: Anton Eidelman anton@lightbitslabs.com Signed-off-by: Sagi Grimberg sagi@grimberg.me Signed-off-by: Christoph Hellwig hch@lst.de Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/core.c | 1 - 1 file changed, 1 deletion(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f01fe2d910b54..bbf52e88f045a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1864,7 +1864,6 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); - revalidate_disk(ns->head->disk); } #endif }
From: Markus Theil markus.theil@tu-ilmenau.de
[ Upstream commit 5af7fef39d7952c0f5551afa7b821ee7b6c9dd3d ]
When using 802.1X over mesh networks, at first an ordinary mesh peering is established; then the 802.1X EAPOL dialog happens; afterwards an authenticated mesh peering exchange (AMPE) takes place; finally the peering is complete and we can set the STA authorized flag.
As 802.1X is an intermediate step here and key material has not yet been exchanged between the stations, we have to skip mesh path lookup for these EAPOL frames. Otherwise the already configured mesh group encryption key would be used to send a mesh path request which no one can decipher, because we haven't yet established key material on both peers, as we would have with SAE and directly using AMPE.
Signed-off-by: Markus Theil markus.theil@tu-ilmenau.de Link: https://lore.kernel.org/r/20200617082637.22670-2-markus.theil@tu-ilmenau.de [remove pointless braces, remove unnecessary local variable, the list can only process one such frame (or its fragments)] Signed-off-by: Johannes Berg johannes.berg@intel.com Signed-off-by: Sasha Levin sashal@kernel.org --- net/mac80211/tx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 30201aeb426cf..f029e75ec815a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3913,6 +3913,9 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, skb->prev = NULL; skb->next = NULL;
+ if (skb->protocol == sdata->control_port_protocol) + ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; + skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, ctrl_flags); if (IS_ERR(skb)) @@ -5096,7 +5099,8 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, return -EINVAL;
if (proto == sdata->control_port_protocol) - ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO | + IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
if (unencrypted) flags = IEEE80211_TX_INTFL_DONT_ENCRYPT;
From: Javed Hasan jhasan@marvell.com
[ Upstream commit 71f2bf85e90d938d4a9ef9dd9bfa8d9b0b6a03f7 ]
Handle the extra kref taken by fc_rport_lookup() when the rdata is already present in the list.

This issue was leading to a memory leak. Trace from the KMEMLEAK tool:
unreferenced object 0xffff8888259e8780 (size 512): comm "kworker/2:1", pid 182614, jiffies 4433237386 (age 113021.971s) hex dump (first 32 bytes): 58 0a ec cf 83 88 ff ff 00 00 00 00 00 00 00 00 01 00 00 00 08 00 00 00 13 7d f0 1e 0e 00 00 10 backtrace: [<000000006b25760f>] fc_rport_recv_req+0x3c6/0x18f0 [libfc] [<00000000f208d994>] fc_lport_recv_els_req+0x120/0x8a0 [libfc] [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc] [<00000000ad5be37b>] qedf_ll2_process_skb+0x73d/0xad0 [qedf] [<00000000e0eb6893>] process_one_work+0x382/0x6c0 [<000000002dfd9e21>] worker_thread+0x57/0x5c0 [<00000000b648204f>] kthread+0x1a0/0x1c0 [<0000000072f5ab20>] ret_from_fork+0x35/0x40 [<000000001d5c05d8>] 0xffffffffffffffff
Below is the log sequence which leads to the memory leak. Here we get a nested "Received PLOGI request" for the same port, and this request leads to fc_rport_create() being called twice for the same rport.
kernel: host1: rport fffce5: Received PLOGI request kernel: host1: rport fffce5: Received PLOGI in INIT state kernel: host1: rport fffce5: Port is Ready kernel: host1: rport fffce5: Received PRLI request while in state Ready kernel: host1: rport fffce5: PRLI rspp type 8 active 1 passive 0 kernel: host1: rport fffce5: Received LOGO request while in state Ready kernel: host1: rport fffce5: Delete port kernel: host1: rport fffce5: Received PLOGI request kernel: host1: rport fffce5: Received PLOGI in state Delete - send busy
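A generic sketch of the refcount rule; node_lookup(), node_alloc() and node_release() are hypothetical helpers standing in for the libfc ones, and the list's own reference (held under the caller's mutex) is what keeps the object alive after the put:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/types.h>

struct node {
        struct kref kref;
        u32 id;
        struct list_head list;
};

static struct node *node_lookup(struct list_head *head, u32 id);
static struct node *node_alloc(struct list_head *head, u32 id);
static void node_release(struct kref *kref);

static struct node *node_find_or_create(struct list_head *head, u32 id)
{
        struct node *n = node_lookup(head, id); /* +1 kref on success */

        if (n) {
                /* give back the lookup reference, or every duplicate
                 * request (here: a nested PLOGI) leaks one count */
                kref_put(&n->kref, node_release);
                return n;
        }
        return node_alloc(head, id);    /* created with kref == 1 */
}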
Link: https://lore.kernel.org/r/20200622101212.3922-2-jhasan@marvell.com Reviewed-by: Girish Basrur gbasrur@marvell.com Reviewed-by: Saurav Kashyap skashyap@marvell.com Reviewed-by: Shyam Sundar ssundar@marvell.com Signed-off-by: Javed Hasan jhasan@marvell.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/libfc/fc_rport.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 6bb8917b99a19..aabf51df3c02f 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) lockdep_assert_held(&lport->disc.disc_mutex);
rdata = fc_rport_lookup(lport, port_id); - if (rdata) + if (rdata) { + kref_put(&rdata->kref, fc_rport_destroy); return rdata; + }
if (lport->rport_priv_size > 0) rport_priv_size = lport->rport_priv_size;
From: Javed Hasan jhasan@marvell.com
[ Upstream commit 823a65409c8990f64c5693af98ce0e7819975cba ]
When an rport event (RPORT_EV_READY) is updated without work being queued, avoid taking an additional reference.
This issue was leading to a memory leak. Trace from the KMEMLEAK tool:
unreferenced object 0xffff8888259e8780 (size 512): comm "kworker/2:1", jiffies 4433237386 (age 113021.971s) hex dump (first 32 bytes): 58 0a ec cf 83 88 ff ff 00 00 00 00 00 00 00 00 01 00 00 00 08 00 00 00 13 7d f0 1e 0e 00 00 10 backtrace: [<000000006b25760f>] fc_rport_recv_req+0x3c6/0x18f0 [libfc] [<00000000f208d994>] fc_lport_recv_els_req+0x120/0x8a0 [libfc] [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc] [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc] [<00000000ad5be37b>] qedf_ll2_process_skb+0x73d/0xad0 [qedf] [<00000000e0eb6893>] process_one_work+0x382/0x6c0 [<000000002dfd9e21>] worker_thread+0x57/0x5c0 [<00000000b648204f>] kthread+0x1a0/0x1c0 [<0000000072f5ab20>] ret_from_fork+0x35/0x40 [<000000001d5c05d8>] 0xffffffffffffffff
Below is the log sequence which leads to the memory leak. Here we get RPORT_EV_READY and RPORT_EV_STOP back to back, which leads to the RPORT_EV_READY event being overwritten by RPORT_EV_STOP. Because of this, the kref count ends up incremented by one with no matching decrement.
kernel: host0: rport fffce5: Received PLOGI request
kernel: host0: rport fffce5: Received PLOGI in INIT state
kernel: host0: rport fffce5: Port is Ready
kernel: host0: rport fffce5: Received PRLI request while in state Ready
kernel: host0: rport fffce5: PRLI rspp type 8 active 1 passive 0
kernel: host0: rport fffce5: Received LOGO request while in state Ready
kernel: host0: rport fffce5: Delete port
kernel: host0: rport fffce5: Received PLOGI request
kernel: host0: rport fffce5: Received PLOGI in state Delete - send busy
kernel: host0: rport fffce5: work event 3
kernel: host0: rport fffce5: lld callback ev 3
kernel: host0: rport fffce5: work delete
Link: https://lore.kernel.org/r/20200626094959.32151-1-jhasan@marvell.com Reviewed-by: Girish Basrur gbasrur@marvell.com Reviewed-by: Saurav Kashyap skashyap@marvell.com Reviewed-by: Shyam Sundar ssundar@marvell.com Signed-off-by: Javed Hasan jhasan@marvell.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/libfc/fc_rport.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index aabf51df3c02f..64500417c22ea 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -483,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
 
 	fc_rport_state_enter(rdata, RPORT_ST_DELETE);
 
-	kref_get(&rdata->kref);
-	if (rdata->event == RPORT_EV_NONE &&
-	    !queue_work(rport_event_queue, &rdata->event_work))
-		kref_put(&rdata->kref, fc_rport_destroy);
+	if (rdata->event == RPORT_EV_NONE) {
+		kref_get(&rdata->kref);
+		if (!queue_work(rport_event_queue, &rdata->event_work))
+			kref_put(&rdata->kref, fc_rport_destroy);
+	}
 
 	rdata->event = event;
 }
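To spell out the leak the hunk above closes, here is an annotated sketch of the old flow (editorial commentary, not new code):

	/*
	 * Old flow, sketched:
	 *
	 *   fc_rport_enter_delete(EV_READY):
	 *       kref_get();                  event == EV_NONE, so
	 *       queue_work() succeeds        the work item owns one ref
	 *
	 *   fc_rport_enter_delete(EV_STOP):
	 *       kref_get();                  <-- unconditional extra get
	 *       event != EV_NONE, so queue_work() is never attempted and
	 *       the compensating kref_put() branch is skipped
	 *
	 * The work item runs once and drops one reference; the second
	 * kref_get() is never balanced, leaking the rport. The fix takes
	 * the reference only when a queueing attempt is actually made.
	 */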
From: Andy Lutomirski luto@kernel.org
[ Upstream commit a61fa2799ef9bf6c4f54cf7295036577cececc72 ]
Clear the weird flags before logging to improve strace output -- logging results while, say, TF is set does no one any favors.
Signed-off-by: Andy Lutomirski luto@kernel.org Signed-off-by: Thomas Gleixner tglx@linutronix.de Link: https://lkml.kernel.org/r/907bfa5a42d4475b8245e18b67a04b13ca51ffdb.159319197... Signed-off-by: Sasha Levin sashal@kernel.org --- tools/testing/selftests/x86/syscall_nt.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
index 02309a1950413..a765f62ee7668 100644
--- a/tools/testing/selftests/x86/syscall_nt.c
+++ b/tools/testing/selftests/x86/syscall_nt.c
@@ -59,6 +59,7 @@ static void do_it(unsigned long extraflags)
 	set_eflags(get_eflags() | extraflags);
 	syscall(SYS_getpid);
 	flags = get_eflags();
+	set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
 	if ((flags & extraflags) == extraflags) {
 		printf("[OK]\tThe syscall worked and flags are still set\n");
 	} else {
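For context, the test drives EFLAGS through small wrappers; a minimal sketch of those helpers follows (the names match the selftest, the exact bodies here are an assumption):

	static unsigned long get_eflags(void)
	{
		unsigned long eflags;

		/* pushf/pop reads the current EFLAGS into a variable */
		asm volatile ("pushf\n\tpop %0" : "=rm" (eflags));
		return eflags;
	}

	static void set_eflags(unsigned long eflags)
	{
		/* push/popf loads EFLAGS, including TF/NT/AC, from the variable */
		asm volatile ("push %0\n\tpopf" : : "rm" (eflags) : "flags");
	}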
From: Zeng Tao prime.zeng@hisilicon.com
[ Upstream commit b872d0640840018669032b20b6375a478ed1f923 ]
The vfio_pci_release() call frees and clears the error and request eventfd ctxs while those ctxs could simultaneously be in use in functions such as vfio_pci_request(). The ctxs are expected to be protected under the vdev->igate mutex, but that protection is missing in vfio_pci_release().

This issue was introduced by commit 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx"), and since commit 5c5866c593bb ("vfio/pci: Clear error and request eventfd ctx after releasing") it is very easy to trigger a kernel panic like this:
[ 9513.904346] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
[ 9513.913091] Mem abort info:
[ 9513.915871]   ESR = 0x96000006
[ 9513.918912]   EC = 0x25: DABT (current EL), IL = 32 bits
[ 9513.924198]   SET = 0, FnV = 0
[ 9513.927238]   EA = 0, S1PTW = 0
[ 9513.930364] Data abort info:
[ 9513.933231]   ISV = 0, ISS = 0x00000006
[ 9513.937048]   CM = 0, WnR = 0
[ 9513.940003] user pgtable: 4k pages, 48-bit VAs, pgdp=0000007ec7d12000
[ 9513.946414] [0000000000000008] pgd=0000007ec7d13003, p4d=0000007ec7d13003, pud=0000007ec728c003, pmd=0000000000000000
[ 9513.956975] Internal error: Oops: 96000006 [#1] PREEMPT SMP
[ 9513.962521] Modules linked in: vfio_pci vfio_virqfd vfio_iommu_type1 vfio hclge hns3 hnae3 [last unloaded: vfio_pci]
[ 9513.972998] CPU: 4 PID: 1327 Comm: bash Tainted: G        W         5.8.0-rc4+ #3
[ 9513.980443] Hardware name: Huawei TaiShan 2280 V2/BC82AMDC, BIOS 2280-V2 CS V3.B270.01 05/08/2020
[ 9513.989274] pstate: 80400089 (Nzcv daIf +PAN -UAO BTYPE=--)
[ 9513.994827] pc : _raw_spin_lock_irqsave+0x48/0x88
[ 9513.999515] lr : eventfd_signal+0x6c/0x1b0
[ 9514.003591] sp : ffff800038a0b960
[ 9514.006889] x29: ffff800038a0b960 x28: ffff007ef7f4da10
[ 9514.012175] x27: ffff207eefbbfc80 x26: ffffbb7903457000
[ 9514.017462] x25: ffffbb7912191000 x24: ffff007ef7f4d400
[ 9514.022747] x23: ffff20be6e0e4c00 x22: 0000000000000008
[ 9514.028033] x21: 0000000000000000 x20: 0000000000000000
[ 9514.033321] x19: 0000000000000008 x18: 0000000000000000
[ 9514.038606] x17: 0000000000000000 x16: ffffbb7910029328
[ 9514.043893] x15: 0000000000000000 x14: 0000000000000001
[ 9514.049179] x13: 0000000000000000 x12: 0000000000000002
[ 9514.054466] x11: 0000000000000000 x10: 0000000000000a00
[ 9514.059752] x9 : ffff800038a0b840 x8 : ffff007ef7f4de60
[ 9514.065038] x7 : ffff007fffc96690 x6 : fffffe01faffb748
[ 9514.070324] x5 : 0000000000000000 x4 : 0000000000000000
[ 9514.075609] x3 : 0000000000000000 x2 : 0000000000000001
[ 9514.080895] x1 : ffff007ef7f4d400 x0 : 0000000000000000
[ 9514.086181] Call trace:
[ 9514.088618]  _raw_spin_lock_irqsave+0x48/0x88
[ 9514.092954]  eventfd_signal+0x6c/0x1b0
[ 9514.096691]  vfio_pci_request+0x84/0xd0 [vfio_pci]
[ 9514.101464]  vfio_del_group_dev+0x150/0x290 [vfio]
[ 9514.106234]  vfio_pci_remove+0x30/0x128 [vfio_pci]
[ 9514.111007]  pci_device_remove+0x48/0x108
[ 9514.115001]  device_release_driver_internal+0x100/0x1b8
[ 9514.120200]  device_release_driver+0x28/0x38
[ 9514.124452]  pci_stop_bus_device+0x68/0xa8
[ 9514.128528]  pci_stop_and_remove_bus_device+0x20/0x38
[ 9514.133557]  pci_iov_remove_virtfn+0xb4/0x128
[ 9514.137893]  sriov_disable+0x3c/0x108
[ 9514.141538]  pci_disable_sriov+0x28/0x38
[ 9514.145445]  hns3_pci_sriov_configure+0x48/0xb8 [hns3]
[ 9514.150558]  sriov_numvfs_store+0x110/0x198
[ 9514.154724]  dev_attr_store+0x44/0x60
[ 9514.158373]  sysfs_kf_write+0x5c/0x78
[ 9514.162018]  kernfs_fop_write+0x104/0x210
[ 9514.166010]  __vfs_write+0x48/0x90
[ 9514.169395]  vfs_write+0xbc/0x1c0
[ 9514.172694]  ksys_write+0x74/0x100
[ 9514.176079]  __arm64_sys_write+0x24/0x30
[ 9514.179987]  el0_svc_common.constprop.4+0x110/0x200
[ 9514.184842]  do_el0_svc+0x34/0x98
[ 9514.188144]  el0_svc+0x14/0x40
[ 9514.191185]  el0_sync_handler+0xb0/0x2d0
[ 9514.195088]  el0_sync+0x140/0x180
[ 9514.198389] Code: b9001020 d2800000 52800022 f9800271 (885ffe61)
[ 9514.204455] ---[ end trace 648de00c8406465f ]---
[ 9514.212308] note: bash[1327] exited with preempt_count 1
Cc: Qian Cai cai@lca.pw Cc: Alex Williamson alex.williamson@redhat.com Fixes: 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx") Signed-off-by: Zeng Tao prime.zeng@hisilicon.com Signed-off-by: Alex Williamson alex.williamson@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/vfio/pci/vfio_pci.c | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b669be5a20066..a72fd5309b09f 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -474,14 +474,19 @@ static void vfio_pci_release(void *device_data)
 	if (!(--vdev->refcnt)) {
 		vfio_spapr_pci_eeh_release(vdev->pdev);
 		vfio_pci_disable(vdev);
+		mutex_lock(&vdev->igate);
 		if (vdev->err_trigger) {
 			eventfd_ctx_put(vdev->err_trigger);
 			vdev->err_trigger = NULL;
 		}
+		mutex_unlock(&vdev->igate);
+
+		mutex_lock(&vdev->igate);
 		if (vdev->req_trigger) {
 			eventfd_ctx_put(vdev->req_trigger);
 			vdev->req_trigger = NULL;
 		}
+		mutex_unlock(&vdev->igate);
 	}
 
 	mutex_unlock(&vdev->reflck->lock);
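For illustration, the signaling path this change races against looks roughly like the following (a simplified sketch, not the verbatim driver code); with the release path now also holding igate, the ctx cannot be freed out from under the signal:

	static int vfio_pci_request(void *device_data, unsigned int count)
	{
		struct vfio_pci_device *vdev = device_data;

		mutex_lock(&vdev->igate);
		if (vdev->req_trigger)
			eventfd_signal(vdev->req_trigger, 1);	/* ctx pinned by igate */
		mutex_unlock(&vdev->igate);

		return 0;
	}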
From: Qu Wenruo wqu@suse.com
[ Upstream commit fa91e4aa1716004ea8096d5185ec0451e206aea0 ]
[BUG] When running tests like generic/013 on a test device with btrfs quota enabled, it can easily lead to a data space leak, detected at unmount time:
BTRFS warning (device dm-3): qgroup 0/5 has unreleased space, type 0 rsv 4096
------------[ cut here ]------------
WARNING: CPU: 11 PID: 16386 at fs/btrfs/disk-io.c:4142 close_ctree+0x1dc/0x323 [btrfs]
RIP: 0010:close_ctree+0x1dc/0x323 [btrfs]
Call Trace:
 btrfs_put_super+0x15/0x17 [btrfs]
 generic_shutdown_super+0x72/0x110
 kill_anon_super+0x18/0x30
 btrfs_kill_super+0x17/0x30 [btrfs]
 deactivate_locked_super+0x3b/0xa0
 deactivate_super+0x40/0x50
 cleanup_mnt+0x135/0x190
 __cleanup_mnt+0x12/0x20
 task_work_run+0x64/0xb0
 __prepare_exit_to_usermode+0x1bc/0x1c0
 __syscall_return_slowpath+0x47/0x230
 do_syscall_64+0x64/0xb0
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
---[ end trace caf08beafeca2392 ]---
BTRFS error (device dm-3): qgroup reserved space leaked
[CAUSE]
In the offending case, the offending operations are:
2/6: writev f2X[269 1 0 0 0 0] [1006997,67,288] 0
2/7: truncate f2X[269 1 0 0 48 1026293] 18388 0
The following sequence of events could happen after the writev():

	CPU1 (writeback)                      | CPU2 (truncate)
	-----------------------------------------------------------------
	btrfs_writepages()                    |
	|- extent_write_cache_pages()         |
	   |- Got page for 1003520            |
	   |  1003520 is Dirty, no writeback  |
	   |  So (!clear_page_dirty_for_io()) |
	   |  gets called for it              |
	   |- Now page 1003520 is Clean.      |
	   |                                  | btrfs_setattr()
	   |                                  | |- btrfs_setsize()
	   |                                  |    |- truncate_setsize()
	   |                                  |       New i_size is 18388
	   |- __extent_writepage()            |
	   |  |- page_offset() > i_size       |
	   |     |- btrfs_invalidatepage()    |
	   |        |- Page is clean, so no   |
	   |           qgroup callback        |
	   |           executed               |
This means the qgroup reserved data space is not properly released in btrfs_invalidatepage(), because the page is Clean.

[FIX]
Instead of checking the dirty bit of a page, call btrfs_qgroup_free_data() unconditionally in btrfs_invalidatepage().

Since qgroup reservations are completely bound to the QGROUP_RESERVED bit of the io_tree, not to page status, this won't cause double freeing anyway.
Fixes: 0b34c261e235 ("btrfs: qgroup: Prevent qgroup->reserved from going subzero") CC: stable@vger.kernel.org # 4.14+ Reviewed-by: Josef Bacik josef@toxicpanda.com Signed-off-by: Qu Wenruo wqu@suse.com Signed-off-by: David Sterba dsterba@suse.com Signed-off-by: Sasha Levin sashal@kernel.org --- fs/btrfs/inode.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e9787b7b943a2..182e93a5b11d5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9044,20 +9044,17 @@ again:
 	/*
 	 * Qgroup reserved space handler
 	 * Page here will be either
-	 * 1) Already written to disk
-	 *    In this case, its reserved space is released from data rsv map
-	 *    and will be freed by delayed_ref handler finally.
-	 *    So even we call qgroup_free_data(), it won't decrease reserved
-	 *    space.
-	 * 2) Not written to disk
-	 *    This means the reserved space should be freed here. However,
-	 *    if a truncate invalidates the page (by clearing PageDirty)
-	 *    and the page is accounted for while allocating extent
-	 *    in btrfs_check_data_free_space() we let delayed_ref to
-	 *    free the entire extent.
+	 * 1) Already written to disk or ordered extent already submitted
+	 *    Then its QGROUP_RESERVED bit in io_tree is already cleaned.
+	 *    Qgroup will be handled by its qgroup_record then.
+	 *    btrfs_qgroup_free_data() call will do nothing here.
+	 *
+	 * 2) Not written to disk yet
+	 *    Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
+	 *    bit of its io_tree, and free the qgroup reserved data space.
+	 *    Since the IO will never happen for this page.
 	 */
-	if (PageDirty(page))
-		btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
+	btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
 	if (!inode_evicting) {
 		clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
 				 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
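Why the unconditional call cannot double-free, as a loose sketch under simplified, hypothetical names (the real state bit lives in the btrfs io_tree):

	/* Hypothetical model: reserved space is tracked by a per-range flag. */
	static void qgroup_free_range(struct range_tree *tree, u64 start, u64 len)
	{
		/* Returns how many bytes still had RESERVED set, and clears it. */
		u64 freed = range_clear_flag(tree, start, len, RANGE_RESERVED);

		/*
		 * A second or later call finds the flag already cleared, so
		 * freed == 0 and nothing is subtracted twice: the free is
		 * idempotent regardless of the page's dirty status.
		 */
		qgroup_account_freed(freed);
	}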
From: Thomas Richter tmricht@linux.ibm.com
[ Upstream commit 463538a383a27337cb83ae195e432a839a52d639 ]
Commit 5aa98879efe7 ("s390/cpum_sf: prohibit callchain data collection") prohibits call graph sampling for hardware events on s390. The information recorded is out of context and does not match.
On s390 this commit now breaks test case 68 Zstd perf.data compression/decompression.
Therefore omit call graph sampling on s390 in this test.
Output before:

[root@t35lp46 perf]# ./perf test -Fv 68
68: Zstd perf.data compression/decompression             :
--- start ---
Collecting compressed record file:
Error:
cycles: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'
---- end ----
Zstd perf.data compression/decompression: FAILED!
[root@t35lp46 perf]#
Output after:

[root@t35lp46 perf]# ./perf test -Fv 68
68: Zstd perf.data compression/decompression             :
--- start ---
Collecting compressed record file:
500+0 records in
500+0 records out
256000 bytes (256 kB, 250 KiB) copied, 0.00615638 s, 41.6 MB/s
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.004 MB /tmp/perf.data.X3M, compressed (original 0.002 MB, ratio is 3.609) ]
Checking compressed events stats:
# compressed : Zstd, level = 1, ratio = 4
COMPRESSED events: 1
2ELIFREPh---- end ----
Zstd perf.data compression/decompression: Ok
[root@t35lp46 perf]#
Signed-off-by: Thomas Richter tmricht@linux.ibm.com Reviewed-by: Sumanth Korikkar sumanthk@linux.ibm.com Cc: Heiko Carstens heiko.carstens@de.ibm.com Cc: Sven Schnelle svens@linux.ibm.com Cc: Vasily Gorbik gor@linux.ibm.com Link: http://lore.kernel.org/lkml/20200729135314.91281-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/tests/shell/record+zstd_comp_decomp.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
index 63a91ec473bb5..045723b3d9928 100755
--- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh
+++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
@@ -12,7 +12,8 @@ skip_if_no_z_record() {
 
 collect_z_record() {
 	echo "Collecting compressed record file:"
-	$perf_tool record -o $trace_file -g -z -F 5000 -- \
+	[[ "$(uname -m)" != s390x ]] && gflag='-g'
+	$perf_tool record -o $trace_file $gflag -z -F 5000 -- \
 		dd count=500 if=/dev/urandom of=/dev/null
 }
From: Quinn Tran qutran@marvell.com
[ Upstream commit 983f127603fac650fa34ee69db363e4615eaf9e7 ]
Current code will send PRLI with the FC-NVMe bit set for targets which support only FCP. This may result in issues with targets that do not understand NVMe, leaving them in a strange state. This patch restarts the login process by going back to PLOGI state. The PLOGI state will force the target to respond to the correct PRLI request.
Fixes: c76ae845ea836 ("scsi: qla2xxx: Add error handling for PLOGI ELS passthrough") Cc: stable@vger.kernel.org # 5.4 Link: https://lore.kernel.org/r/20191105150657.8092-2-hmadhani@marvell.com Reviewed-by: Ewan D. Milne emilne@redhat.com Signed-off-by: Quinn Tran qutran@marvell.com Signed-off-by: Himanshu Madhani hmadhani@marvell.com Signed-off-by: Martin K. Petersen martin.petersen@oracle.com Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/scsi/qla2xxx/qla_init.c | 28 ++++------------------------ drivers/scsi/qla2xxx/qla_iocb.c | 6 +++++- 2 files changed, 9 insertions(+), 25 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 2f2e059f4575e..62d2ee825c97a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1911,33 +1911,13 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 			    "%s %d %8phC post fc4 prli\n",
 			    __func__, __LINE__, ea->fcport->port_name);
 			ea->fcport->fc4f_nvme = 0;
-			qla24xx_post_prli_work(vha, ea->fcport);
 			return;
 		}
 
-		/* at this point both PRLI NVME & PRLI FCP failed */
-		if (N2N_TOPO(vha->hw)) {
-			if (ea->fcport->n2n_link_reset_cnt < 3) {
-				ea->fcport->n2n_link_reset_cnt++;
-				/*
-				 * remote port is not sending Plogi. Reset
-				 * link to kick start his state machine
-				 */
-				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
-			} else {
-				ql_log(ql_log_warn, vha, 0x2119,
-				    "%s %d %8phC Unable to reconnect\n",
-				    __func__, __LINE__, ea->fcport->port_name);
-			}
-		} else {
-			/*
-			 * switch connect. login failed. Take connection
-			 * down and allow relogin to retrigger
-			 */
-			ea->fcport->flags &= ~FCF_ASYNC_SENT;
-			ea->fcport->keep_nport_handle = 0;
-			qlt_schedule_sess_for_deletion(ea->fcport);
-		}
+		ea->fcport->flags &= ~FCF_ASYNC_SENT;
+		ea->fcport->keep_nport_handle = 0;
+		ea->fcport->logout_on_delete = 1;
+		qlt_schedule_sess_for_deletion(ea->fcport);
 		break;
 	}
 }
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2e272fc858ed1..aed4ce66e6cf9 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2773,6 +2773,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
 		ea.sp = sp;
 		qla24xx_handle_plogi_done_event(vha, &ea);
 		break;
+
 	case CS_IOCB_ERROR:
 		switch (fw_status[1]) {
 		case LSC_SCODE_PORTID_USED:
@@ -2843,6 +2844,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
 			    fw_status[0], fw_status[1], fw_status[2]);
 
 			fcport->flags &= ~FCF_ASYNC_SENT;
+			fcport->disc_state = DSC_LOGIN_FAILED;
 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 			break;
 		}
@@ -2855,6 +2857,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
 			    fw_status[0], fw_status[1], fw_status[2]);
 
 			sp->fcport->flags &= ~FCF_ASYNC_SENT;
+			sp->fcport->disc_state = DSC_LOGIN_FAILED;
 			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 			break;
 		}
@@ -2890,11 +2893,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
 		return -ENOMEM;
 	}
 
+	fcport->flags |= FCF_ASYNC_SENT;
+	fcport->disc_state = DSC_LOGIN_PEND;
 	elsio = &sp->u.iocb_cmd;
 	ql_dbg(ql_dbg_io, vha, 0x3073,
 	    "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
 
-	fcport->flags |= FCF_ASYNC_SENT;
 	sp->type = SRB_ELS_DCMD;
 	sp->name = "ELS_DCMD";
 	sp->fcport = fcport;
From: Hou Tao houtao1@huawei.com
[ Upstream commit c16f39d14a7e0ec59881fbdb22ae494907534384 ]
When CONFIG_MTD_UBI_FASTMAP is enabled, fm_anchor will be assigned a free PEB during ubi_wl_init() or ubi_update_fastmap(). However, if fastmap is not used or is disabled on the MTD device, the ubi_wl_entry related to this PEB will not be freed during detach.

Fix it by freeing the unused fastmap anchor during detach.
Fixes: f9c34bb52997 ("ubi: Fix producing anchor PEBs") Reported-by: syzbot+f317896aae32eb281a58@syzkaller.appspotmail.com Reviewed-by: Sascha Hauer s.hauer@pengutronix.de Signed-off-by: Hou Tao houtao1@huawei.com Signed-off-by: Richard Weinberger richard@nod.at Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/mtd/ubi/fastmap-wl.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 426820ab9afe1..b486250923c5a 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
 	return victim;
 }
 
+static inline void return_unused_peb(struct ubi_device *ubi,
+				     struct ubi_wl_entry *e)
+{
+	wl_tree_add(e, &ubi->free);
+	ubi->free_count++;
+}
+
 /**
  * return_unused_pool_pebs - returns unused PEB to the free tree.
  * @ubi: UBI device description object
@@ -52,8 +59,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
 
 	for (i = pool->used; i < pool->size; i++) {
 		e = ubi->lookuptbl[pool->pebs[i]];
-		wl_tree_add(e, &ubi->free);
-		ubi->free_count++;
+		return_unused_peb(ubi, e);
 	}
 }
 
@@ -361,6 +367,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
 	return_unused_pool_pebs(ubi, &ubi->fm_pool);
 	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
 
+	if (ubi->fm_anchor) {
+		return_unused_peb(ubi, ubi->fm_anchor);
+		ubi->fm_anchor = NULL;
+	}
+
 	if (ubi->fm) {
 		for (i = 0; i < ubi->fm->used_blocks; i++)
 			kfree(ubi->fm->e[i]);
From: Arnd Bergmann arnd@arndb.de
[ Upstream commit d68f4e43a46ff1f772ff73085f96d44eb4163e9d ]
The mt76_led_cleanup() function is called unconditionally, which leads to a link error when CONFIG_LEDS is a loadable module or disabled but mt76 is built-in:
drivers/net/wireless/mediatek/mt76/mac80211.o: In function `mt76_unregister_device':
mac80211.c:(.text+0x2ac): undefined reference to `led_classdev_unregister'
Use the same trick that guards the registration: an IS_ENABLED() check on the CONFIG_MT76_LEDS symbol, which indicates whether LEDs can be used or not.
Fixes: 36f7e2b2bb1d ("mt76: do not use devm API for led classdev") Signed-off-by: Arnd Bergmann arnd@arndb.de Acked-by: Felix Fietkau nbd@nbd.name Signed-off-by: Kalle Valo kvalo@codeaurora.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/net/wireless/mediatek/mt76/mac80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 7be5806a1c398..8bd191347b9fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -368,7 +368,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
 	struct ieee80211_hw *hw = dev->hw;
 
-	mt76_led_cleanup(dev);
+	if (IS_ENABLED(CONFIG_MT76_LEDS))
+		mt76_led_cleanup(dev);
 	mt76_tx_status_check(dev, NULL, true);
 	ieee80211_unregister_hw(hw);
 }
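The idiom relies on IS_ENABLED() expanding to a compile-time constant, so the dead branch and its symbol reference are discarded before link time. A generic sketch with placeholder names (CONFIG_FOO and foo_cleanup() are hypothetical):

	void bar_teardown(struct bar_dev *dev)
	{
		/*
		 * IS_ENABLED(CONFIG_FOO) is 1 if CONFIG_FOO=y or =m, else 0.
		 * When it is 0, the compiler eliminates the branch, so no
		 * undefined reference to foo_cleanup() survives to the linker.
		 */
		if (IS_ENABLED(CONFIG_FOO))
			foo_cleanup(dev);
	}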
From: Walter Lozano walter.lozano@collabora.com
[ Upstream commit 6544abc520f0fff701e9da382110dc29676c683a ]
Currently, when using _of_add_opp_table_v2(), parsed_static_opps is incremented, and this value is used in _opp_remove_all_static() to check whether there are static OPP entries that need to be freed. Unfortunately this does not happen when using _of_add_opp_table_v1(), which leads to warnings.
This patch increases parsed_static_opps in _of_add_opp_table_v1() in a similar way as in _of_add_opp_table_v2().
Fixes: 03758d60265c ("opp: Replace list_kref with a local counter") Cc: v5.6+ stable@vger.kernel.org # v5.6+ Signed-off-by: Walter Lozano walter.lozano@collabora.com [ Viresh: Do the operation with lock held and set the value to 1 instead of incrementing it ] Signed-off-by: Viresh Kumar viresh.kumar@linaro.org Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/opp/of.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 9cd8f0adacae4..249738e1e0b7a 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -733,6 +733,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
 		return -EINVAL;
 	}
 
+	mutex_lock(&opp_table->lock);
+	opp_table->parsed_static_opps = 1;
+	mutex_unlock(&opp_table->lock);
+
 	val = prop->value;
 	while (nr) {
 		unsigned long freq = be32_to_cpup(val++) * 1000;
From: Jin Yao yao.jin@linux.intel.com
[ Upstream commit 8510895bafdbf7c4dd24c22946d925691135c2b2 ]
A big uncore event group is split into multiple small groups which only include the uncore events from the same PMU. This has been supported since commit 3cdc5c2cb924a ("perf parse-events: Handle uncore event aliases in small groups properly").

If the event's PMU name starts to repeat, it must be a new event. That can be used to distinguish the leader from other members. But currently the code only compares the pmu_name pointers (leader->pmu_name == evsel->pmu_name).
If we use "perf stat -M LLC_MISSES.PCIE_WRITE -a" on cascadelakex, the event list is:
evsel->name                              evsel->pmu_name
---------------------------------------------------------------
unc_iio_data_req_of_cpu.mem_write.part0  uncore_iio_4 (as leader)
unc_iio_data_req_of_cpu.mem_write.part0  uncore_iio_2
unc_iio_data_req_of_cpu.mem_write.part0  uncore_iio_0
unc_iio_data_req_of_cpu.mem_write.part0  uncore_iio_5
unc_iio_data_req_of_cpu.mem_write.part0  uncore_iio_3
unc_iio_data_req_of_cpu.mem_write.part0  uncore_iio_1
unc_iio_data_req_of_cpu.mem_write.part1  uncore_iio_4
......
For the event "unc_iio_data_req_of_cpu.mem_write.part1" with "uncore_iio_4", it should be the event from PMU "uncore_iio_4". It's not a new leader for this PMU.
But if we use "(leader->pmu_name == evsel->pmu_name)", the check fails and the event is stored in leaders[] as a new PMU leader.

So this patch uses strcmp() to compare the PMU names between events.
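To see why the pointer comparison fails, consider a standalone userspace sketch (not the perf code itself): each evsel gets its pmu_name from a separate allocation, so identical names still live at different addresses:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char *a = strdup("uncore_iio_4");
		char *b = strdup("uncore_iio_4");

		printf("same pointer:  %d\n", a == b);		/* 0: distinct allocations */
		printf("same contents: %d\n", !strcmp(a, b));	/* 1: equal strings */

		free(a);
		free(b);
		return 0;
	}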
Fixes: d4953f7ef1a2 ("perf parse-events: Fix 3 use after frees found with clang ASAN") Signed-off-by: Jin Yao yao.jin@linux.intel.com Acked-by: Jiri Olsa jolsa@redhat.com Cc: Alexander Shishkin alexander.shishkin@linux.intel.com Cc: Andi Kleen ak@linux.intel.com Cc: Jin Yao yao.jin@intel.com Cc: Kan Liang kan.liang@linux.intel.com Cc: Peter Zijlstra peterz@infradead.org Link: http://lore.kernel.org/lkml/20200430003618.17002-1-yao.jin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo acme@redhat.com Signed-off-by: Sasha Levin sashal@kernel.org --- tools/perf/util/parse-events.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f16748cfcb262..3fb9d53666d15 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1507,12 +1507,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
 		 * event. That can be used to distinguish the leader from
 		 * other members, even they have the same event name.
 		 */
-		if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) {
+		if ((leader != evsel) &&
+		    !strcmp(leader->pmu_name, evsel->pmu_name)) {
 			is_leader = false;
 			continue;
 		}
-		/* The name is always alias name */
-		WARN_ON(strcmp(leader->name, evsel->name));
 
 		/* Store the leader event for each PMU */
 		leaders[nr_pmu++] = (uintptr_t) evsel;
From: Takashi Iwai tiwai@suse.de
[ Upstream commit 8d6762af302d69f76fa788a277a56a9d9cd275d5 ]
The HD-audio codec driver applies a tricky procedure to forcibly perform the runtime resume by mimicking the usage count, even if the device has been runtime-suspended beforehand. This was needed to make sure the jack detection update is triggered after the system resume.

Recently we also applied similar logic to the HD-audio controller side. However, this seems to lead to some inconsistency, and eventually the PCI controller gets screwed up.
This patch is an attempt to fix and clean up those behavior: instead of the tricky runtime resume procedure, the existing jackpoll work is scheduled when such a forced codec resume is required. The jackpoll work will power up the codec, and this alone should suffice for the jack status update in usual cases. If the extra polling is requested (by checking codec->jackpoll_interval), the manual update is invoked after that, and the codec is powered down again.
Also, we filter spurious wakeups of the codec from the controller runtime resume by checking the codec->relaxed_resume flag. If this flag is set, we basically don't need to wake up the codec explicitly; that is supposed to happen via the audio component notifier.
Fixes: c4c8dd6ef807 ("ALSA: hda: Skip controller resume if not needed") Link: https://lore.kernel.org/r/20200422203744.26299-1-tiwai@suse.de Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- sound/pci/hda/hda_codec.c | 28 +++++++++++++++++----------- sound/pci/hda/hda_intel.c | 17 ++--------------- 2 files changed, 19 insertions(+), 26 deletions(-)
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 12da263fb02ba..6da296def283e 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -641,8 +641,18 @@ static void hda_jackpoll_work(struct work_struct *work)
 	struct hda_codec *codec =
 		container_of(work, struct hda_codec, jackpoll_work.work);
 
-	snd_hda_jack_set_dirty_all(codec);
-	snd_hda_jack_poll_all(codec);
+	/* for non-polling trigger: we need nothing if already powered on */
+	if (!codec->jackpoll_interval && snd_hdac_is_power_on(&codec->core))
+		return;
+
+	/* the power-up/down sequence triggers the runtime resume */
+	snd_hda_power_up_pm(codec);
+	/* update jacks manually if polling is required, too */
+	if (codec->jackpoll_interval) {
+		snd_hda_jack_set_dirty_all(codec);
+		snd_hda_jack_poll_all(codec);
+	}
+	snd_hda_power_down_pm(codec);
 
 	if (!codec->jackpoll_interval)
 		return;
@@ -2958,18 +2968,14 @@ static int hda_codec_runtime_resume(struct device *dev)
 static int hda_codec_force_resume(struct device *dev)
 {
 	struct hda_codec *codec = dev_to_hda_codec(dev);
-	bool forced_resume = hda_codec_need_resume(codec);
 	int ret;
 
-	/* The get/put pair below enforces the runtime resume even if the
-	 * device hasn't been used at suspend time.  This trick is needed to
-	 * update the jack state change during the sleep.
-	 */
-	if (forced_resume)
-		pm_runtime_get_noresume(dev);
 	ret = pm_runtime_force_resume(dev);
-	if (forced_resume)
-		pm_runtime_put(dev);
+	/* schedule jackpoll work for jack detection update */
+	if (codec->jackpoll_interval ||
+	    (pm_runtime_suspended(dev) && hda_codec_need_resume(codec)))
+		schedule_delayed_work(&codec->jackpoll_work,
+				      codec->jackpoll_interval);
 	return ret;
 }
 
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a6e8aaa091c7d..754e4d1a86b57 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1002,7 +1002,8 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
 
 	if (status && from_rt) {
 		list_for_each_codec(codec, &chip->bus)
-			if (status & (1 << codec->addr))
+			if (!codec->relaxed_resume &&
+			    (status & (1 << codec->addr)))
 				schedule_delayed_work(&codec->jackpoll_work,
 						      codec->jackpoll_interval);
 	}
@@ -1041,9 +1042,7 @@ static int azx_suspend(struct device *dev)
 static int azx_resume(struct device *dev)
 {
 	struct snd_card *card = dev_get_drvdata(dev);
-	struct hda_codec *codec;
 	struct azx *chip;
-	bool forced_resume = false;
 
 	if (!azx_is_pm_ready(card))
 		return 0;
@@ -1055,19 +1054,7 @@ static int azx_resume(struct device *dev)
 	if (azx_acquire_irq(chip, 1) < 0)
 		return -EIO;
 
-	/* check for the forced resume */
-	list_for_each_codec(codec, &chip->bus) {
-		if (hda_codec_need_resume(codec)) {
-			forced_resume = true;
-			break;
-		}
-	}
-
-	if (forced_resume)
-		pm_runtime_get_noresume(dev);
 	pm_runtime_force_resume(dev);
-	if (forced_resume)
-		pm_runtime_put(dev);
 	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
trace_azx_resume(chip);
From: Takashi Iwai tiwai@suse.de
[ Upstream commit a6630529aecb5a3e84370c376ed658e892e6261e ]
We've received a regression report about an Intel HD-audio controller that wakes up immediately after S3 suspend. The bisection leads to the commit c4c8dd6ef807 ("ALSA: hda: Skip controller resume if not needed"). This commit changes the system-suspend path to use pm_runtime_force_suspend() instead of the direct call of __azx_runtime_suspend(). However, for some really mysterious reason, pm_runtime_force_suspend() causes a spurious wakeup (although it calls the same __azx_runtime_suspend() internally).
As an ugly workaround for now, revert the behavior to call __azx_runtime_suspend() and __azx_runtime_resume() for those old Intel platforms that may exhibit such a problem, while keeping the new standard pm_runtime_force_suspend() and pm_runtime_force_resume() pair for the remaining chips.
Fixes: c4c8dd6ef807 ("ALSA: hda: Skip controller resume if not needed") BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=208649 Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200727164443.4233-1-tiwai@suse.de Signed-off-by: Takashi Iwai tiwai@suse.de Signed-off-by: Sasha Levin sashal@kernel.org --- sound/pci/hda/hda_controller.h | 2 +- sound/pci/hda/hda_intel.c | 17 ++++++++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 82e26442724ba..a356fb0e57738 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -41,7 +41,7 @@
 /* 24 unused */
 #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25)	/* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME	(1 << 26)	/* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)	/* CORBRP clears itself after reset */
 #define AZX_DCAPS_NO_MSI64      (1 << 29)	/* Stick to 32-bit MSIs */
 #define AZX_DCAPS_SEPARATE_STREAM_TAG	(1 << 30) /* capture and playback use separate stream tag */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 754e4d1a86b57..590ea262f2e20 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -295,7 +295,8 @@ enum {
 /* PCH for HSW/BDW; with runtime PM */
 /* no i915 binding for this as HSW/BDW has another controller for HDMI */
 #define AZX_DCAPS_INTEL_PCH \
-	(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
+	(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+	 AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
 
 /* HSW HDMI */
 #define AZX_DCAPS_INTEL_HASWELL \
@@ -1026,7 +1027,14 @@ static int azx_suspend(struct device *dev)
 	chip = card->private_data;
 	bus = azx_bus(chip);
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
-	pm_runtime_force_suspend(dev);
+	/* An ugly workaround: direct call of __azx_runtime_suspend() and
+	 * __azx_runtime_resume() for old Intel platforms that suffer from
+	 * spurious wakeups after S3 suspend
+	 */
+	if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+		__azx_runtime_suspend(chip);
+	else
+		pm_runtime_force_suspend(dev);
 	if (bus->irq >= 0) {
 		free_irq(bus->irq, chip);
 		bus->irq = -1;
@@ -1054,7 +1062,10 @@ static int azx_resume(struct device *dev)
 	if (azx_acquire_irq(chip, 1) < 0)
 		return -EIO;
 
-	pm_runtime_force_resume(dev);
+	if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+		__azx_runtime_resume(chip, false);
+	else
+		pm_runtime_force_resume(dev);
 	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
trace_azx_resume(chip);
From: Tonghao Zhang xiangxia.m.yue@gmail.com
[ Upstream commit 659d4587fe7233bfdff303744b20d6f41ad04362 ]
When compiling the kernel for the 32-bit arm platform, a build error is found:

net/openvswitch/meter.c:396: undefined reference to `__udivdi3'

To fix it, use div_u64() for the division.
[add more commit msg, change reported tag, and use div_u64 instead of do_div by Tonghao]
Fixes: e57358873bb5d6ca ("net: openvswitch: use u64 for meter bucket") Reported-by: kbuild test robot lkp@intel.com Signed-off-by: Tonghao Zhang xiangxia.m.yue@gmail.com Tested-by: Tonghao Zhang xiangxia.m.yue@gmail.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Sasha Levin sashal@kernel.org --- net/openvswitch/meter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index b10734f18bbd6..541eea74ef7a6 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -252,7 +252,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
 		 * Start with a full bucket.
 		 */
 		band->bucket = (band->burst_size + band->rate) * 1000ULL;
-		band_max_delta_t = band->bucket / band->rate;
+		band_max_delta_t = div_u64(band->bucket, band->rate);
 		if (band_max_delta_t > meter->max_delta_t)
 			meter->max_delta_t = band_max_delta_t;
 		band++;
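The underlying portability rule, as a brief sketch: on 32-bit targets a plain u64 division compiles to a call into libgcc (__udivdi3), which the kernel does not link against, so the math64.h helpers must be used instead:

	#include <linux/math64.h>

	static u64 bucket_delta_t(u64 bucket, u32 rate)
	{
		/*
		 * bucket / rate would emit a __udivdi3 call on 32-bit arm;
		 * div_u64() performs a 64-by-32 division on every arch.
		 */
		return div_u64(bucket, rate);
	}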
From: Anthony Iliopoulos ailiop@suse.com
[ Upstream commit 05b29021fba5e725dd385151ef00b6340229b500 ]
Commit 3b4b19721ec652 ("nvme: fix possible deadlock when I/O is blocked") reverted multipath head disk revalidation due to deadlocks caused by holding the bd_mutex during revalidate.
Updating the multipath disk blockdev size is still required though for userspace to be able to observe any resizing while the device is mounted. Directly update the bdev inode size to avoid unnecessarily holding the bdev->bd_mutex.
Fixes: 3b4b19721ec652 ("nvme: fix possible deadlock when I/O is blocked")
Signed-off-by: Anthony Iliopoulos ailiop@suse.com Signed-off-by: Christoph Hellwig hch@lst.de Signed-off-by: Sasha Levin sashal@kernel.org --- drivers/nvme/host/core.c | 1 + drivers/nvme/host/nvme.h | 13 +++++++++++++ 2 files changed, 14 insertions(+)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bbf52e88f045a..038ef9d113388 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1864,6 +1864,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->head->disk) {
 		nvme_update_disk_info(ns->head->disk, ns, id);
 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+		nvme_mpath_update_disk_size(ns->head->disk);
 	}
 #endif
 }
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7d57c42a641ca..aab976737f9a9 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -561,6 +561,16 @@ static inline void nvme_trace_bio_complete(struct request *req,
 			req->bio, status);
 }
 
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+	struct block_device *bdev = bdget_disk(disk, 0);
+
+	if (bdev) {
+		bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
+		bdput(bdev);
+	}
+}
+
 extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
 extern struct device_attribute subsys_attr_iopolicy;
@@ -636,6 +646,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
 static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
 {
 }
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
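A note on the units in the helper above: get_capacity() reports 512-byte sectors while bd_set_size() takes bytes, hence the SECTOR_SHIFT conversion. An annotated restatement (the comments are editorial, not from the patch):

	static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
	{
		struct block_device *bdev = bdget_disk(disk, 0);	/* whole-disk bdev */

		if (bdev) {
			/* sectors (512 B units) -> bytes */
			bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
			bdput(bdev);	/* drop the reference bdget_disk() took */
		}
	}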
On Thu, Sep 17, 2020 at 7:01 PM Sasha Levin sashal@kernel.org wrote:
From: Iago Toral Quiroga itoral@igalia.com
[ Upstream commit 0d352a3a8a1f26168d09f7073e61bb4b328e3bb9 ]
If the initialization of the job fails we need to kfree() it before returning.
Signed-off-by: Iago Toral Quiroga itoral@igalia.com Signed-off-by: Eric Anholt eric@anholt.net Link: https://patchwork.freedesktop.org/patch/msgid/20190916071125.5255-1-itoral@i... Fixes: a783a09ee76d ("drm/v3d: Refactor job management.") Reviewed-by: Eric Anholt eric@anholt.net Signed-off-by: Sasha Levin sashal@kernel.org
You're double-freeing "bin" with this patch; the bug has already been fixed here.
drivers/gpu/drm/v3d/v3d_gem.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 19c092d75266b..6316bf3646af5 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -565,6 +565,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 		ret = v3d_job_init(v3d, file_priv, &bin->base,
 				   v3d_job_free, args->in_sync_bcl);
 		if (ret) {
+			kfree(bin);
 			v3d_job_put(&render->base);
 			kfree(bin);
 			return ret;
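Applied on top of a tree that already carries the fix, the error path would read like this (a sketch to illustrate the reviewer's point):

	if (ret) {
		kfree(bin);			/* added by this backport */
		v3d_job_put(&render->base);
		kfree(bin);			/* already present: double free */
		return ret;
	}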
-- 2.25.1
linux-stable-mirror@lists.linaro.org