From: LKML haiyangz <lkmlhyz@microsoft.com> On Behalf Of Haiyang Zhang
Sent: Thursday, January 26, 2023 1:05 PM
After calling irq_set_affinity_and_hint(), the cpumask pointer is saved in desc->affinity_hint, and is used later when reading /proc/irq/<num>/affinity_hint. So the cpumask variable needs to be allocated per IRQ and must remain valid until the IRQ is freed. Otherwise, reading the affinity_hint file accesses freed memory.
To fix the bug, allocate the cpumask per irq, and free it just before freeing the irq.
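For context, a minimal sketch of the lifetime problem described above (illustrative only, not the driver code; the function name is made up):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Buggy pattern: irq_set_affinity_and_hint() stores the cpumask pointer in
 * desc->affinity_hint, so freeing the mask while the IRQ is still registered
 * leaves /proc/irq/<num>/affinity_hint pointing at freed memory.
 */
static void hint_from_temporary_mask(unsigned int irq, unsigned int cpu)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	cpumask_set_cpu(cpu, mask);
	irq_set_affinity_and_hint(irq, mask);	/* kernel keeps this pointer */
	free_cpumask_var(mask);			/* ...but the storage is freed here */
}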
Since the cpumask being passed to irq_set_affinity_and_hint() always contains exactly one CPU, the code can be considerably simplified by using the pre-calculated and persistent masks available as cpumask_of(cpu). All allocation of cpumasks in this code goes away, and you can set the affinity_hint to NULL in the cleanup and remove paths without having to free any masks.
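A minimal sketch of that approach (illustrative only; the helper names below are made up, and this is not the eventual patch):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* cpumask_of(cpu) returns a pointer to a static, always-valid single-CPU
 * mask, so it can be passed to irq_set_affinity_and_hint() with no
 * allocation and nothing to free later.
 */
static void set_hint_from_static_mask(unsigned int irq, unsigned int cpu)
{
	irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}

/* Teardown only needs to clear the hint before free_irq(). */
static void clear_hint_and_free(unsigned int irq, void *dev_id)
{
	irq_update_affinity_hint(irq, NULL);
	free_irq(irq, dev_id);
}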
Michael
Cc: stable@vger.kernel.org
Fixes: 71fa6887eeca ("net: mana: Assign interrupts to CPUs based on NUMA nodes")
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
 .../net/ethernet/microsoft/mana/gdma_main.c | 40 ++++++++++---------
 include/net/mana/gdma.h                     |  1 +
 2 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 3bae9d4c1f08..37473ae3859c 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1219,7 +1219,6 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 	struct gdma_irq_context *gic;
 	unsigned int max_irqs;
 	u16 *cpus;
-	cpumask_var_t req_mask;
 	int nvec, irq;
 	int err, i = 0, j;
 
@@ -1240,25 +1239,26 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 		goto free_irq_vector;
 	}
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
-		err = -ENOMEM;
-		goto free_irq;
-	}
-
 	cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
 	if (!cpus) {
 		err = -ENOMEM;
-		goto free_mask;
+		goto free_gic;
 	}
 	for (i = 0; i < nvec; i++)
 		cpus[i] = cpumask_local_spread(i, gc->numa_node);
 
 	for (i = 0; i < nvec; i++) {
-		cpumask_set_cpu(cpus[i], req_mask);
 		gic = &gc->irq_contexts[i];
 		gic->handler = NULL;
 		gic->arg = NULL;
 
+		if (!zalloc_cpumask_var(&gic->cpu_hint, GFP_KERNEL)) {
+			err = -ENOMEM;
+			goto free_irq;
+		}
+
+		cpumask_set_cpu(cpus[i], gic->cpu_hint);
+
 		if (!i)
 			snprintf(gic->name, MANA_IRQ_NAME_SZ,
 				 "mana_hwc@pci:%s", pci_name(pdev));
@@ -1269,17 +1269,18 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 		irq = pci_irq_vector(pdev, i);
 		if (irq < 0) {
 			err = irq;
-			goto free_mask;
+			free_cpumask_var(gic->cpu_hint);
+			goto free_irq;
 		}
 
 		err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
-		if (err)
-			goto free_mask;
-
-		irq_set_affinity_and_hint(irq, req_mask);
-		cpumask_clear(req_mask);
+		if (err) {
+			free_cpumask_var(gic->cpu_hint);
+			goto free_irq;
+		}
+		irq_set_affinity_and_hint(irq, gic->cpu_hint);
 	}
-	free_cpumask_var(req_mask);
+	kfree(cpus);
 
 	err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
 	if (err)
@@ -1288,20 +1289,22 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
 	gc->max_num_msix = nvec;
 	gc->num_msix_usable = nvec;
 
-	kfree(cpus);
-
 	return 0;
 
-free_mask:
-	free_cpumask_var(req_mask);
-	kfree(cpus);
 free_irq:
 	for (j = i - 1; j >= 0; j--) {
 		irq = pci_irq_vector(pdev, j);
 		gic = &gc->irq_contexts[j];
 
+		irq_update_affinity_hint(irq, NULL);
+		free_cpumask_var(gic->cpu_hint);
 		free_irq(irq, gic);
 	}
 
+	kfree(cpus);
+
+free_gic:
 	kfree(gc->irq_contexts);
 	gc->irq_contexts = NULL;
 free_irq_vector:
@@ -1329,6 +1332,7 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
 
 		/* Need to clear the hint before free_irq */
 		irq_update_affinity_hint(irq, NULL);
+		free_cpumask_var(gic->cpu_hint);
 		free_irq(irq, gic);
 	}
 
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 56189e4252da..4dcafecbd89e 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -342,6 +342,7 @@ struct gdma_irq_context {
 	void (*handler)(void *arg);
 	void *arg;
 	char name[MANA_IRQ_NAME_SZ];
+	cpumask_var_t cpu_hint;
 };
 
 struct gdma_context {
2.25.1