armv7pmu_disable_event() is called from the irq handler. If irq handling switches over to FIQ then the spin lock in this function risks deadlock. Both armv7_pmnc_disable_counter() and armv7_pmnc_disable_intens() are unconditional co-processor writes. I haven't yet come up with a schedule where other users of pmu_lock would break if interleaved with these calls, so I have simply removed the locking.
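To illustrate the hazard, here is a hypothetical interleaving (a sketch only, not code from this patch; the identifiers mirror the existing driver):

	/* CPU0, normal PMU code path: already inside the critical section */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* FIQ/NMI preempts CPU0 here and ends up in armv7pmu_disable_event() */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);	/* spins forever: the lock
								 * owner cannot run again
								 * until the FIQ returns */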
The other change required is to avoid calling irq_work_run() when running from a FIQ handler. The pending work will instead be dispatched either by the irq work IPI or by a timer handler.
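For reference, a rough sketch of that fallback path (assumed usage, not part of this patch; 'work' is a placeholder struct irq_work):

	/* In FIQ/NMI context: only queue the work, never run callbacks here */
	irq_work_queue(&work);		/* raises the irq work IPI where the arch supports it */

	/* Later, from a context where callbacks are safe to run */
	irq_work_run();			/* irq work IPI handler */
	/* ... or the periodic tick dispatches anything still pending */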
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
---
 arch/arm/kernel/perf_event_v7.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8993770c47de..08f426486d3e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -744,7 +744,6 @@ static void armv7pmu_enable_event(struct perf_event *event)
 
 static void armv7pmu_disable_event(struct perf_event *event)
 {
-	unsigned long flags;
 	struct hw_perf_event *hwc = &event->hw;
 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
@@ -757,11 +756,6 @@ static void armv7pmu_disable_event(struct perf_event *event)
 	}
 
 	/*
-	 * Disable counter and interrupt
-	 */
-	raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
-	/*
 	 * Disable counter
 	 */
 	armv7_pmnc_disable_counter(idx);
@@ -770,8 +764,6 @@ static void armv7pmu_disable_event(struct perf_event *event)
 	 * Disable interrupt for this counter
 	 */
 	armv7_pmnc_disable_intens(idx);
-
-	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -831,7 +823,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
-	irq_work_run();
+	if (!in_nmi())
+		irq_work_run();
 
 	return IRQ_HANDLED;
 }