On Tue, Jan 10, 2023 at 1:23 PM Greg Kroah-Hartman <gregkh@linuxfoundation.org> wrote:
From: Paul E. McKenney <paulmck@kernel.org>
commit 96017bf9039763a2e02dcc6adaa18592cd73a39d upstream.
Thanks Greg, I had sent the same patch earlier for 5.15. Just so I learn: is there anything I did wrong, or anything I should have done differently?
- Joel
Currently, trc_wait_for_one_reader() atomically increments the trc_n_readers_need_end counter before sending the IPI invoking trc_read_check_handler(). All failure paths out of trc_read_check_handler() and also from the smp_call_function_single() within trc_wait_for_one_reader() must carefully atomically decrement this counter. This is more complex than it needs to be.
This commit therefore simplifies things and saves a few lines of code by dispensing with the atomic decrements in favor of having trc_read_check_handler() do the atomic increment only in the success case. In theory, this represents no change in functionality.
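Just to check my own reading of the accounting change, here is how I picture the before/after pattern as a tiny standalone sketch. This uses userspace C11 atomics rather than the kernel's atomic_t/wait-queue machinery, and the names (readers_need_end, wait_for_one_reader_old/new, check_result) are made up for illustration, not taken from tasks.h:

/*
 * Illustration only: userspace C11 atomics, not the kernel's
 * atomic_t API.  All identifiers below are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Plays the role of trc_n_readers_need_end; starts at 1 so the
 * grace-period kthread's own reference keeps it nonzero. */
static atomic_int readers_need_end = 1;

/* Possible outcomes of the IPI handler on the target CPU. */
enum check_result { NOT_RUNNING_HERE, NOT_IN_READER, STILL_IN_READER };

/* Old scheme: increment before sending the IPI, so every outcome
 * other than "still in a reader" (and every failed IPI) must
 * carefully undo the increment. */
static void wait_for_one_reader_old(enum check_result r, bool ipi_failed)
{
	atomic_fetch_add(&readers_need_end, 1);		/* up front */
	if (ipi_failed || r != STILL_IN_READER)
		atomic_fetch_sub(&readers_need_end, 1);	/* undo on each failure path */
}

/* New scheme: no up-front increment, so nothing to undo; only a task
 * that really is in a read-side critical section adds itself. */
static void wait_for_one_reader_new(enum check_result r, bool ipi_failed)
{
	if (!ipi_failed && r == STILL_IN_READER)
		atomic_fetch_add(&readers_need_end, 1);	/* one more to wait on */
}

int main(void)
{
	/* Four tasks: one no longer on this CPU, one not in a reader,
	 * one whose IPI fails, and one genuinely inside a reader. */
	wait_for_one_reader_old(NOT_RUNNING_HERE, false);
	wait_for_one_reader_old(NOT_IN_READER, false);
	wait_for_one_reader_old(STILL_IN_READER, true);
	wait_for_one_reader_old(STILL_IN_READER, false);
	printf("old scheme: %d\n", atomic_load(&readers_need_end));	/* 2 */

	atomic_store(&readers_need_end, 1);
	wait_for_one_reader_new(NOT_RUNNING_HERE, false);
	wait_for_one_reader_new(NOT_IN_READER, false);
	wait_for_one_reader_new(STILL_IN_READER, true);
	wait_for_one_reader_new(STILL_IN_READER, false);
	printf("new scheme: %d\n", atomic_load(&readers_need_end));	/* 2 */
	return 0;
}

Both variants end with the same count, matching the "no change in functionality" note above; the difference is just that the new form has no failure paths that must remember to undo an earlier increment.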
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 kernel/rcu/tasks.h | 20 +++-----------------
 1 file changed, 3 insertions(+), 17 deletions(-)
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -892,32 +892,24 @@ static void trc_read_check_handler(void *t_in)
 
 	// If the task is no longer running on this CPU, leave.
 	if (unlikely(texp != t)) {
-		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-			wake_up(&trc_wait);
 		goto reset_ipi; // Already on holdout list, so will check later.
 	}
 
 	// If the task is not in a read-side critical section, and
 	// if this is the last reader, awaken the grace-period kthread.
 	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
-		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-			wake_up(&trc_wait);
-		// Mark as checked after decrement to avoid false
-		// positives on the above WARN_ON_ONCE().
 		WRITE_ONCE(t->trc_reader_checked, true);
 		goto reset_ipi;
 	}
 	// If we are racing with an rcu_read_unlock_trace(), try again later.
-	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
-		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-			wake_up(&trc_wait);
+	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
 		goto reset_ipi;
-	}
 	WRITE_ONCE(t->trc_reader_checked, true);
 
 	// Get here if the task is in a read-side critical section.  Set
 	// its state so that it will awaken the grace-period kthread upon
 	// exit from that critical section.
+	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
 	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
 
@@ -1017,21 +1009,15 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
 			return;
 
-		atomic_inc(&trc_n_readers_need_end);
 		per_cpu(trc_ipi_to_cpu, cpu) = true;
 		t->trc_ipi_to_cpu = cpu;
 		rcu_tasks_trace.n_ipis++;
-		if (smp_call_function_single(cpu,
-					     trc_read_check_handler, t, 0)) {
+		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
 			// Just in case there is some other reason for
 			// failure than the target CPU being offline.
 			rcu_tasks_trace.n_ipis_fails++;
 			per_cpu(trc_ipi_to_cpu, cpu) = false;
 			t->trc_ipi_to_cpu = cpu;
-			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
-				WARN_ON_ONCE(1);
-				wake_up(&trc_wait);
-			}
 		}
 	}
 }