On 08/09, Steven Rostedt wrote:
> --- a/kernel/trace/trace_uprobe.c
> +++ b/kernel/trace/trace_uprobe.c
> @@ -952,7 +952,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
>  
>  		list_del_rcu(&link->list);
>  		/* synchronize with u{,ret}probe_trace_func */
> -		synchronize_sched();
> +		synchronize_rcu();
Can't we change uprobe_trace_func() and uretprobe_trace_func() to use rcu_read_lock_sched() instead? It is cheaper.
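For illustration, the reader side would then look something like this (my sketch, based on how uprobe_trace_func() currently looks; uretprobe_trace_func() would change the same way):

	static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
				     struct uprobe_cpu_buffer *ucb, int dsize)
	{
		struct event_file_link *link;

		if (is_ret_probe(tu))
			return 0;

		rcu_read_lock_sched();		/* was: rcu_read_lock() */
		list_for_each_entry_rcu(link, &tu->tp.files, list)
			__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
		rcu_read_unlock_sched();	/* was: rcu_read_unlock() */

		return 0;
	}

and probe_event_disable() could keep synchronize_sched().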
Hmm. probe_event_enable() does list_del + kfree on failure; this doesn't look right... Not only because kfree() can race with list_for_each_entry_rcu(): we should not put the 1st link on the list until uprobe_buffer_enable() succeeds.
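To spell out the race (my reading of the current code): if uprobe_buffer_enable() or uprobe_register() fails after the link was already added, the error path frees it while a reader may still be walking the list, and nothing on that path waits for a grace period:

	/*
	 * CPU 0: probe_event_enable()             CPU 1: uprobe_trace_func()
	 *
	 * list_add_tail_rcu(&link->list, ...);
	 *                                          rcu_read_lock();
	 *                                          list_for_each_entry_rcu(link, ...)
	 * uprobe_register() fails -> err_flags
	 * list_del(&link->list);
	 * kfree(link);
	 *                                          ... dereferences the freed link ...
	 *                                          rcu_read_unlock();
	 */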
Does the patch below make sense, or am I confused?
Oleg.
--- x/kernel/trace/trace_uprobe.c
+++ x/kernel/trace/trace_uprobe.c
@@ -896,8 +896,6 @@ probe_event_enable(struct trace_uprobe *
 			return -ENOMEM;
 
 		link->file = file;
-		list_add_tail_rcu(&link->list, &tu->tp.files);
-
 		tu->tp.flags |= TP_FLAG_TRACE;
 	} else {
 		if (tu->tp.flags & TP_FLAG_TRACE)
@@ -909,7 +907,7 @@ probe_event_enable(struct trace_uprobe *
 	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
 	if (enabled)
-		return 0;
+		goto add;
 
 	ret = uprobe_buffer_enable();
 	if (ret)
@@ -920,7 +918,8 @@ probe_event_enable(struct trace_uprobe *
 	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
 	if (ret)
 		goto err_buffer;
-
+ add:
+	list_add_tail_rcu(&link->list, &tu->tp.files);
 	return 0;
 
  err_buffer:
@@ -928,7 +927,6 @@ probe_event_enable(struct trace_uprobe *
 
  err_flags:
 	if (file) {
-		list_del(&link->list);
 		kfree(link);
 		tu->tp.flags &= ~TP_FLAG_TRACE;
 	} else {
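IOW, the idea is to publish the link only after uprobe_register() succeeds, so the error path never sees it on the list and plain kfree() is safe. For the file != NULL case the resulting flow would be roughly (my condensed sketch, not the actual code, unrelated details elided):

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->file = file;		/* allocated, but not visible to readers yet */
	tu->tp.flags |= TP_FLAG_TRACE;

	if (enabled)
		goto add;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret)
		goto err_buffer;
 add:
	list_add_tail_rcu(&link->list, &tu->tp.files);	/* publish only on success */
	return 0;

 err_buffer:
	uprobe_buffer_disable();
 err_flags:
	kfree(link);			/* never published: no list_del, no use-after-free */
	tu->tp.flags &= ~TP_FLAG_TRACE;
	return ret;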