[ Sasha's backport helper bot ]
Hi,
Summary of potential issues:

⚠️ Found matching upstream commit but patch is missing proper reference to it
ℹ️ Patch is missing in 6.13.y (ignore if backport was sent)
⚠️ Commit missing in all newer stable branches
Found matching upstream commit: dfd3df31c9db752234d7d2e09bef2aeabb643ce4
Status in newer kernel trees:
6.13.y | Not found
Note: The patch differs from the upstream commit:
---
1:  dfd3df31c9db7 ! 1:  b4fb63fe8c845 mm/slab/kvfree_rcu: Switch to WQ_MEM_RECLAIM wq
    @@ Commit message
         Reviewed-by: Joel Fernandes <joelagnelf@nvidia.com>
         Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

     - ## mm/slab_common.c ##
     -@@ mm/slab_common.c: module_param(rcu_min_cached_objs, int, 0444);
     - static int rcu_delay_page_cache_fill_msec = 5000;
     - module_param(rcu_delay_page_cache_fill_msec, int, 0444);
     + ## kernel/rcu/tree.c ##
     +@@ kernel/rcu/tree.c: void call_rcu(struct rcu_head *head, rcu_callback_t func)
     + }
     + EXPORT_SYMBOL_GPL(call_rcu);

     +static struct workqueue_struct *rcu_reclaim_wq;
     +
      /* Maximum number of jiffies to wait before draining a batch. */
      #define KFREE_DRAIN_JIFFIES (5 * HZ)
      #define KFREE_N_BATCHES 2
     -@@ mm/slab_common.c: __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
     +@@ kernel/rcu/tree.c: __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
      	if (delayed_work_pending(&krcp->monitor_work)) {
      		delay_left = krcp->monitor_work.timer.expires - jiffies;
      		if (delay < delay_left)
    @@ mm/slab_common.c: __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
      }

      static void
     -@@ mm/slab_common.c: kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
     +@@ kernel/rcu/tree.c: kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
      			// "free channels", the batch can handle. Break
      			// the loop since it is done with this CPU thus
      			// queuing an RCU work is _always_ success here.
    @@ mm/slab_common.c: kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
      		WARN_ON_ONCE(!queued);
      		break;
      	}
     -@@ mm/slab_common.c: run_page_cache_worker(struct kfree_rcu_cpu *krcp)
     +@@ kernel/rcu/tree.c: run_page_cache_worker(struct kfree_rcu_cpu *krcp)
      	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
      			!atomic_xchg(&krcp->work_in_progress, 1)) {
      		if (atomic_read(&krcp->backoff_page_cache_fill)) {
    @@ mm/slab_common.c: run_page_cache_worker(struct kfree_rcu_cpu *krcp)
      				&krcp->page_cache_work,
      					msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
      	} else {
     -@@ mm/slab_common.c: void __init kvfree_rcu_init(void)
     +@@ kernel/rcu/tree.c: static void __init kfree_rcu_batch_init(void)
      	int i, j;
      	struct shrinker *kfree_rcu_shrinker;
---
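For context on what is being switched: both the upstream commit and the backport add a dedicated workqueue (rcu_reclaim_wq) allocated with WQ_MEM_RECLAIM, so the kvfree_rcu draining and page-cache work keeps a rescuer thread and can make progress under memory pressure; the range-diff above mainly shows the change relocated to kernel/rcu/tree.c, where this code still lives in 6.12.y. Below is a minimal kernel-module sketch of that general workqueue pattern, assuming only the standard workqueue API; it is not the patch itself, and the demo_* names and the 100 ms delay are invented for illustration.

/*
 * Minimal sketch of the WQ_MEM_RECLAIM pattern used by the patch above.
 * The demo_* identifiers are hypothetical; only the workqueue API calls
 * (alloc_workqueue, queue_delayed_work, destroy_workqueue) mirror what
 * the real change does.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo: reclaim-safe deferred work ran\n");
}
static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread for this queue;
	 * WQ_UNBOUND matches how the kvfree_rcu work was queued before. */
	demo_wq = alloc_workqueue("demo_reclaim", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	/* Analogous to queue_delayed_work(rcu_reclaim_wq, ...) replacing
	 * queue_delayed_work(system_unbound_wq, ...) in the patch. */
	queue_delayed_work(demo_wq, &demo_work, msecs_to_jiffies(100));
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The relevant design point is the flag: unlike system_unbound_wq, a WQ_MEM_RECLAIM queue is guaranteed forward progress even when new worker threads cannot be spawned, which matters here because this work is itself freeing memory.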
Results of testing on various branches:
| Branch              | Patch Apply | Build Test |
|---------------------|-------------|------------|
| stable/linux-6.12.y | Success     | Success    |