From: Peter Zijlstra <peterz@infradead.org>
commit 54da6a0924311c7cf5015533991e44fb8eb12773 upstream.
Use __attribute__((__cleanup__(func))) to build:
- simple auto-release pointers using __free()
- 'classes' with constructor and destructor semantics for scope-based resource management.
- lock guards based on the above classes.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Lukas Wunner <lukas@wunner.de>
Link: https://lkml.kernel.org/r/20230612093537.614161713%40infradead.org
---
 include/linux/cleanup.h             | 171 ++++++++++++++++++++++++++++
 include/linux/compiler-clang.h      |   9 ++
 include/linux/compiler_attributes.h |   6 +
 include/linux/device.h              |   7 ++
 include/linux/file.h                |   6 +
 include/linux/irqflags.h            |   7 ++
 include/linux/mutex.h               |   4 +
 include/linux/percpu.h              |   4 +
 include/linux/preempt.h             |   5 +
 include/linux/rcupdate.h            |   3 +
 include/linux/rwsem.h               |   9 ++
 include/linux/sched/task.h          |   2 +
 include/linux/slab.h                |   3 +
 include/linux/spinlock.h            |  31 +++++
 include/linux/srcu.h                |   5 +
 scripts/checkpatch.pl               |   2 +-
 16 files changed, 273 insertions(+), 1 deletion(-)
 create mode 100644 include/linux/cleanup.h
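A minimal usage sketch (illustration only, not part of the patch; struct obj,
obj_init() and its members are hypothetical, while __free(kfree), return_ptr()
and guard(mutex) come from the diff below):

	/* assumes a made-up 'struct obj' with a 'struct mutex lock' member */
	struct obj *obj_alloc(void)
	{
		struct obj *p __free(kfree) = kmalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return NULL;

		if (!obj_init(p))
			return NULL;	/* kfree(p) runs automatically here */

		return_ptr(p);		/* inhibit the cleanup, caller owns p */
	}

	int obj_update(struct obj *o, int val)
	{
		guard(mutex)(&o->lock);	/* mutex_unlock() on every return path */

		if (!o->ready)
			return -EBUSY;

		o->val = val;
		return 0;
	}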
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
new file mode 100644
index 000000000000..53f1a7a932b0
--- /dev/null
+++ b/include/linux/cleanup.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GUARDS_H
+#define __LINUX_GUARDS_H
+
+#include <linux/compiler.h>
+
+/*
+ * DEFINE_FREE(name, type, free):
+ *	simple helper macro that defines the required wrapper for a __free()
+ *	based cleanup function. @free is an expression using '_T' to access
+ *	the variable.
+ *
+ * __free(name):
+ *	variable attribute to add a scoped based cleanup to the variable.
+ *
+ * no_free_ptr(var):
+ *	like a non-atomic xchg(var, NULL), such that the cleanup function will
+ *	be inhibited -- provided it sanely deals with a NULL value.
+ *
+ * return_ptr(p):
+ *	returns p while inhibiting the __free().
+ *
+ * Ex.
+ *
+ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+ *
+ *	struct obj *p __free(kfree) = kmalloc(...);
+ *	if (!p)
+ *		return NULL;
+ *
+ *	if (!init_obj(p))
+ *		return NULL;
+ *
+ *	return_ptr(p);
+ */
+
+#define DEFINE_FREE(_name, _type, _free) \
+	static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
+
+#define __free(_name)	__cleanup(__free_##_name)
+
+#define no_free_ptr(p) \
+	({ __auto_type __ptr = (p); (p) = NULL; __ptr; })
+
+#define return_ptr(p)	return no_free_ptr(p)
+
+
+/*
+ * DEFINE_CLASS(name, type, exit, init, init_args...):
+ *	helper to define the destructor and constructor for a type.
+ *	@exit is an expression using '_T' -- similar to FREE above.
+ *	@init is an expression in @init_args resulting in @type
+ *
+ * EXTEND_CLASS(name, ext, init, init_args...):
+ *	extends class @name to @name@ext with the new constructor
+ *
+ * CLASS(name, var)(args...):
+ *	declare the variable @var as an instance of the named class
+ *
+ * Ex.
+ *
+ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
+ *
+ *	CLASS(fdget, f)(fd);
+ *	if (!f.file)
+ *		return -EBADF;
+ *
+ *	// use 'f' without concern
+ */
+
+#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)	\
+typedef _type class_##_name##_t;					\
+static inline void class_##_name##_destructor(_type *p)		\
+{ _type _T = *p; _exit; }						\
+static inline _type class_##_name##_constructor(_init_args)		\
+{ _type t = _init; return t; }
+
+#define EXTEND_CLASS(_name, ext, _init, _init_args...)			\
+typedef class_##_name##_t class_##_name##ext##_t;			\
+static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
+{ class_##_name##_destructor(p); }					\
+static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+{ class_##_name##_t t = _init; return t; }
+
+#define CLASS(_name, var)						\
+	class_##_name##_t var __cleanup(class_##_name##_destructor) =	\
+		class_##_name##_constructor
+
+
+/*
+ * DEFINE_GUARD(name, type, lock, unlock):
+ *	trivial wrapper around DEFINE_CLASS() above specifically
+ *	for locks.
+ *
+ * guard(name):
+ *	an anonymous instance of the (guard) class
+ *
+ * scoped_guard (name, args...) { }:
+ *	similar to CLASS(name, scope)(args), except the variable (with the
+ *	explicit name 'scope') is declard in a for-loop such that its scope is
+ *	bound to the next (compound) statement.
+ *
+ */
+
+#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+	DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
+
+#define guard(_name) \
+	CLASS(_name, __UNIQUE_ID(guard))
+
+#define scoped_guard(_name, args...)					\
+	for (CLASS(_name, scope)(args),					\
+	     *done = NULL; !done; done = (void *)1)
+
+/*
+ * Additional helper macros for generating lock guards with types, either for
+ * locks that don't have a native type (eg. RCU, preempt) or those that need a
+ * 'fat' pointer (eg. spin_lock_irqsave).
+ *
+ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ *
+ * will result in the following type:
+ *
+ * typedef struct {
+ *	type *lock;		// 'type := void' for the _0 variant
+ *	__VA_ARGS__;
+ * } class_##name##_t;
+ *
+ * As above, both _lock and _unlock are statements, except this time '_T' will
+ * be a pointer to the above struct.
+ */
+
+#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)		\
+typedef struct {							\
+	_type *lock;							\
+	__VA_ARGS__;							\
+} class_##_name##_t;							\
+									\
+static inline void class_##_name##_destructor(class_##_name##_t *_T)	\
+{									\
+	if (_T->lock) { _unlock; }					\
+}
+
+
+#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock)			\
+static inline class_##_name##_t class_##_name##_constructor(_type *l)	\
+{									\
+	class_##_name##_t _t = { .lock = l }, *_T = &_t;		\
+	_lock;								\
+	return _t;							\
+}
+
+#define __DEFINE_LOCK_GUARD_0(_name, _lock)				\
+static inline class_##_name##_t class_##_name##_constructor(void)	\
+{									\
+	class_##_name##_t _t = { .lock = (void*)1 },			\
+		*_T __maybe_unused = &_t;				\
+	_lock;								\
+	return _t;							\
+}
+
+#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)		\
+__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)		\
+__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
+
+#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)			\
+__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)		\
+__DEFINE_LOCK_GUARD_0(_name, _lock)
+
+#endif /* __LINUX_GUARDS_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 9ba951e3a6c2..d1f6f594c508 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,6 +15,15 @@
 
 /* Compiler specific definitions for Clang compiler */
 
+/*
+ * Clang prior to 17 is being silly and considers many __cleanup() variables
+ * as unused (because they are, their sole purpose is to go out of scope).
+ *
+ * https://reviews.llvm.org/D152180
+ */
+#undef __cleanup
+#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
+
 /* same as gcc, this was present in clang-2.6 so we can assume it works
  * with any version that can compile the kernel
  */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 3982c2dc541e..089fd96378a7 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -93,6 +93,12 @@
  */
 #define __cold __attribute__((__cold__))
 
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cle...
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
+ */
+#define __cleanup(func) __attribute__((__cleanup__(func)))
+
 /*
  * Note the long name.
  *
diff --git a/include/linux/device.h b/include/linux/device.h
index 9c9ce573c737..fb15f50894fa 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -30,6 +30,7 @@
 #include <linux/device/bus.h>
 #include <linux/device/class.h>
 #include <linux/device/driver.h>
+#include <linux/cleanup.h>
 #include <asm/device.h>
 
 struct device;
@@ -826,6 +827,9 @@ void device_unregister(struct device *dev);
 void device_initialize(struct device *dev);
 int __must_check device_add(struct device *dev);
 void device_del(struct device *dev);
+
+DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
+
 int device_for_each_child(struct device *dev, void *data,
 			  int (*fn)(struct device *dev, void *data));
 int device_for_each_child_reverse(struct device *dev, void *data,
@@ -952,6 +956,9 @@ extern int (*platform_notify_remove)(struct device *dev);
  */
 struct device *get_device(struct device *dev);
 void put_device(struct device *dev);
+
+DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
+
 bool kill_device(struct device *dev);
 
 #ifdef CONFIG_DEVTMPFS
diff --git a/include/linux/file.h b/include/linux/file.h
index 225982792fa2..5b4e31388648 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <linux/posix_types.h>
 #include <linux/errno.h>
+#include <linux/cleanup.h>
 
 struct file;
 
@@ -82,6 +83,8 @@ static inline void fdput_pos(struct fd f)
 	fdput(f);
 }
 
+DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
+
 extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
 extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
 extern void set_close_on_exec(unsigned int fd, int flag);
@@ -90,6 +93,9 @@ extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
 extern int get_unused_fd_flags(unsigned flags);
 extern void put_unused_fd(unsigned int fd);
 
+DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
+	     get_unused_fd_flags(flags), unsigned flags)
+
 extern void fd_install(unsigned int fd, struct file *file);
 
 extern int __receive_fd(int fd, struct file *file, int __user *ufd,
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index a53adf4316a5..9beebc188fd4 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -13,6 +13,7 @@
 #define _LINUX_TRACE_IRQFLAGS_H
 
 #include <linux/typecheck.h>
+#include <linux/cleanup.h>
 #include <asm/irqflags.h>
 #include <asm/percpu.h>
 
@@ -248,4 +249,10 @@ do { \
 
 #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
 
+DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
+DEFINE_LOCK_GUARD_0(irqsave,
+		    local_irq_save(_T->flags),
+		    local_irq_restore(_T->flags),
+		    unsigned long flags)
+
 #endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 4d671fba3cab..386f436e5148 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -19,6 +19,7 @@
 #include <asm/processor.h>
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
+#include <linux/cleanup.h>
 
 struct ww_acquire_ctx;
 
@@ -199,6 +200,9 @@ extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
+DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
+
 /*
  * These values are chosen such that FAIL and SUCCESS match the
  * values of the regular mutex_trylock().
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5e76af742c80..c9a84532bb79 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -9,6 +9,7 @@
 #include <linux/printk.h>
 #include <linux/pfn.h>
 #include <linux/init.h>
+#include <linux/cleanup.h>
 
 #include <asm/percpu.h>
 
@@ -134,6 +135,9 @@ extern void __init setup_per_cpu_areas(void);
 extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
 extern void __percpu *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void __percpu *__pdata);
+
+DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
+
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #define alloc_percpu_gfp(type, gfp) \
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7d9c1c0e149c..9a70e1e315d3 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,6 +8,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/cleanup.h>
 #include <linux/list.h>
 
 /*
@@ -352,4 +353,8 @@ static __always_inline void migrate_enable(void)
 	preempt_enable();
 }
 
+DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
+DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
 #endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9db6710e6ee7..7c7c3fce6bb2 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -27,6 +27,7 @@
 #include <linux/preempt.h>
 #include <linux/bottom_half.h>
 #include <linux/lockdep.h>
+#include <linux/cleanup.h>
 #include <asm/processor.h>
 #include <linux/cpumask.h>
 
@@ -1055,4 +1056,6 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
 extern int rcu_expedited;
 extern int rcu_normal;
 
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 4c715be48717..debdd9c3a5e5 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -16,6 +16,8 @@
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/err.h>
+#include <linux/cleanup.h>
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
@@ -152,6 +154,13 @@ extern void up_read(struct rw_semaphore *sem);
  */
 extern void up_write(struct rw_semaphore *sem);
 
+DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+
+DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
+DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
+
+
 /*
  * downgrade write lock to read lock
  */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index de21a45a4ee7..10545f5f6bb5 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -143,6 +143,8 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
+DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
+
 static inline void put_task_struct_many(struct task_struct *t, int nr)
 {
 	if (refcount_sub_and_test(nr, &t->usage))
diff --git a/include/linux/slab.h b/include/linux/slab.h
index dd6897f62010..dac1c1decf76 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <linux/percpu-refcount.h>
+#include <linux/cleanup.h>
 
 
 /*
@@ -187,6 +188,8 @@ void kfree_sensitive(const void *);
 size_t __ksize(const void *);
 size_t ksize(const void *);
 
+DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 			 bool to_user);
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 79897841a2cc..9aea8c4e2d7c 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -57,6 +57,7 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <linux/lockdep.h>
+#include <linux/cleanup.h>
 #include <asm/barrier.h>
 #include <asm/mmiowb.h>
 
@@ -493,4 +494,34 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
 
 void free_bucket_spinlocks(spinlock_t *locks);
 
+DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
+		    raw_spin_lock(_T->lock),
+		    raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
+		    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
+		    raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+		    raw_spin_lock_irq(_T->lock),
+		    raw_spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+		    raw_spin_lock_irqsave(_T->lock, _T->flags),
+		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
+		    unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
+		    spin_lock(_T->lock),
+		    spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+		    spin_lock_irq(_T->lock),
+		    spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+		    spin_lock_irqsave(_T->lock, _T->flags),
+		    spin_unlock_irqrestore(_T->lock, _T->flags),
+		    unsigned long flags)
+
 #endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a0895bbf71ce..e35168e0b952 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -205,4 +205,9 @@ static inline void smp_mb__after_srcu_read_unlock(void)
 	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
 }
 
+DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
+		    _T->idx = srcu_read_lock(_T->lock),
+		    srcu_read_unlock(_T->lock, _T->idx),
+		    int idx)
+
 #endif
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0ad235ee96f9..9a1c5ac4ba07 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -4522,7 +4522,7 @@ sub process {
 			if|for|while|switch|return|case|
 			volatile|__volatile__|
 			__attribute__|format|__extension__|
-			asm|__asm__)$/x)
+			asm|__asm__|scoped_guard)$/x)
 		{
 		# cpp #define statements have non-optional spaces, ie
 		# if there is a space between the name and the open
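For comparison, the typed lock guards defined above end up being used roughly
like this (hypothetical example; the widget list, its lock and struct widget
are made up, only the guard names come from the patch):

	static DEFINE_SPINLOCK(widget_lock);
	static LIST_HEAD(widget_list);

	void add_widget(struct widget *w)
	{
		/* spin_lock_irqsave()/spin_unlock_irqrestore() bound to the block */
		scoped_guard (spinlock_irqsave, &widget_lock) {
			list_add(&w->node, &widget_list);
		}
	}

	void del_widget(struct widget *w)
	{
		/* released automatically on every return path */
		guard(spinlock_irqsave)(&widget_lock);
		list_del(&w->node);
	}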
From: Ira Weiny <ira.weiny@intel.com>
commit ced085ef369af7a2b6da962ec2fbd01339f60693 upstream.
The "goto error" pattern is notorious for introducing subtle resource leaks. Use the new cleanup.h helpers for PCI device reference counts.
Similar to the new put_device() cleanup helper, __free(put_device), define the same for PCI devices, __free(pci_dev_put). These helpers eliminate the need for "goto free;" patterns. For example, a 'struct pci_dev *' instance declared as:
struct pci_dev *pdev __free(pci_dev_put) = NULL;
...will automatically call pci_dev_put() if @pdev is non-NULL when @pdev goes out of scope (automatic variable scope). If a function wants to invoke pci_dev_put() on error, but return @pdev on success, it can do:
return no_free_ptr(pdev);
...or:
return_ptr(pdev);
As a sense of the potential cleanup opportunity: there are 587 open-coded calls to pci_dev_put() in the kernel, 65 of them within 10 lines of a goto statement, and the CXL driver is about to add another one.
Cc: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Link: https://lore.kernel.org/r/20231220-cxl-cper-v5-8-1bb8a4ca2c7a@intel.com
[djbw: rewrite changelog]
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
[lukas: drop DEFINE_GUARD() helper as pci_dev_lock() doesn't exist in v5.10]
Signed-off-by: Lukas Wunner <lukas@wunner.de>
---
 include/linux/pci.h | 1 +
 1 file changed, 1 insertion(+)
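To illustrate the intended use (illustration only, not part of this patch;
the lookup helper and widget_usable() below are made up, while
pci_get_device()/pci_dev_put() and the cleanup helpers are real):

	static struct pci_dev *find_widget(unsigned int vendor, unsigned int device)
	{
		struct pci_dev *pdev __free(pci_dev_put) = NULL;

		pdev = pci_get_device(vendor, device, NULL);
		if (!pdev)
			return NULL;

		if (!widget_usable(pdev))
			return NULL;	/* reference dropped automatically */

		return_ptr(pdev);	/* success: caller now owns the reference */
	}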
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 30bc462fb196..f497aaf1d032 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1069,6 +1069,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
 struct pci_dev *pci_dev_get(struct pci_dev *dev);
 void pci_dev_put(struct pci_dev *dev);
+DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
 void pci_remove_bus(struct pci_bus *b);
 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
commit 11a1f4bc47362700fcbde717292158873fb847ed upstream.
Keith reports a use-after-free when a DPC event occurs concurrently with hot-removal of the same portion of the hierarchy.
The dpc_handler() awaits readiness of the secondary bus below the Downstream Port where the DPC event occurred. To do so, it polls the config space of the first child device on the secondary bus. If that child device is concurrently removed, accesses to its struct pci_dev cause the kernel to oops.
That's because pci_bridge_wait_for_secondary_bus() neglects to hold a reference on the child device. Before v6.3, the function was only called on resume from system sleep or on runtime resume. Holding a reference wasn't necessary back then because the pciehp IRQ thread could never run concurrently. (On resume from system sleep, IRQs are not enabled until after the resume_noirq phase. And runtime resume is always awaited before a PCI device is removed.)
However starting with v6.3, pci_bridge_wait_for_secondary_bus() is also called on a DPC event. Commit 53b54ad074de ("PCI/DPC: Await readiness of secondary bus after reset"), which introduced that, failed to appreciate that pci_bridge_wait_for_secondary_bus() now needs to hold a reference on the child device because dpc_handler() and pciehp may indeed run concurrently. The commit was backported to v5.10+ stable kernels, so that's the oldest one affected.
Add the missing reference acquisition.
Abridged stack trace:
BUG: unable to handle page fault for address: 00000000091400c0
CPU: 15 PID: 2464 Comm: irq/53-pcie-dpc 6.9.0
RIP: pci_bus_read_config_dword+0x17/0x50
 pci_dev_wait()
 pci_bridge_wait_for_secondary_bus()
 dpc_reset_link()
 pcie_do_recovery()
 dpc_handler()
Fixes: 53b54ad074de ("PCI/DPC: Await readiness of secondary bus after reset")
Closes: https://lore.kernel.org/r/20240612181625.3604512-3-kbusch@meta.com/
Link: https://lore.kernel.org/linux-pci/8e4bcd4116fd94f592f2bf2749f168099c480ddf.1...
Reported-by: Keith Busch <kbusch@kernel.org>
Tested-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Lukas Wunner <lukas@wunner.de>
Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Cc: stable@vger.kernel.org # v5.10+
---
 drivers/pci/pci.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
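The resulting pattern, heavily abridged from the diff below:

	struct pci_dev *child __free(pci_dev_put) = NULL;
	...
	down_read(&pci_bus_sem);
	...
	/*
	 * Take the reference while still holding pci_bus_sem so that a
	 * concurrent hot-removal cannot free the pci_dev being polled.
	 */
	child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
					     struct pci_dev, bus_list));
	up_read(&pci_bus_sem);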
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 530ced8f7abd..5fa626589b90 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4816,7 +4816,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
 int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
 				      int timeout)
 {
-	struct pci_dev *child;
+	struct pci_dev *child __free(pci_dev_put) = NULL;
 	int delay;
 
 	if (pci_dev_is_disconnected(dev))
@@ -4845,8 +4845,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
 		return 0;
 	}
 
-	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
-				 bus_list);
+	child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
+					     struct pci_dev, bus_list));
 	up_read(&pci_bus_sem);
 
 	/*
On Tue, Jul 30, 2024 at 11:30:51AM +0200, Lukas Wunner wrote:
> From: Peter Zijlstra <peterz@infradead.org>
>
> commit 54da6a0924311c7cf5015533991e44fb8eb12773 upstream.
>
> Use __attribute__((__cleanup__(func))) to build:
>
>  - simple auto-release pointers using __free()
>  - 'classes' with constructor and destructor semantics for scope-based
>    resource management.
>  - lock guards based on the above classes.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Signed-off-by: Lukas Wunner <lukas@wunner.de>
> Link: https://lkml.kernel.org/r/20230612093537.614161713%40infradead.org
Do we really want this in 5.10? Are there any compiler versions that 5.10 still has to support that will break with this?
Same for 5.15.y, I'm loath to apply this to older kernels without loads of testing.
thanks,
greg k-h
On Tue, Jul 30, 2024 at 11:40:28AM +0200, Greg Kroah-Hartman wrote:
> On Tue, Jul 30, 2024 at 11:30:51AM +0200, Lukas Wunner wrote:
> > From: Peter Zijlstra <peterz@infradead.org>
> >
> > commit 54da6a0924311c7cf5015533991e44fb8eb12773 upstream.
> >
> > Use __attribute__((__cleanup__(func))) to build:
> >
> >  - simple auto-release pointers using __free()
> >  - 'classes' with constructor and destructor semantics for scope-based
> >    resource management.
> >  - lock guards based on the above classes.
> >
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > Signed-off-by: Lukas Wunner <lukas@wunner.de>
> > Link: https://lkml.kernel.org/r/20230612093537.614161713%40infradead.org
>
> Do we really want this in 5.10?
I think so because we may have to apply a lot more patches between now and Dec 2026 which use __cleanup variable attributes.
(Dec 2026 is EOL for 5.10 and 5.15.)
> Are there any compiler versions that 5.10 still has to support that will break with this?
No, apparently not.
5.10 requires gcc >= 4.9 or clang >= 10.0.1
5.15 requires gcc >= 5.1 or clang >= 10.0.1
I've looked through gcc docs and the first version mentioning __cleanup is gcc 3.3.6.
The situation around clang seems odd: __cleanup isn't mentioned in their docs before 15.0.0, yet we're only requiring 13.0.1 in the just-released v6.10.
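Either way, whether a given toolchain honors the attribute is easy to verify
locally with a trivial user-space test program along these lines -- just a
sanity check on one's own compiler, nothing from the patch:

	/* cc -Wall -o cleanup-test cleanup-test.c && ./cleanup-test */
	#include <stdio.h>
	#include <stdlib.h>

	static void free_buf(char **p)
	{
		free(*p);
		printf("cleanup ran\n");
	}

	int main(void)
	{
		char *buf __attribute__((__cleanup__(free_buf))) = malloc(16);

		return buf ? 0 : 1;	/* free_buf(&buf) runs as buf goes out of scope */
	}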
> Same for 5.15.y, I'm loath to apply this to older kernels without loads of testing.
I'm sorry, I don't have the resources to perform that amount of testing. I've sent you a backport of the PCI/DPC patch instead which avoids __cleanup.
Thanks,
Lukas