I'm announcing the release of the 5.10.129 kernel.
All users of the 5.10 kernel series must upgrade.
The updated 5.10.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-5.10.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
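For example, assuming you already have a clone of a kernel tree, something like the following should fetch and check out this release (the remote name "stable" is only illustrative):

	# add the stable remote once (the name is arbitrary)
	git remote add stable git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
	# fetch it, then check out the updated 5.10.y branch
	git fetch stable
	git checkout -b linux-5.10.y stable/linux-5.10.y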
thanks,
greg k-h
------------
 Makefile | 2
 arch/arm/xen/p2m.c | 6
 arch/powerpc/include/asm/bpf_perf_event.h | 9
 arch/powerpc/include/uapi/asm/bpf_perf_event.h | 9
 arch/powerpc/kernel/prom_init_check.sh | 2
 arch/powerpc/mm/nohash/book3e_pgtable.c | 6
 arch/s390/Kconfig | 1
 arch/s390/crypto/arch_random.c | 111 -------
 arch/s390/include/asm/archrandom.h | 13
 arch/s390/kernel/setup.c | 5
 drivers/block/xen-blkfront.c | 56 ++-
 drivers/clocksource/timer-ixp4xx.c | 1
 drivers/cpufreq/qoriq-cpufreq.c | 1
 drivers/devfreq/event/exynos-ppmu.c | 8
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3
 drivers/hwmon/ibmaem.c | 12
 drivers/infiniband/core/cm.c | 4
 drivers/infiniband/hw/qedr/qedr.h | 1
 drivers/infiniband/hw/qedr/verbs.c | 4
 drivers/md/dm-raid.c | 34 +-
 drivers/md/raid5.c | 1
 drivers/net/bonding/bond_3ad.c | 3
 drivers/net/bonding/bond_alb.c | 2
 drivers/net/caif/caif_virtio.c | 10
 drivers/net/dsa/bcm_sf2.c | 5
 drivers/net/ethernet/smsc/epic100.c | 4
 drivers/net/phy/dp83822.c | 4
 drivers/net/tun.c | 15
 drivers/net/usb/ax88179_178a.c | 101 ++++--
 drivers/net/usb/qmi_wwan.c | 2
 drivers/net/usb/usbnet.c | 4
 drivers/net/virtio_net.c | 8
 drivers/net/xen-netfront.c | 56 +++
 drivers/nfc/nfcmrvl/i2c.c | 6
 drivers/nfc/nfcmrvl/spi.c | 6
 drivers/nfc/nxp-nci/i2c.c | 3
 drivers/nvdimm/bus.c | 4
 drivers/nvme/host/pci.c | 3
 drivers/xen/gntdev-common.h | 7
 drivers/xen/gntdev.c | 140 ++++---
 fs/io_uring.c | 4
 fs/iomap/buffered-io.c | 7
 fs/nfsd/vfs.c | 3
 fs/xfs/libxfs/xfs_btree.c | 12
 fs/xfs/libxfs/xfs_sb.c | 16 -
 fs/xfs/xfs_aops.c | 17 -
 fs/xfs/xfs_error.c | 2
 fs/xfs/xfs_reflink.c | 3
 fs/xfs/xfs_super.c | 120 ++++---
 fs/xfs/xfs_trans.c | 23 -
 fs/xfs/xfs_trans.h | 30 +
 include/linux/dim.h | 2
 net/ipv4/ip_tunnel_core.c | 2
 net/ipv4/tcp_ipv4.c | 4
 net/ipv6/addrconf.c | 4
 net/ipv6/route.c | 9
 net/ipv6/seg6_hmac.c | 1
 net/ipv6/sit.c | 10
 net/netfilter/nft_set_hash.c | 2
 net/rose/rose_timer.c | 34 +-
 net/sched/act_api.c | 22 -
 net/sunrpc/xdr.c | 2
 net/tipc/node.c | 41 +-
 tools/testing/selftests/net/udpgso_bench.sh | 2
 tools/testing/selftests/rseq/Makefile | 2
 tools/testing/selftests/rseq/basic_percpu_ops_test.c | 5
 tools/testing/selftests/rseq/compiler.h | 30 +
 tools/testing/selftests/rseq/param_test.c | 8
 tools/testing/selftests/rseq/rseq-abi.h | 151 +++++++++
 tools/testing/selftests/rseq/rseq-arm.h | 110 +++---
 tools/testing/selftests/rseq/rseq-arm64.h | 79 +++--
 tools/testing/selftests/rseq/rseq-generic-thread-pointer.h | 25 +
 tools/testing/selftests/rseq/rseq-mips.h | 71 +---
 tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h | 30 +
 tools/testing/selftests/rseq/rseq-ppc.h | 128 +++--
 tools/testing/selftests/rseq/rseq-s390.h | 55 ++-
 tools/testing/selftests/rseq/rseq-skip.h | 2
 tools/testing/selftests/rseq/rseq-thread-pointer.h | 19 +
 tools/testing/selftests/rseq/rseq-x86-thread-pointer.h | 40 ++
 tools/testing/selftests/rseq/rseq-x86.h | 200 ++++++-----
 tools/testing/selftests/rseq/rseq.c | 165 +++++---
 tools/testing/selftests/rseq/rseq.h | 30 +
 82 files changed, 1426 insertions(+), 768 deletions(-)
Alexey Khoroshilov (1): NFSD: restore EINVAL error translation in nfsd_commit()
Anthony Iliopoulos (1): xfs: fix xfs_trans slab cache name
Carlo Lobrano (1): net: usb: qmi_wwan: add Telit 0x1060 composition
Chris Ye (1): nvdimm: Fix badblocks clear off-by-one error
Christophe Leroy (1): powerpc/book3e: Fix PUD allocation size in map_kernel_page()
Chuck Lever (1): SUNRPC: Fix READ_PLUS crasher
Daniele Palmas (1): net: usb: qmi_wwan: add Telit 0x1070 composition
Darrick J. Wong (1): xfs: fix xfs_reflink_unshare usage of filemap_write_and_wait_range
Dave Chinner (2):
      xfs: use current->journal_info for detecting transaction recursion
      xfs: update superblock counters correctly for !lazysbcount
Demi Marie Obenour (1): xen/gntdev: Avoid blocking in unmap_grant_pages()
Dimitris Michailidis (1): selftests/net: pass ipv6_args to udpgso_bench's IPv6 TCP test
Doug Berger (1): net: dsa: bcm_sf2: force pause link settings
Duoming Zhou (1): net: rose: fix UAF bugs caused by timer handler
Enguerrand de Ribaucourt (2):
      net: dp83822: disable false carrier interrupt
      net: dp83822: disable rx error interrupt
Eric Dumazet (3):
      net: bonding: fix possible NULL deref in rlb code
      tunnels: do not assume mac header is set in skb_tunnel_check_pmtu()
      tcp: add a missing nf_reset_ct() in 3WHS handling
Gao Xiang (1): xfs: ensure xfs_errortag_random_default matches XFS_ERRTAG_MAX
Greg Kroah-Hartman (2):
      clocksource/drivers/ixp4xx: remove EXPORT_SYMBOL_GPL from ixp4xx_timer_setup()
      Linux 5.10.129
Heinz Mauelshagen (1): dm raid: fix accesses beyond end of raid member array
Jakub Kicinski (3):
      net: tun: unlink NAPI from device on destruction
      net: tun: stop NAPI when detaching queues
      net: tun: avoid disabling NAPI twice
Jan Beulich (1): xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
Jason A. Donenfeld (1): s390/archrandom: simplify back to earlier design and initialize earlier
Jason Wang (2):
      virtio-net: fix race between ndo_open() and virtio_device_ready()
      caif_virtio: fix race between virtio_device_ready() and ndo_open()
Jens Axboe (1): io_uring: ensure that send/sendmsg and recv/recvmsg check sqe->ioprio
Jose Alonso (1): net: usb: ax88179_178a: Fix packet receiving
Kamal Heib (1): RDMA/qedr: Fix reporting QP timeout attribute
Krzysztof Kozlowski (1): nfc: nfcmrvl: Fix irq_of_parse_and_map() return value
Liam Howlett (1): powerpc/prom_init: Fix kernel config grep
Liang He (1): drivers: cpufreq: Add missing of_node_put() in qoriq-cpufreq.c
Masahiro Yamada (1): s390: remove unneeded 'select BUILD_BIN2C'
Mathieu Desnoyers (15):
      selftests/rseq: introduce own copy of rseq uapi header
      selftests/rseq: Remove useless assignment to cpu variable
      selftests/rseq: Remove volatile from __rseq_abi
      selftests/rseq: Introduce rseq_get_abi() helper
      selftests/rseq: Introduce thread pointer getters
      selftests/rseq: Uplift rseq selftests for compatibility with glibc-2.35
      selftests/rseq: Fix ppc32: wrong rseq_cs 32-bit field pointer on big endian
      selftests/rseq: Fix ppc32 missing instruction selection "u" and "x" for load/store
      selftests/rseq: Fix ppc32 offsets by using long rather than off_t
      selftests/rseq: Fix warnings about #if checks of undefined tokens
      selftests/rseq: Remove arm/mips asm goto compiler work-around
      selftests/rseq: Fix: work-around asm goto compiler bugs
      selftests/rseq: x86-64: use %fs segment selector for accessing rseq thread area
      selftests/rseq: x86-32: use %gs segment selector for accessing rseq thread area
      selftests/rseq: Change type of rseq_offset to ptrdiff_t
Miaoqian Lin (2):
      RDMA/cm: Fix memory leak in ib_cm_insert_listen
      PM / devfreq: exynos-ppmu: Fix refcount leak in of_get_devfreq_events
Michael Walle (1): NFC: nxp-nci: Don't issue a zero length i2c_master_read()
Mikulas Patocka (1): dm raid: fix KASAN warning in raid5_add_disks
Naveen N. Rao (1): powerpc/bpf: Fix use of user_pt_regs in uapi
Nicolas Dichtel (1): ipv6: take care of disable_policy when restoring routes
Oleksandr Tyshchenko (1): xen/arm: Fix race in RB-tree based P2M accounting
Oliver Neukum (1): usbnet: fix memory allocation in helpers
Pablo Greco (1): nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA XPG SX6000LNP (AKA SPECTRIX S40G)
Pablo Neira Ayuso (1): netfilter: nft_dynset: restore set element counter when failing to update
Pavel Reichl (2):
      xfs: rename variable mp to parsing_mp
      xfs: Skip repetitive warnings about mount options
Roger Pau Monne (4):
      xen/blkfront: fix leaking data in shared pages
      xen/netfront: fix leaking data in shared pages
      xen/netfront: force data bouncing when backend is untrusted
      xen/blkfront: force data bouncing when backend is untrusted
Ruili Ji (1): drm/amdgpu: To flush tlb for MMHUB of RAVEN series
Shuah Khan (1): selftests/rseq: remove ARRAY_SIZE define from individual tests
Tao Liu (1): linux/dim: Fix divide by 0 in RDMA DIM
Tong Zhang (1): epic100: fix use after free on rmmod
Victor Nogueira (1): net/sched: act_api: Notify user space if any actions were flushed before error
Xin Long (1): tipc: move bc link creation back to tipc_node_create
Yang Yingliang (1): hwmon: (ibmaem) don't call platform_device_del() if platform_device_add() fails
Yevhen Orlov (1): net: bonding: fix use-after-free after 802.3ad slave unbind
YueHaibing (1): net: ipv6: unexport __init-annotated seg6_hmac_net_init()
katrinzhou (1): ipv6/sit: fix ipip6_tunnel_get_prl return value
kernel test robot (1): sit: use min
diff --git a/Makefile b/Makefile
index b89ad8a987db..7d52cee37488 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 128
+SUBLEVEL = 129
 EXTRAVERSION =
 NAME = Dare mighty things
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index acb464547a54..4a1991a103ea 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -62,11 +62,12 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
unsigned long __pfn_to_mfn(unsigned long pfn) { - struct rb_node *n = phys_to_mach.rb_node; + struct rb_node *n; struct xen_p2m_entry *entry; unsigned long irqflags;
read_lock_irqsave(&p2m_lock, irqflags); + n = phys_to_mach.rb_node; while (n) { entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (entry->pfn <= pfn && @@ -153,10 +154,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn, int rc; unsigned long irqflags; struct xen_p2m_entry *p2m_entry; - struct rb_node *n = phys_to_mach.rb_node; + struct rb_node *n;
if (mfn == INVALID_P2M_ENTRY) { write_lock_irqsave(&p2m_lock, irqflags); + n = phys_to_mach.rb_node; while (n) { p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (p2m_entry->pfn <= pfn && diff --git a/arch/powerpc/include/asm/bpf_perf_event.h b/arch/powerpc/include/asm/bpf_perf_event.h new file mode 100644 index 000000000000..e8a7b4ffb58c --- /dev/null +++ b/arch/powerpc/include/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H +#define _ASM_POWERPC_BPF_PERF_EVENT_H + +#include <asm/ptrace.h> + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */ diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h deleted file mode 100644 index 5e1e648aeec4..000000000000 --- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ -#define _UAPI__ASM_BPF_PERF_EVENT_H__ - -#include <asm/ptrace.h> - -typedef struct user_pt_regs bpf_user_pt_regs_t; - -#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index b183ab9c5107..dfa5f729f774 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -13,7 +13,7 @@ # If you really need to reference something from prom_init.o add # it to the list below:
-grep "^CONFIG_KASAN=y$" .config >/dev/null +grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null if [ $? -eq 0 ] then MEM_FUNCS="__memcpy __memset" diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c index 77884e24281d..3d845e001c87 100644 --- a/arch/powerpc/mm/nohash/book3e_pgtable.c +++ b/arch/powerpc/mm/nohash/book3e_pgtable.c @@ -95,8 +95,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (p4d_none(*p4dp)) { - pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); - p4d_populate(&init_mm, p4dp, pmdp); + pudp = early_alloc_pgtable(PUD_TABLE_SIZE); + p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); if (pud_none(*pudp)) { @@ -105,7 +105,7 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) } pmdp = pmd_offset(pudp, ea); if (!pmd_present(*pmdp)) { - ptep = early_alloc_pgtable(PAGE_SIZE); + ptep = early_alloc_pgtable(PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmdp, ptep); } ptep = pte_offset_kernel(pmdp, ea); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 896b68e541b2..878993982e39 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -507,7 +507,6 @@ config KEXEC config KEXEC_FILE bool "kexec file based system call" select KEXEC_CORE - select BUILD_BIN2C depends on CRYPTO depends on CRYPTO_SHA256 depends on CRYPTO_SHA256_S390 diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c index 4cbb4b6d85a8..1f2d40993c4d 100644 --- a/arch/s390/crypto/arch_random.c +++ b/arch/s390/crypto/arch_random.c @@ -2,126 +2,17 @@ /* * s390 arch random implementation. * - * Copyright IBM Corp. 2017, 2018 + * Copyright IBM Corp. 2017, 2020 * Author(s): Harald Freudenberger - * - * The s390_arch_random_generate() function may be called from random.c - * in interrupt context. So this implementation does the best to be very - * fast. There is a buffer of random data which is asynchronously checked - * and filled by a workqueue thread. - * If there are enough bytes in the buffer the s390_arch_random_generate() - * just delivers these bytes. Otherwise false is returned until the - * worker thread refills the buffer. - * The worker fills the rng buffer by pulling fresh entropy from the - * high quality (but slow) true hardware random generator. This entropy - * is then spread over the buffer with an pseudo random generator PRNG. - * As the arch_get_random_seed_long() fetches 8 bytes and the calling - * function add_interrupt_randomness() counts this as 1 bit entropy the - * distribution needs to make sure there is in fact 1 bit entropy contained - * in 8 bytes of the buffer. The current values pull 32 byte entropy - * and scatter this into a 2048 byte buffer. So 8 byte in the buffer - * will contain 1 bit of entropy. - * The worker thread is rescheduled based on the charge level of the - * buffer but at least with 500 ms delay to avoid too much CPU consumption. - * So the max. amount of rng data delivered via arch_get_random_seed is - * limited to 4k bytes per second. */
#include <linux/kernel.h> #include <linux/atomic.h> #include <linux/random.h> -#include <linux/slab.h> #include <linux/static_key.h> -#include <linux/workqueue.h> #include <asm/cpacf.h>
DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0); EXPORT_SYMBOL(s390_arch_random_counter); - -#define ARCH_REFILL_TICKS (HZ/2) -#define ARCH_PRNG_SEED_SIZE 32 -#define ARCH_RNG_BUF_SIZE 2048 - -static DEFINE_SPINLOCK(arch_rng_lock); -static u8 *arch_rng_buf; -static unsigned int arch_rng_buf_idx; - -static void arch_rng_refill_buffer(struct work_struct *); -static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer); - -bool s390_arch_random_generate(u8 *buf, unsigned int nbytes) -{ - /* max hunk is ARCH_RNG_BUF_SIZE */ - if (nbytes > ARCH_RNG_BUF_SIZE) - return false; - - /* lock rng buffer */ - if (!spin_trylock(&arch_rng_lock)) - return false; - - /* try to resolve the requested amount of bytes from the buffer */ - arch_rng_buf_idx -= nbytes; - if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) { - memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes); - atomic64_add(nbytes, &s390_arch_random_counter); - spin_unlock(&arch_rng_lock); - return true; - } - - /* not enough bytes in rng buffer, refill is done asynchronously */ - spin_unlock(&arch_rng_lock); - - return false; -} -EXPORT_SYMBOL(s390_arch_random_generate); - -static void arch_rng_refill_buffer(struct work_struct *unused) -{ - unsigned int delay = ARCH_REFILL_TICKS; - - spin_lock(&arch_rng_lock); - if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) { - /* buffer is exhausted and needs refill */ - u8 seed[ARCH_PRNG_SEED_SIZE]; - u8 prng_wa[240]; - /* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */ - cpacf_trng(NULL, 0, seed, sizeof(seed)); - /* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */ - memset(prng_wa, 0, sizeof(prng_wa)); - cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, - &prng_wa, NULL, 0, seed, sizeof(seed)); - cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, - &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0); - arch_rng_buf_idx = ARCH_RNG_BUF_SIZE; - } - delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE; - spin_unlock(&arch_rng_lock); - - /* kick next check */ - queue_delayed_work(system_long_wq, &arch_rng_work, delay); -} - -static int __init s390_arch_random_init(void) -{ - /* all the needed PRNO subfunctions available ? */ - if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) && - cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) { - - /* alloc arch random working buffer */ - arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL); - if (!arch_rng_buf) - return -ENOMEM; - - /* kick worker queue job to fill the random buffer */ - queue_delayed_work(system_long_wq, - &arch_rng_work, ARCH_REFILL_TICKS); - - /* enable arch random to the outside world */ - static_branch_enable(&s390_arch_random_available); - } - - return 0; -} -arch_initcall(s390_arch_random_init); diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h index de61ce562052..2c6e1c6ecbe7 100644 --- a/arch/s390/include/asm/archrandom.h +++ b/arch/s390/include/asm/archrandom.h @@ -2,7 +2,7 @@ /* * Kernel interface for the s390 arch_random_* functions * - * Copyright IBM Corp. 2017 + * Copyright IBM Corp. 2017, 2020 * * Author: Harald Freudenberger freude@de.ibm.com * @@ -15,12 +15,11 @@
#include <linux/static_key.h> #include <linux/atomic.h> +#include <asm/cpacf.h>
DECLARE_STATIC_KEY_FALSE(s390_arch_random_available); extern atomic64_t s390_arch_random_counter;
-bool s390_arch_random_generate(u8 *buf, unsigned int nbytes); - static inline bool __must_check arch_get_random_long(unsigned long *v) { return false; @@ -34,7 +33,9 @@ static inline bool __must_check arch_get_random_int(unsigned int *v) static inline bool __must_check arch_get_random_seed_long(unsigned long *v) { if (static_branch_likely(&s390_arch_random_available)) { - return s390_arch_random_generate((u8 *)v, sizeof(*v)); + cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v)); + atomic64_add(sizeof(*v), &s390_arch_random_counter); + return true; } return false; } @@ -42,7 +43,9 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v) static inline bool __must_check arch_get_random_seed_int(unsigned int *v) { if (static_branch_likely(&s390_arch_random_available)) { - return s390_arch_random_generate((u8 *)v, sizeof(*v)); + cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v)); + atomic64_add(sizeof(*v), &s390_arch_random_counter); + return true; } return false; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index f9f8721dc532..520cf5a152cf 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -1009,6 +1009,11 @@ static void __init setup_randomness(void) if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); memblock_free((unsigned long) vmms, PAGE_SIZE); + +#ifdef CONFIG_ARCH_RANDOM + if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG)) + static_branch_enable(&s390_arch_random_available); +#endif }
/* diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 47d4bb23d6f3..abbb68b6d9bd 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -151,6 +151,10 @@ static unsigned int xen_blkif_max_ring_order; module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444); MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
+static bool __read_mostly xen_blkif_trusted = true; +module_param_named(trusted, xen_blkif_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + #define BLK_RING_SIZE(info) \ __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
@@ -208,6 +212,7 @@ struct blkfront_info unsigned int feature_discard:1; unsigned int feature_secdiscard:1; unsigned int feature_persistent:1; + unsigned int bounce:1; unsigned int discard_granularity; unsigned int discard_alignment; /* Number of 4KB segments handled */ @@ -310,8 +315,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) if (!gnt_list_entry) goto out_of_memory;
- if (info->feature_persistent) { - granted_page = alloc_page(GFP_NOIO); + if (info->bounce) { + granted_page = alloc_page(GFP_NOIO | __GFP_ZERO); if (!granted_page) { kfree(gnt_list_entry); goto out_of_memory; @@ -330,7 +335,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num) list_for_each_entry_safe(gnt_list_entry, n, &rinfo->grants, node) { list_del(&gnt_list_entry->node); - if (info->feature_persistent) + if (info->bounce) __free_page(gnt_list_entry->page); kfree(gnt_list_entry); i--; @@ -376,7 +381,7 @@ static struct grant *get_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); - if (info->feature_persistent) + if (info->bounce) grant_foreign_access(gnt_list_entry, info); else { /* Grant access to the GFN passed by the caller */ @@ -400,7 +405,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head, /* Assign a gref to this page */ gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); BUG_ON(gnt_list_entry->gref == -ENOSPC); - if (!info->feature_persistent) { + if (!info->bounce) { struct page *indirect_page;
/* Fetch a pre-allocated page to use for indirect grefs */ @@ -715,7 +720,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri .grant_idx = 0, .segments = NULL, .rinfo = rinfo, - .need_copy = rq_data_dir(req) && info->feature_persistent, + .need_copy = rq_data_dir(req) && info->bounce, };
/* @@ -1035,11 +1040,12 @@ static void xlvbd_flush(struct blkfront_info *info) { blk_queue_write_cache(info->rq, info->feature_flush ? true : false, info->feature_fua ? true : false); - pr_info("blkfront: %s: %s %s %s %s %s\n", + pr_info("blkfront: %s: %s %s %s %s %s %s %s\n", info->gd->disk_name, flush_info(info), "persistent grants:", info->feature_persistent ? "enabled;" : "disabled;", "indirect descriptors:", - info->max_indirect_segments ? "enabled;" : "disabled;"); + info->max_indirect_segments ? "enabled;" : "disabled;", + "bounce buffer:", info->bounce ? "enabled" : "disabled;"); }
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) @@ -1273,7 +1279,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) if (!list_empty(&rinfo->indirect_pages)) { struct page *indirect_page, *n;
- BUG_ON(info->feature_persistent); + BUG_ON(info->bounce); list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { list_del(&indirect_page->lru); __free_page(indirect_page); @@ -1290,7 +1296,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) 0, 0UL); rinfo->persistent_gnts_c--; } - if (info->feature_persistent) + if (info->bounce) __free_page(persistent_gnt->page); kfree(persistent_gnt); } @@ -1311,7 +1317,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo) for (j = 0; j < segs; j++) { persistent_gnt = rinfo->shadow[i].grants_used[j]; gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); - if (info->feature_persistent) + if (info->bounce) __free_page(persistent_gnt->page); kfree(persistent_gnt); } @@ -1501,7 +1507,7 @@ static int blkif_completion(unsigned long *id, data.s = s; num_sg = s->num_sg;
- if (bret->operation == BLKIF_OP_READ && info->feature_persistent) { + if (bret->operation == BLKIF_OP_READ && info->bounce) { for_each_sg(s->sg, sg, num_sg, i) { BUG_ON(sg->offset + sg->length > PAGE_SIZE);
@@ -1560,7 +1566,7 @@ static int blkif_completion(unsigned long *id, * Add the used indirect page back to the list of * available pages for indirect grefs. */ - if (!info->feature_persistent) { + if (!info->bounce) { indirect_page = s->indirect_grants[i]->page; list_add(&indirect_page->lru, &rinfo->indirect_pages); } @@ -1753,7 +1759,7 @@ static int setup_blkring(struct xenbus_device *dev, for (i = 0; i < info->nr_ring_pages; i++) rinfo->ring_ref[i] = GRANT_INVALID_REF;
- sring = alloc_pages_exact(ring_size, GFP_NOIO); + sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO); if (!sring) { xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); return -ENOMEM; @@ -1857,6 +1863,10 @@ static int talk_to_blkback(struct xenbus_device *dev, if (!info) return -ENODEV;
+ /* Check if backend is trusted. */ + info->bounce = !xen_blkif_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + max_page_order = xenbus_read_unsigned(info->xbdev->otherend, "max-ring-page-order", 0); ring_page_order = min(xen_blkif_max_ring_order, max_page_order); @@ -2283,17 +2293,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) if (err) goto out_of_memory;
- if (!info->feature_persistent && info->max_indirect_segments) { + if (!info->bounce && info->max_indirect_segments) { /* - * We are using indirect descriptors but not persistent - * grants, we need to allocate a set of pages that can be + * We are using indirect descriptors but don't have a bounce + * buffer, we need to allocate a set of pages that can be * used for mapping indirect grefs */ int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
BUG_ON(!list_empty(&rinfo->indirect_pages)); for (i = 0; i < num; i++) { - struct page *indirect_page = alloc_page(GFP_KERNEL); + struct page *indirect_page = alloc_page(GFP_KERNEL | + __GFP_ZERO); if (!indirect_page) goto out_of_memory; list_add(&indirect_page->lru, &rinfo->indirect_pages); @@ -2386,6 +2397,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info) info->feature_persistent = !!xenbus_read_unsigned(info->xbdev->otherend, "feature-persistent", 0); + if (info->feature_persistent) + info->bounce = true;
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, "feature-max-indirect-segments", 0); @@ -2759,6 +2772,13 @@ static void blkfront_delay_work(struct work_struct *work) struct blkfront_info *info; bool need_schedule_work = false;
+ /* + * Note that when using bounce buffers but not persistent grants + * there's no need to run blkfront_delay_work because grants are + * revoked in blkif_completion or else an error is reported and the + * connection is closed. + */ + mutex_lock(&blkfront_mutex);
list_for_each_entry(info, &info_list, info_list) { diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c index 9396745e1c17..ad904bbbac6f 100644 --- a/drivers/clocksource/timer-ixp4xx.c +++ b/drivers/clocksource/timer-ixp4xx.c @@ -258,7 +258,6 @@ void __init ixp4xx_timer_setup(resource_size_t timerbase, } ixp4xx_timer_register(base, timer_irq, timer_freq); } -EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
#ifdef CONFIG_OF static __init int ixp4xx_of_timer_init(struct device_node *np) diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 6b6b20da2bcf..573b417e1483 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -275,6 +275,7 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev)
np = of_find_matching_node(NULL, qoriq_cpufreq_blacklist); if (np) { + of_node_put(np); dev_info(&pdev->dev, "Disabling due to erratum A-008083"); return -ENODEV; } diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index 17ed980d9099..d6da9c3e3106 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -514,15 +514,19 @@ static int of_get_devfreq_events(struct device_node *np,
count = of_get_child_count(events_np); desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL); - if (!desc) + if (!desc) { + of_node_put(events_np); return -ENOMEM; + } info->num_events = count;
of_id = of_match_device(exynos_ppmu_id_match, dev); if (of_id) info->ppmu_type = (enum exynos_ppmu_type)of_id->data; - else + else { + of_node_put(events_np); return -EINVAL; + }
j = 0; for_each_child_of_node(events_np, node) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index fb6230c62daa..d3a974d10552 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -689,7 +689,8 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid) const uint32_t flush_type = 0; bool all_hub = false;
- if (adev->family == AMDGPU_FAMILY_AI) + if (adev->family == AMDGPU_FAMILY_AI || + adev->family == AMDGPU_FAMILY_RV) all_hub = true;
return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index a4ec85207782..2e6d6a5cffa1 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -550,7 +550,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
res = platform_device_add(data->pdev); if (res) - goto ipmi_err; + goto dev_add_err;
platform_set_drvdata(data->pdev, data);
@@ -598,7 +598,9 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle) ipmi_destroy_user(data->ipmi.user); ipmi_err: platform_set_drvdata(data->pdev, NULL); - platform_device_unregister(data->pdev); + platform_device_del(data->pdev); +dev_add_err: + platform_device_put(data->pdev); dev_err: ida_simple_remove(&aem_ida, data->id); id_err: @@ -690,7 +692,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
res = platform_device_add(data->pdev); if (res) - goto ipmi_err; + goto dev_add_err;
platform_set_drvdata(data->pdev, data);
@@ -738,7 +740,9 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe, ipmi_destroy_user(data->ipmi.user); ipmi_err: platform_set_drvdata(data->pdev, NULL); - platform_device_unregister(data->pdev); + platform_device_del(data->pdev); +dev_add_err: + platform_device_put(data->pdev); dev_err: ida_simple_remove(&aem_ida, data->id); id_err: diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ee568bdf3c78..3cc7a23fa69f 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1280,8 +1280,10 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, return ERR_CAST(cm_id_priv);
err = cm_init_listen(cm_id_priv, service_id, 0); - if (err) + if (err) { + ib_destroy_cm_id(&cm_id_priv->id); return ERR_PTR(err); + }
spin_lock_irq(&cm_id_priv->lock); listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 9dde70373a55..8ef6eecc42a0 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -418,6 +418,7 @@ struct qedr_qp { u32 sq_psn; u32 qkey; u32 dest_qp_num; + u8 timeout;
/* Relevant to qps created from kernel space only (ULPs) */ u8 prev_wqe_size; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index eeb87f31cd25..f7b97b8e81a4 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2622,6 +2622,8 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1 << max_t(int, attr->timeout - 8, 0); else qp_params.ack_timeout = 0; + + qp->timeout = attr->timeout; }
if (attr_mask & IB_QP_RETRY_CNT) { @@ -2781,7 +2783,7 @@ int qedr_query_qp(struct ib_qp *ibqp, rdma_ah_set_dgid_raw(&qp_attr->ah_attr, ¶ms.dgid.bytes[0]); rdma_ah_set_port_num(&qp_attr->ah_attr, 1); rdma_ah_set_sl(&qp_attr->ah_attr, 0); - qp_attr->timeout = params.timeout; + qp_attr->timeout = qp->timeout; qp_attr->rnr_retry = params.rnr_retry; qp_attr->retry_cnt = params.retry_cnt; qp_attr->min_rnr_timer = params.min_rnr_nak_timer; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index f5083b4a0195..4e94200e0142 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1002,12 +1002,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) static int validate_raid_redundancy(struct raid_set *rs) { unsigned int i, rebuild_cnt = 0; - unsigned int rebuilds_per_group = 0, copies; + unsigned int rebuilds_per_group = 0, copies, raid_disks; unsigned int group_size, last_group_start;
- for (i = 0; i < rs->md.raid_disks; i++) - if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || - !rs->dev[i].rdev.sb_page) + for (i = 0; i < rs->raid_disks; i++) + if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) && + ((!test_bit(In_sync, &rs->dev[i].rdev.flags) || + !rs->dev[i].rdev.sb_page))) rebuild_cnt++;
switch (rs->md.level) { @@ -1047,8 +1048,9 @@ static int validate_raid_redundancy(struct raid_set *rs) * A A B B C * C D D E E */ + raid_disks = min(rs->raid_disks, rs->md.raid_disks); if (__is_raid10_near(rs->md.new_layout)) { - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < raid_disks; i++) { if (!(i % copies)) rebuilds_per_group = 0; if ((!rs->dev[i].rdev.sb_page || @@ -1071,10 +1073,10 @@ static int validate_raid_redundancy(struct raid_set *rs) * results in the need to treat the last (potentially larger) * set differently. */ - group_size = (rs->md.raid_disks / copies); - last_group_start = (rs->md.raid_disks / group_size) - 1; + group_size = (raid_disks / copies); + last_group_start = (raid_disks / group_size) - 1; last_group_start *= group_size; - for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < raid_disks; i++) { if (!(i % copies) && !(i > last_group_start)) rebuilds_per_group = 0; if ((!rs->dev[i].rdev.sb_page || @@ -1589,7 +1591,7 @@ static sector_t __rdev_sectors(struct raid_set *rs) { int i;
- for (i = 0; i < rs->md.raid_disks; i++) { + for (i = 0; i < rs->raid_disks; i++) { struct md_rdev *rdev = &rs->dev[i].rdev;
if (!test_bit(Journal, &rdev->flags) && @@ -3732,13 +3734,13 @@ static int raid_iterate_devices(struct dm_target *ti, unsigned int i; int r = 0;
- for (i = 0; !r && i < rs->md.raid_disks; i++) - if (rs->dev[i].data_dev) - r = fn(ti, - rs->dev[i].data_dev, - 0, /* No offset on data devs */ - rs->md.dev_sectors, - data); + for (i = 0; !r && i < rs->raid_disks; i++) { + if (rs->dev[i].data_dev) { + r = fn(ti, rs->dev[i].data_dev, + 0, /* No offset on data devs */ + rs->md.dev_sectors, data); + } + }
return r; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 02767866b9ff..c8cafdb094aa 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -8004,6 +8004,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) */ if (rdev->saved_raid_disk >= 0 && rdev->saved_raid_disk >= first && + rdev->saved_raid_disk <= last && conf->disks[rdev->saved_raid_disk].rdev == NULL) first = rdev->saved_raid_disk;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index c2cef7ba2671..325b20729d8b 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2209,7 +2209,8 @@ void bond_3ad_unbind_slave(struct slave *slave) temp_aggregator->num_of_ports--; if (__agg_active_ports(temp_aggregator) == 0) { select_new_active_agg = temp_aggregator->is_active; - ad_clear_agg(temp_aggregator); + if (temp_aggregator->num_of_ports == 0) + ad_clear_agg(temp_aggregator); if (select_new_active_agg) { slave_info(bond->dev, slave->dev, "Removing an active aggregator\n"); /* select new active aggregator */ diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 0436aef9c9ef..152f76f86927 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1279,12 +1279,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) return res;
if (rlb_enabled) { - bond->alb_info.rlb_enabled = 1; res = rlb_initialize(bond); if (res) { tlb_deinitialize(bond); return res; } + bond->alb_info.rlb_enabled = 1; } else { bond->alb_info.rlb_enabled = 0; } diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 47a6d62b7511..a701932f5cc2 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -723,13 +723,21 @@ static int cfv_probe(struct virtio_device *vdev) /* Carrier is off until netdevice is opened */ netif_carrier_off(netdev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */ + rtnl_lock(); + /* register Netdev */ - err = register_netdev(netdev); + err = register_netdevice(netdev); if (err) { + rtnl_unlock(); dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err); goto err; }
+ virtio_device_ready(vdev); + + rtnl_unlock(); + debugfs_init(cfv);
return 0; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index b712b4f27efd..c6563d212476 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -774,6 +774,11 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, if (duplex == DUPLEX_FULL) reg |= DUPLX_MODE;
+ if (tx_pause) + reg |= TXFLOW_CNTL; + if (rx_pause) + reg |= RXFLOW_CNTL; + core_writel(priv, reg, offset); }
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 51cd7dca91cd..3a7d2baec6c1 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1513,14 +1513,14 @@ static void epic_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct epic_private *ep = netdev_priv(dev);
+ unregister_netdev(dev); dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); - unregister_netdev(dev); pci_iounmap(pdev, ep->ioaddr); - pci_release_regions(pdev); free_netdev(dev); + pci_release_regions(pdev); pci_disable_device(pdev); /* pci_power_off(pdev, -1); */ } diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 3d75b98f3051..3a8849716459 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -243,9 +243,7 @@ static int dp83822_config_intr(struct phy_device *phydev) if (misr_status < 0) return misr_status;
- misr_status |= (DP83822_RX_ERR_HF_INT_EN | - DP83822_FALSE_CARRIER_HF_INT_EN | - DP83822_LINK_STAT_INT_EN | + misr_status |= (DP83822_LINK_STAT_INT_EN | DP83822_ENERGY_DET_INT_EN | DP83822_LINK_QUAL_INT_EN);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 55ce141c93c7..be9ff6a74ecc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -279,6 +279,12 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, } }
+static void tun_napi_enable(struct tun_file *tfile) +{ + if (tfile->napi_enabled) + napi_enable(&tfile->napi); +} + static void tun_napi_disable(struct tun_file *tfile) { if (tfile->napi_enabled) @@ -640,7 +646,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun = rtnl_dereference(tfile->tun);
if (tun && clean) { - tun_napi_disable(tfile); + if (!tfile->detached) + tun_napi_disable(tfile); tun_napi_del(tfile); }
@@ -659,8 +666,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean) if (clean) { RCU_INIT_POINTER(tfile->tun, NULL); sock_put(&tfile->sk); - } else + } else { tun_disable_queue(tun, tfile); + tun_napi_disable(tfile); + }
synchronize_net(); tun_flow_delete_by_queue(tun, tun->numqueues + 1); @@ -733,6 +742,7 @@ static void tun_detach_all(struct net_device *dev) sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { + tun_napi_del(tfile); tun_enable_queue(tfile); tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); @@ -813,6 +823,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
if (tfile->detached) { tun_enable_queue(tfile); + tun_napi_enable(tfile); } else { sock_hold(&tfile->sk); tun_napi_init(tun, tfile, napi, napi_frags); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 0b0cbcee1920..79a53fe245e5 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1471,6 +1471,42 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) * are bundled into this buffer and where we can find an array of * per-packet metadata (which contains elements encoded into u16). */ + + /* SKB contents for current firmware: + * <packet 1> <padding> + * ... + * <packet N> <padding> + * <per-packet metadata entry 1> <dummy header> + * ... + * <per-packet metadata entry N> <dummy header> + * <padding2> <rx_hdr> + * + * where: + * <packet N> contains pkt_len bytes: + * 2 bytes of IP alignment pseudo header + * packet received + * <per-packet metadata entry N> contains 4 bytes: + * pkt_len and fields AX_RXHDR_* + * <padding> 0-7 bytes to terminate at + * 8 bytes boundary (64-bit). + * <padding2> 4 bytes to make rx_hdr terminate at + * 8 bytes boundary (64-bit) + * <dummy-header> contains 4 bytes: + * pkt_len=0 and AX_RXHDR_DROP_ERR + * <rx-hdr> contains 4 bytes: + * pkt_cnt and hdr_off (offset of + * <per-packet metadata entry 1>) + * + * pkt_cnt is number of entrys in the per-packet metadata. + * In current firmware there is 2 entrys per packet. + * The first points to the packet and the + * second is a dummy header. + * This was done probably to align fields in 64-bit and + * maintain compatibility with old firmware. + * This code assumes that <dummy header> and <padding2> are + * optional. + */ + if (skb->len < 4) return 0; skb_trim(skb, skb->len - 4); @@ -1484,51 +1520,66 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* Make sure that the bounds of the metadata array are inside the SKB * (and in front of the counter at the end). */ - if (pkt_cnt * 2 + hdr_off > skb->len) + if (pkt_cnt * 4 + hdr_off > skb->len) return 0; pkt_hdr = (u32 *)(skb->data + hdr_off);
/* Packets must not overlap the metadata array */ skb_trim(skb, hdr_off);
- for (; ; pkt_cnt--, pkt_hdr++) { + for (; pkt_cnt > 0; pkt_cnt--, pkt_hdr++) { + u16 pkt_len_plus_padd; u16 pkt_len;
le32_to_cpus(pkt_hdr); pkt_len = (*pkt_hdr >> 16) & 0x1fff; + pkt_len_plus_padd = (pkt_len + 7) & 0xfff8;
- if (pkt_len > skb->len) + /* Skip dummy header used for alignment + */ + if (pkt_len == 0) + continue; + + if (pkt_len_plus_padd > skb->len) return 0;
/* Check CRC or runt packet */ - if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) && - pkt_len >= 2 + ETH_HLEN) { - bool last = (pkt_cnt == 0); - - if (last) { - ax_skb = skb; - } else { - ax_skb = skb_clone(skb, GFP_ATOMIC); - if (!ax_skb) - return 0; - } - ax_skb->len = pkt_len; - /* Skip IP alignment pseudo header */ - skb_pull(ax_skb, 2); - skb_set_tail_pointer(ax_skb, ax_skb->len); - ax_skb->truesize = pkt_len + sizeof(struct sk_buff); - ax88179_rx_checksum(ax_skb, pkt_hdr); + if ((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) || + pkt_len < 2 + ETH_HLEN) { + dev->net->stats.rx_errors++; + skb_pull(skb, pkt_len_plus_padd); + continue; + }
- if (last) - return 1; + /* last packet */ + if (pkt_len_plus_padd == skb->len) { + skb_trim(skb, pkt_len);
- usbnet_skb_return(dev, ax_skb); + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + + skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd); + ax88179_rx_checksum(skb, pkt_hdr); + return 1; }
- /* Trim this packet away from the SKB */ - if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8)) + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (!ax_skb) return 0; + skb_trim(ax_skb, pkt_len); + + /* Skip IP alignment pseudo header */ + skb_pull(ax_skb, 2); + + skb->truesize = pkt_len_plus_padd + + SKB_DATA_ALIGN(sizeof(struct sk_buff)); + ax88179_rx_checksum(ax_skb, pkt_hdr); + usbnet_skb_return(dev, ax_skb); + + skb_pull(skb, pkt_len_plus_padd); } + + return 0; }
static struct sk_buff * diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 597766d14563..48e8b94e4a7c 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1293,6 +1293,8 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 402390b1a66b..74a833ad7aa9 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1969,7 +1969,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size);
if (size) { - buf = kmalloc(size, GFP_KERNEL); + buf = kmalloc(size, GFP_NOIO); if (!buf) goto out; } @@ -2001,7 +2001,7 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size);
if (data) { - buf = kmemdup(data, size, GFP_KERNEL); + buf = kmemdup(data, size, GFP_NOIO); if (!buf) goto out; } else { diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ad9064df3deb..37178b078ee3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3171,14 +3171,20 @@ static int virtnet_probe(struct virtio_device *vdev) } }
- err = register_netdev(dev); + /* serialize netdev register + virtio_device_ready() with ndo_open() */ + rtnl_lock(); + + err = register_netdevice(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); + rtnl_unlock(); goto free_failover; }
virtio_device_ready(vdev);
+ rtnl_unlock(); + err = virtnet_cpu_notif_add(vi); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 1a69b5246133..569f3c8e7b75 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface");
+static bool __read_mostly xennet_trusted = true; +module_param_named(trusted, xennet_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + #define XENNET_TIMEOUT (5 * HZ)
static const struct ethtool_ops xennet_ethtool_ops; @@ -175,6 +179,9 @@ struct netfront_info { /* Is device behaving sane? */ bool broken;
+ /* Should skbs be bounced into a zeroed buffer? */ + bool bounce; + atomic_t rx_gso_checksum_fixup; };
@@ -273,7 +280,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) if (unlikely(!skb)) return NULL;
- page = page_pool_dev_alloc_pages(queue->page_pool); + page = page_pool_alloc_pages(queue->page_pool, + GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO); if (unlikely(!page)) { kfree_skb(skb); return NULL; @@ -669,6 +677,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n, return n - drops; }
+struct sk_buff *bounce_skb(const struct sk_buff *skb) +{ + unsigned int headerlen = skb_headroom(skb); + /* Align size to allocate full pages and avoid contiguous data leaks */ + unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len, + XEN_PAGE_SIZE); + struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO); + + if (!n) + return NULL; + + if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) { + WARN_ONCE(1, "misaligned skb allocated\n"); + kfree_skb(n); + return NULL; + } + + /* Set the data pointer */ + skb_reserve(n, headerlen); + /* Set the tail pointer and length */ + skb_put(n, skb->len); + + BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); + + skb_copy_header(n, skb); + return n; +}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
@@ -722,9 +757,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* The first req should be at least ETH_HLEN size or the packet will be * dropped by netback. + * + * If the backend is not trusted bounce all data to zeroed pages to + * avoid exposing contiguous data on the granted page not belonging to + * the skb. */ - if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { - nskb = skb_copy(skb, GFP_ATOMIC); + if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) { + nskb = bounce_skb(skb); if (!nskb) goto drop; dev_consume_skb_any(skb); @@ -1057,8 +1096,10 @@ static int xennet_get_responses(struct netfront_queue *queue, } } rcu_read_unlock(); -next: + __skb_queue_tail(list, skb); + +next: if (!(rx->flags & XEN_NETRXF_more_data)) break;
@@ -2248,6 +2289,10 @@ static int talk_to_netback(struct xenbus_device *dev,
info->netdev->irq = 0;
+ /* Check if backend is trusted. */ + info->bounce = !xennet_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + /* Check if backend supports multiple queues */ max_queues = xenbus_read_unsigned(info->xbdev->otherend, "multi-queue-max-queues", 1); @@ -2414,6 +2459,9 @@ static int xennet_connect(struct net_device *dev) return err; if (np->netback_has_xdp_headroom) pr_info("backend supports XDP headroom\n"); + if (np->bounce) + dev_info(&np->xbdev->dev, + "bouncing transmitted data to zeroed pages\n");
/* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c index 18cd96284b77..f81f1cae9324 100644 --- a/drivers/nfc/nfcmrvl/i2c.c +++ b/drivers/nfc/nfcmrvl/i2c.c @@ -186,9 +186,9 @@ static int nfcmrvl_i2c_parse_dt(struct device_node *node, pdata->irq_polarity = IRQF_TRIGGER_RISING;
ret = irq_of_parse_and_map(node, 0); - if (ret < 0) { - pr_err("Unable to get irq, error: %d\n", ret); - return ret; + if (!ret) { + pr_err("Unable to get irq\n"); + return -EINVAL; } pdata->irq = ret;
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index 8e0ddb434770..1f4120e3314b 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -129,9 +129,9 @@ static int nfcmrvl_spi_parse_dt(struct device_node *node, }
ret = irq_of_parse_and_map(node, 0); - if (ret < 0) { - pr_err("Unable to get irq, error: %d\n", ret); - return ret; + if (!ret) { + pr_err("Unable to get irq\n"); + return -EINVAL; } pdata->irq = ret;
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 9f60e4dc5a90..3943a30053b3 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -162,6 +162,9 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy,
skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE);
+ if (!header.plen) + return 0; + r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen); if (r != header.plen) { nfc_err(&client->dev, diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 2304c6183822..9ec59960f216 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -187,8 +187,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data) ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
/* make sure we are in the region */ - if (ctx->phys < nd_region->ndr_start - || (ctx->phys + ctx->cleared) > ndr_end) + if (ctx->phys < nd_region->ndr_start || + (ctx->phys + ctx->cleared - 1) > ndr_end) return 0;
sector = (ctx->phys - nd_region->ndr_start) / 512; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9e633f4dcec7..3622c5c9515f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3245,7 +3245,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ - .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h index 20d7d059dadb..40ef379c28ab 100644 --- a/drivers/xen/gntdev-common.h +++ b/drivers/xen/gntdev-common.h @@ -16,6 +16,7 @@ #include <linux/mmu_notifier.h> #include <linux/types.h> #include <xen/interface/event_channel.h> +#include <xen/grant_table.h>
struct gntdev_dmabuf_priv;
@@ -56,6 +57,7 @@ struct gntdev_grant_map { struct gnttab_unmap_grant_ref *unmap_ops; struct gnttab_map_grant_ref *kmap_ops; struct gnttab_unmap_grant_ref *kunmap_ops; + bool *being_removed; struct page **pages; unsigned long pages_vm_start;
@@ -73,6 +75,11 @@ struct gntdev_grant_map { /* Needed to avoid allocation in gnttab_dma_free_pages(). */ xen_pfn_t *frames; #endif + + /* Number of live grants */ + atomic_t live_grants; + /* Needed to avoid allocation in __unmap_grant_pages */ + struct gntab_unmap_queue_data unmap_data; };
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 54778aadf618..f415c056ff8a 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/highmem.h> #include <linux/refcount.h> +#include <linux/workqueue.h>
#include <xen/xen.h> #include <xen/grant_table.h> @@ -60,10 +61,11 @@ module_param(limit, uint, 0644); MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by one mapping request");
+/* True in PV mode, false otherwise */ static int use_ptemod;
-static int unmap_grant_pages(struct gntdev_grant_map *map, - int offset, int pages); +static void unmap_grant_pages(struct gntdev_grant_map *map, + int offset, int pages);
static struct miscdevice gntdev_miscdev;
@@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map) kvfree(map->unmap_ops); kvfree(map->kmap_ops); kvfree(map->kunmap_ops); + kvfree(map->being_removed); kfree(map); }
@@ -140,12 +143,15 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, add->kunmap_ops = kvcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL); add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); + add->being_removed = + kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL); if (NULL == add->grants || NULL == add->map_ops || NULL == add->unmap_ops || NULL == add->kmap_ops || NULL == add->kunmap_ops || - NULL == add->pages) + NULL == add->pages || + NULL == add->being_removed) goto err;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC @@ -240,9 +246,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) if (!refcount_dec_and_test(&map->users)) return;
- if (map->pages && !use_ptemod) + if (map->pages && !use_ptemod) { + /* + * Increment the reference count. This ensures that the + * subsequent call to unmap_grant_pages() will not wind up + * re-entering itself. It *can* wind up calling + * gntdev_put_map() recursively, but such calls will be with a + * reference count greater than 1, so they will return before + * this code is reached. The recursion depth is thus limited to + * 1. Do NOT use refcount_inc() here, as it will detect that + * the reference count is zero and WARN(). + */ + refcount_set(&map->users, 1); + + /* + * Unmap the grants. This may or may not be asynchronous, so it + * is possible that the reference count is 1 on return, but it + * could also be greater than 1. + */ unmap_grant_pages(map, 0, map->count);
+ /* Check if the memory now needs to be freed */ + if (!refcount_dec_and_test(&map->users)) + return; + + /* + * All pages have been returned to the hypervisor, so free the + * map. + */ + } + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { notify_remote_via_evtchn(map->notify.event); evtchn_put(map->notify.event); @@ -288,6 +321,7 @@ static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
int gntdev_map_grant_pages(struct gntdev_grant_map *map) { + size_t alloced = 0; int i, err = 0;
if (!use_ptemod) { @@ -336,87 +370,109 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) map->pages, map->count);
for (i = 0; i < map->count; i++) { - if (map->map_ops[i].status == GNTST_okay) + if (map->map_ops[i].status == GNTST_okay) { map->unmap_ops[i].handle = map->map_ops[i].handle; - else if (!err) + if (!use_ptemod) + alloced++; + } else if (!err) err = -EINVAL;
if (map->flags & GNTMAP_device_map) map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
if (use_ptemod) { - if (map->kmap_ops[i].status == GNTST_okay) + if (map->kmap_ops[i].status == GNTST_okay) { + if (map->map_ops[i].status == GNTST_okay) + alloced++; map->kunmap_ops[i].handle = map->kmap_ops[i].handle; - else if (!err) + } else if (!err) err = -EINVAL; } } + atomic_add(alloced, &map->live_grants); return err; }
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, - int pages) +static void __unmap_grant_pages_done(int result, + struct gntab_unmap_queue_data *data) { - int i, err = 0; - struct gntab_unmap_queue_data unmap_data; + unsigned int i; + struct gntdev_grant_map *map = data->data; + unsigned int offset = data->unmap_ops - map->unmap_ops;
+ for (i = 0; i < data->count; i++) { + WARN_ON(map->unmap_ops[offset+i].status); + pr_debug("unmap handle=%d st=%d\n", + map->unmap_ops[offset+i].handle, + map->unmap_ops[offset+i].status); + map->unmap_ops[offset+i].handle = -1; + } + /* + * Decrease the live-grant counter. This must happen after the loop to + * prevent premature reuse of the grants by gnttab_mmap(). + */ + atomic_sub(data->count, &map->live_grants); + + /* Release reference taken by __unmap_grant_pages */ + gntdev_put_map(NULL, map); +} + +static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset, + int pages) +{ if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { int pgno = (map->notify.addr >> PAGE_SHIFT); + if (pgno >= offset && pgno < offset + pages) { /* No need for kmap, pages are in lowmem */ uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); + tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; } }
- unmap_data.unmap_ops = map->unmap_ops + offset; - unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; - unmap_data.pages = map->pages + offset; - unmap_data.count = pages; + map->unmap_data.unmap_ops = map->unmap_ops + offset; + map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; + map->unmap_data.pages = map->pages + offset; + map->unmap_data.count = pages; + map->unmap_data.done = __unmap_grant_pages_done; + map->unmap_data.data = map; + refcount_inc(&map->users); /* to keep map alive during async call below */
- err = gnttab_unmap_refs_sync(&unmap_data); - if (err) - return err; - - for (i = 0; i < pages; i++) { - if (map->unmap_ops[offset+i].status) - err = -EINVAL; - pr_debug("unmap handle=%d st=%d\n", - map->unmap_ops[offset+i].handle, - map->unmap_ops[offset+i].status); - map->unmap_ops[offset+i].handle = -1; - } - return err; + gnttab_unmap_refs_async(&map->unmap_data); }
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset, - int pages) +static void unmap_grant_pages(struct gntdev_grant_map *map, int offset, + int pages) { - int range, err = 0; + int range; + + if (atomic_read(&map->live_grants) == 0) + return; /* Nothing to do */
pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
/* It is possible the requested range will have a "hole" where we * already unmapped some of the grants. Only unmap valid ranges. */ - while (pages && !err) { - while (pages && map->unmap_ops[offset].handle == -1) { + while (pages) { + while (pages && map->being_removed[offset]) { offset++; pages--; } range = 0; while (range < pages) { - if (map->unmap_ops[offset+range].handle == -1) + if (map->being_removed[offset + range]) break; + map->being_removed[offset + range] = true; range++; } - err = __unmap_grant_pages(map, offset, range); + if (range) + __unmap_grant_pages(map, offset, range); offset += range; pages -= range; } - - return err; }
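The hole-skipping loop in unmap_grant_pages() above splits the requested range into maximal runs of entries that are not already being removed, claims each run, and hands it off in one batch. A standalone sketch of that range-splitting logic, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static void unmap_range(int offset, int count)
{
        printf("unmap [%d, %d)\n", offset, offset + count);
}

static void unmap_pages(bool *being_removed, int offset, int pages)
{
        while (pages) {
                int range = 0;

                /* Skip entries some earlier call already claimed. */
                while (pages && being_removed[offset]) {
                        offset++;
                        pages--;
                }
                /* Claim and batch the next contiguous valid run. */
                while (range < pages && !being_removed[offset + range])
                        being_removed[offset + range++] = true;
                if (range)
                        unmap_range(offset, range);
                offset += range;
                pages -= range;
        }
}

int main(void)
{
        bool removed[8] = { false, true, false, false, true, false, false, false };

        unmap_pages(removed, 0, 8);     /* prints [0, 1), [2, 4), [5, 8) */
        return 0;
}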
/* ------------------------------------------------------------------ */ @@ -468,7 +524,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, struct gntdev_grant_map *map = container_of(mn, struct gntdev_grant_map, notifier); unsigned long mstart, mend; - int err;
if (!mmu_notifier_range_blockable(range)) return false; @@ -489,10 +544,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, map->index, map->count, map->vma->vm_start, map->vma->vm_end, range->start, range->end, mstart, mend); - err = unmap_grant_pages(map, + unmap_grant_pages(map, (mstart - map->vma->vm_start) >> PAGE_SHIFT, (mend - mstart) >> PAGE_SHIFT); - WARN_ON(err);
return true; } @@ -980,6 +1034,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) goto unlock_out; if (use_ptemod && map->vma) goto unlock_out; + if (atomic_read(&map->live_grants)) { + err = -EAGAIN; + goto unlock_out; + } refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops; diff --git a/fs/io_uring.c b/fs/io_uring.c index 2e12dcbc7b0f..023c3eb34dcc 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4366,6 +4366,8 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; + if (unlikely(sqe->addr2 || sqe->splice_fd_in || sqe->ioprio)) + return -EINVAL;
sr->msg_flags = READ_ONCE(sqe->msg_flags); sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); @@ -4601,6 +4603,8 @@ static int io_recvmsg_prep(struct io_kiocb *req,
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; + if (unlikely(sqe->addr2 || sqe->splice_fd_in || sqe->ioprio)) + return -EINVAL;
sr->msg_flags = READ_ONCE(sqe->msg_flags); sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index dd33b31b0a82..86297f59b43e 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1460,13 +1460,6 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data) PF_MEMALLOC)) goto redirty;
- /* - * Given that we do not allow direct reclaim to call us, we should - * never be called in a recursive filesystem reclaim context. - */ - if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS)) - goto redirty; - /* * Is this page beyond the end of the file? * diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 548ebc913f92..c852bb5ff212 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1156,6 +1156,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, nfsd_net_id)); err2 = filemap_check_wb_err(nf->nf_file->f_mapping, since); + err = nfserrno(err2); break; case -EINVAL: err = nfserr_notsupp; @@ -1163,8 +1164,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, default: nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id)); + err = nfserrno(err2); } - err = nfserrno(err2); } else nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net, nfsd_net_id)); diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 98c82f4935e1..24c7d30e41df 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -2811,7 +2811,7 @@ xfs_btree_split_worker( struct xfs_btree_split_args *args = container_of(work, struct xfs_btree_split_args, work); unsigned long pflags; - unsigned long new_pflags = PF_MEMALLOC_NOFS; + unsigned long new_pflags = 0;
/* * we are in a transaction context here, but may also be doing work @@ -2823,12 +2823,20 @@ xfs_btree_split_worker( new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
current_set_flags_nested(&pflags, new_pflags); + xfs_trans_set_context(args->cur->bc_tp);
args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, args->key, args->curp, args->stat); - complete(args->done);
+ xfs_trans_clear_context(args->cur->bc_tp); current_restore_flags_nested(&pflags, new_pflags); + + /* + * Do not access args after complete() has run here. We don't own args + * and the owner may run and free args before we return here. + */ + complete(args->done); + }
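The comment above about not touching args after complete() is the usual completion-lifetime rule: the waiter may wake and free the shared arguments as soon as it is signalled. A loose userspace analogue, with invented names and a POSIX semaphore standing in for the completion:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>

struct split_args {
        sem_t done;                     /* stands in for struct completion */
        int result;
};

static void *worker(void *p)
{
        struct split_args *args = p;

        args->result = 42;              /* do the work while we still own args */
        sem_post(&args->done);          /* last touch: the waiter may free args now */
        return NULL;                    /* never dereference args past this point */
}

int main(void)
{
        struct split_args *args = malloc(sizeof(*args));
        pthread_t t;

        sem_init(&args->done, 0, 0);
        pthread_create(&t, NULL, worker, args);
        sem_wait(&args->done);
        printf("result=%d\n", args->result);
        free(args);                     /* safe only because of the ordering above */
        pthread_join(t, NULL);
        return 0;
}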
/* diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 5aeafa59ed27..66e8353da2f3 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -956,9 +956,19 @@ xfs_log_sb( struct xfs_mount *mp = tp->t_mountp; struct xfs_buf *bp = xfs_trans_getsb(tp);
- mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); - mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree); - mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks); + /* + * Lazy sb counters don't update the in-core superblock so do that now. + * If this is at unmount, the counters will be exactly correct, but at + * any other time they will only be ballpark correct because of + * reservations that have been taken out percpu counters. If we have an + * unclean shutdown, this will be corrected by log recovery rebuilding + * the counters from the AGF block counts. + */ + if (xfs_sb_version_haslazysbcount(&mp->m_sb)) { + mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); + mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree); + mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks); + }
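As a rough illustration of the lazy superblock counters described in the comment above, here is a toy userspace model of per-CPU deltas being folded into the single on-disk value only when the superblock is actually logged (names and layout are invented for the example):

#include <stdio.h>

#define NR_CPUS 4

static long fdblocks_pcpu[NR_CPUS];     /* cheap, contention-free updates */
static long fdblocks_ondisk;            /* only written at log/sync time */

static void mod_fdblocks(int cpu, long delta)
{
        fdblocks_pcpu[cpu] += delta;
}

static long percpu_counter_sum(void)
{
        long sum = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                sum += fdblocks_pcpu[cpu];
        return sum;
}

static void log_sb(void)
{
        /* Fold the per-CPU state into the value that goes to disk. */
        fdblocks_ondisk = percpu_counter_sum();
}

int main(void)
{
        mod_fdblocks(0, +100);
        mod_fdblocks(3, -25);
        log_sb();
        printf("sb_fdblocks=%ld\n", fdblocks_ondisk);   /* 75 */
        return 0;
}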
xfs_sb_to_disk(bp->b_addr, &mp->m_sb); xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 4b76a32d2f16..953de843d9c3 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -62,7 +62,7 @@ xfs_setfilesize_trans_alloc( * We hand off the transaction to the completion thread now, so * clear the flag here. */ - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); + xfs_trans_clear_context(tp); return 0; }
@@ -125,7 +125,7 @@ xfs_setfilesize_ioend( * thus we need to mark ourselves as being in a transaction manually. * Similarly for freeze protection. */ - current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); + xfs_trans_set_context(tp); __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
/* we abort the update if there was an IO error */ @@ -577,6 +577,12 @@ xfs_vm_writepage( { struct xfs_writepage_ctx wpc = { };
+ if (WARN_ON_ONCE(current->journal_info)) { + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; + } + return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops); }
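The current->journal_info test added above (and again in xfs_vm_writepages() just below) is a per-thread re-entrancy guard: a thread that is already inside a transaction must not start writeback that could itself need a transaction. A minimal userspace sketch of the idea, with invented names:

#include <stdio.h>

static _Thread_local void *journal_info;        /* NULL: not in a transaction */

static int writepages(void)
{
        if (journal_info) {
                fprintf(stderr, "refusing writeback inside a transaction\n");
                return 0;               /* bail out instead of recursing */
        }
        /* ... do the actual writeback ... */
        return 1;
}

static void trans_alloc(void *tp) { journal_info = tp; }
static void trans_free(void)      { journal_info = NULL; }

int main(void)
{
        int tp;

        printf("%d\n", writepages());   /* 1: allowed */
        trans_alloc(&tp);
        printf("%d\n", writepages());   /* 0: rejected inside a transaction */
        trans_free();
        return 0;
}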
@@ -587,6 +593,13 @@ xfs_vm_writepages( { struct xfs_writepage_ctx wpc = { };
+ /* + * Writing back data in a transaction context can result in recursive + * transactions. This is bad, so issue a warning and get out of here. + */ + if (WARN_ON_ONCE(current->journal_info)) + return 0; + xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops); } diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index 7f6e20899473..f9e2f606b5b8 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -293,6 +293,8 @@ xfs_errortag_add( struct xfs_mount *mp, unsigned int error_tag) { + BUILD_BUG_ON(ARRAY_SIZE(xfs_errortag_random_default) != XFS_ERRTAG_MAX); + if (error_tag >= XFS_ERRTAG_MAX) return -EINVAL;
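The BUILD_BUG_ON() added to xfs_errortag_add() above ties the default-value table to its enum at compile time. The same idea in a small standalone C11 example, with illustrative names and static_assert standing in for BUILD_BUG_ON:

#include <assert.h>

enum errtag {
        ERRTAG_A,
        ERRTAG_B,
        ERRTAG_C,
        ERRTAG_MAX,
};

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

static const unsigned int errtag_random_default[] = {
        [ERRTAG_A] = 100,
        [ERRTAG_B] = 200,
        [ERRTAG_C] = 4,
};

/* Adding an enum value without a matching table entry now breaks the
 * build instead of reading past the array at runtime. */
static_assert(ARRAY_SIZE(errtag_random_default) == ERRTAG_MAX,
              "errtag_random_default is out of sync with enum errtag");

int main(void)
{
        return 0;
}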
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 6fa05fb78189..aa46b75d75af 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1503,7 +1503,8 @@ xfs_reflink_unshare( if (error) goto out;
- error = filemap_write_and_wait_range(inode->i_mapping, offset, len); + error = filemap_write_and_wait_range(inode->i_mapping, offset, + offset + len - 1); if (error) goto out;
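The fix above reflects that the ranged writeback helpers take an inclusive end offset, so a byte length has to be converted to offset + len - 1. A trivial sketch of the conversion, using a hypothetical helper:

#include <stdio.h>

typedef long long loff_t;

/* Convert an (offset, length) pair into the inclusive [start, end]
 * form expected by filemap_write_and_wait_range()-style interfaces. */
static void range_inclusive(loff_t offset, loff_t len,
                            loff_t *start, loff_t *end)
{
        *start = offset;
        *end = offset + len - 1;
}

int main(void)
{
        loff_t start, end;

        range_inclusive(4096, 8192, &start, &end);
        printf("[%lld, %lld]\n", start, end);   /* [4096, 12287] */
        return 0;
}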
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 05cea7788d49..6323974d6b3e 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1155,6 +1155,22 @@ suffix_kstrtoint( return ret; }
+static inline void +xfs_fs_warn_deprecated( + struct fs_context *fc, + struct fs_parameter *param, + uint64_t flag, + bool value) +{ + /* Don't print the warning if reconfiguring and current mount point + * already had the flag set + */ + if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && + !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value) + return; + xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); +} + /* * Set mount state from a mount option. * @@ -1165,7 +1181,7 @@ xfs_fc_parse_param( struct fs_context *fc, struct fs_parameter *param) { - struct xfs_mount *mp = fc->s_fs_info; + struct xfs_mount *parsing_mp = fc->s_fs_info; struct fs_parse_result result; int size = 0; int opt; @@ -1176,142 +1192,142 @@ xfs_fc_parse_param(
switch (opt) { case Opt_logbufs: - mp->m_logbufs = result.uint_32; + parsing_mp->m_logbufs = result.uint_32; return 0; case Opt_logbsize: - if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize)) + if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) return -EINVAL; return 0; case Opt_logdev: - kfree(mp->m_logname); - mp->m_logname = kstrdup(param->string, GFP_KERNEL); - if (!mp->m_logname) + kfree(parsing_mp->m_logname); + parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); + if (!parsing_mp->m_logname) return -ENOMEM; return 0; case Opt_rtdev: - kfree(mp->m_rtname); - mp->m_rtname = kstrdup(param->string, GFP_KERNEL); - if (!mp->m_rtname) + kfree(parsing_mp->m_rtname); + parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); + if (!parsing_mp->m_rtname) return -ENOMEM; return 0; case Opt_allocsize: if (suffix_kstrtoint(param->string, 10, &size)) return -EINVAL; - mp->m_allocsize_log = ffs(size) - 1; - mp->m_flags |= XFS_MOUNT_ALLOCSIZE; + parsing_mp->m_allocsize_log = ffs(size) - 1; + parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE; return 0; case Opt_grpid: case Opt_bsdgroups: - mp->m_flags |= XFS_MOUNT_GRPID; + parsing_mp->m_flags |= XFS_MOUNT_GRPID; return 0; case Opt_nogrpid: case Opt_sysvgroups: - mp->m_flags &= ~XFS_MOUNT_GRPID; + parsing_mp->m_flags &= ~XFS_MOUNT_GRPID; return 0; case Opt_wsync: - mp->m_flags |= XFS_MOUNT_WSYNC; + parsing_mp->m_flags |= XFS_MOUNT_WSYNC; return 0; case Opt_norecovery: - mp->m_flags |= XFS_MOUNT_NORECOVERY; + parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY; return 0; case Opt_noalign: - mp->m_flags |= XFS_MOUNT_NOALIGN; + parsing_mp->m_flags |= XFS_MOUNT_NOALIGN; return 0; case Opt_swalloc: - mp->m_flags |= XFS_MOUNT_SWALLOC; + parsing_mp->m_flags |= XFS_MOUNT_SWALLOC; return 0; case Opt_sunit: - mp->m_dalign = result.uint_32; + parsing_mp->m_dalign = result.uint_32; return 0; case Opt_swidth: - mp->m_swidth = result.uint_32; + parsing_mp->m_swidth = result.uint_32; return 0; case Opt_inode32: - mp->m_flags |= XFS_MOUNT_SMALL_INUMS; + parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS; return 0; case Opt_inode64: - mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; + parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; return 0; case Opt_nouuid: - mp->m_flags |= XFS_MOUNT_NOUUID; + parsing_mp->m_flags |= XFS_MOUNT_NOUUID; return 0; case Opt_largeio: - mp->m_flags |= XFS_MOUNT_LARGEIO; + parsing_mp->m_flags |= XFS_MOUNT_LARGEIO; return 0; case Opt_nolargeio: - mp->m_flags &= ~XFS_MOUNT_LARGEIO; + parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO; return 0; case Opt_filestreams: - mp->m_flags |= XFS_MOUNT_FILESTREAMS; + parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS; return 0; case Opt_noquota: - mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; - mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; - mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE; return 0; case Opt_quota: case Opt_uquota: case Opt_usrquota: - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | + parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | XFS_UQUOTA_ENFD); return 0; case Opt_qnoenforce: case Opt_uqnoenforce: - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); - mp->m_qflags &= ~XFS_UQUOTA_ENFD; + parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); + parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; return 0; case Opt_pquota: case Opt_prjquota: - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | + parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | 
XFS_PQUOTA_ENFD); return 0; case Opt_pqnoenforce: - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); - mp->m_qflags &= ~XFS_PQUOTA_ENFD; + parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); + parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD; return 0; case Opt_gquota: case Opt_grpquota: - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | + parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | XFS_GQUOTA_ENFD); return 0; case Opt_gqnoenforce: - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); - mp->m_qflags &= ~XFS_GQUOTA_ENFD; + parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); + parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD; return 0; case Opt_discard: - mp->m_flags |= XFS_MOUNT_DISCARD; + parsing_mp->m_flags |= XFS_MOUNT_DISCARD; return 0; case Opt_nodiscard: - mp->m_flags &= ~XFS_MOUNT_DISCARD; + parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD; return 0; #ifdef CONFIG_FS_DAX case Opt_dax: - xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS); + xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS); return 0; case Opt_dax_enum: - xfs_mount_set_dax_mode(mp, result.uint_32); + xfs_mount_set_dax_mode(parsing_mp, result.uint_32); return 0; #endif /* Following mount options will be removed in September 2025 */ case Opt_ikeep: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags |= XFS_MOUNT_IKEEP; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true); + parsing_mp->m_flags |= XFS_MOUNT_IKEEP; return 0; case Opt_noikeep: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags &= ~XFS_MOUNT_IKEEP; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false); + parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP; return 0; case Opt_attr2: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags |= XFS_MOUNT_ATTR2; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true); + parsing_mp->m_flags |= XFS_MOUNT_ATTR2; return 0; case Opt_noattr2: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags &= ~XFS_MOUNT_ATTR2; - mp->m_flags |= XFS_MOUNT_NOATTR2; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true); + parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2; + parsing_mp->m_flags |= XFS_MOUNT_NOATTR2; return 0; default: - xfs_warn(mp, "unknown mount option [%s].", param->key); + xfs_warn(parsing_mp, "unknown mount option [%s].", param->key); return -EINVAL; }
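The xfs_fs_warn_deprecated() helper introduced earlier in this diff only warns when a deprecated option would actually change the mounted state on a reconfigure, so unchanged remounts stay quiet. A small userspace model of that rule, with invented names:

#include <stdbool.h>
#include <stdio.h>

static void warn_deprecated(bool reconfigure, bool current_value,
                            bool new_value, const char *opt)
{
        if (reconfigure && current_value == new_value)
                return;                 /* nothing changes: stay quiet */
        fprintf(stderr, "%s mount option is deprecated.\n", opt);
}

int main(void)
{
        warn_deprecated(false, false, true, "ikeep");   /* fresh mount: warns */
        warn_deprecated(true, true, true, "ikeep");     /* unchanged remount: silent */
        warn_deprecated(true, false, true, "ikeep");    /* remount flips it: warns */
        return 0;
}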
@@ -1918,7 +1934,7 @@ xfs_init_zones(void) if (!xfs_ifork_zone) goto out_destroy_da_state_zone;
- xfs_trans_zone = kmem_cache_create("xf_trans", + xfs_trans_zone = kmem_cache_create("xfs_trans", sizeof(struct xfs_trans), 0, 0, NULL); if (!xfs_trans_zone) diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index c94e71f741b6..36166bae24a6 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -68,6 +68,7 @@ xfs_trans_free( xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
trace_xfs_trans_free(tp, _RET_IP_); + xfs_trans_clear_context(tp); if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT)) sb_end_intwrite(tp->t_mountp->m_super); xfs_trans_free_dqinfo(tp); @@ -119,7 +120,8 @@ xfs_trans_dup(
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used; tp->t_rtx_res = tp->t_rtx_res_used; - ntp->t_pflags = tp->t_pflags; + + xfs_trans_switch_context(tp, ntp);
/* move deferred ops over to the new tp */ xfs_defer_move(ntp, tp); @@ -153,9 +155,6 @@ xfs_trans_reserve( int error = 0; bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
- /* Mark this thread as being in a transaction */ - current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); - /* * Attempt to reserve the needed disk blocks by decrementing * the number needed from the number available. This will @@ -163,10 +162,8 @@ xfs_trans_reserve( */ if (blocks > 0) { error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd); - if (error != 0) { - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); + if (error != 0) return -ENOSPC; - } tp->t_blk_res += blocks; }
@@ -240,9 +237,6 @@ xfs_trans_reserve( xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd); tp->t_blk_res = 0; } - - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); - return error; }
@@ -266,6 +260,7 @@ xfs_trans_alloc( tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL); if (!(flags & XFS_TRANS_NO_WRITECOUNT)) sb_start_intwrite(mp->m_super); + xfs_trans_set_context(tp);
/* * Zero-reservation ("empty") transactions can't modify anything, so @@ -620,6 +615,9 @@ xfs_trans_unreserve_and_mod_sb(
/* apply remaining deltas */ spin_lock(&mp->m_sb_lock); + mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta; + mp->m_sb.sb_icount += idelta; + mp->m_sb.sb_ifree += ifreedelta; mp->m_sb.sb_frextents += rtxdelta; mp->m_sb.sb_dblocks += tp->t_dblocks_delta; mp->m_sb.sb_agcount += tp->t_agcount_delta; @@ -878,7 +876,6 @@ __xfs_trans_commit(
xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); xfs_trans_free(tp);
/* @@ -910,7 +907,6 @@ __xfs_trans_commit( xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket); tp->t_ticket = NULL; } - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); xfs_trans_free_items(tp, !!error); xfs_trans_free(tp);
@@ -970,9 +966,6 @@ xfs_trans_cancel( tp->t_ticket = NULL; }
- /* mark this thread as no longer being in a transaction */ - current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); - xfs_trans_free_items(tp, dirty); xfs_trans_free(tp); } diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 084658946cc8..075eeade4f7d 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -268,4 +268,34 @@ xfs_trans_item_relog( return lip->li_ops->iop_relog(lip, tp); }
+static inline void +xfs_trans_set_context( + struct xfs_trans *tp) +{ + ASSERT(current->journal_info == NULL); + tp->t_pflags = memalloc_nofs_save(); + current->journal_info = tp; +} + +static inline void +xfs_trans_clear_context( + struct xfs_trans *tp) +{ + if (current->journal_info == tp) { + memalloc_nofs_restore(tp->t_pflags); + current->journal_info = NULL; + } +} + +static inline void +xfs_trans_switch_context( + struct xfs_trans *old_tp, + struct xfs_trans *new_tp) +{ + ASSERT(current->journal_info == old_tp); + new_tp->t_pflags = old_tp->t_pflags; + old_tp->t_pflags = 0; + current->journal_info = new_tp; +} + #endif /* __XFS_TRANS_H__ */ diff --git a/include/linux/dim.h b/include/linux/dim.h index b698266d0035..6c5733981563 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -21,7 +21,7 @@ * We consider 10% difference as significant. */ #define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100UL * abs((val) - (ref))) / (ref)) > 10) + ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))
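The one-character change to IS_SIGNIFICANT_DIFF() above relies on short-circuit evaluation so a zero reference value can never reach the division. A standalone adaptation (abs() swapped for labs() and casts added so it builds as plain userspace C):

#include <stdio.h>
#include <stdlib.h>

#define IS_SIGNIFICANT_DIFF(val, ref) \
        ((ref) && (((100UL * labs((long)(val) - (long)(ref))) / (ref)) > 10))

int main(void)
{
        printf("%d\n", IS_SIGNIFICANT_DIFF(110, 100));  /* 0: exactly 10% */
        printf("%d\n", IS_SIGNIFICANT_DIFF(120, 100));  /* 1: 20% is significant */
        printf("%d\n", IS_SIGNIFICANT_DIFF(5, 0));      /* 0: and no division by zero */
        return 0;
}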
/* * Calculate the gap between two values. diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index e25be2d01a7a..4b74c67f13c9 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -410,7 +410,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, u32 mtu = dst_mtu(encap_dst) - headroom;
if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) || - (!skb_is_gso(skb) && (skb->len - skb_mac_header_len(skb)) <= mtu)) + (!skb_is_gso(skb) && (skb->len - skb_network_offset(skb)) <= mtu)) return 0;
skb_dst_update_pmtu_no_confirm(skb, mtu); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 017cd666387f..32c60122db9c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1980,7 +1980,8 @@ int tcp_v4_rcv(struct sk_buff *skb) struct sock *nsk;
sk = req->rsk_listener; - if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) { + if (unlikely(!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb) || + tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; @@ -2019,6 +2020,7 @@ int tcp_v4_rcv(struct sk_buff *skb) } goto discard_and_relse; } + nf_reset_ct(skb); if (nsk == sk) { reqsk_put(req); tcp_v4_restore_cb(skb); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0562fb321959..05317e6f48f8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1102,10 +1102,6 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, goto out; }
- if (net->ipv6.devconf_all->disable_policy || - idev->cnf.disable_policy) - f6i->dst_nopolicy = true; - neigh_parms_data_state_setall(idev->nd_parms);
ifa->addr = *cfg->pfx; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6ace9f0ac22f..e67505c6d856 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -4479,8 +4479,15 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net, }
f6i = ip6_route_info_create(&cfg, gfp_flags, NULL); - if (!IS_ERR(f6i)) + if (!IS_ERR(f6i)) { f6i->dst_nocount = true; + + if (!anycast && + (net->ipv6.devconf_all->disable_policy || + idev->cnf.disable_policy)) + f6i->dst_nopolicy = true; + } + return f6i; }
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index b9179708e3c1..552bce1fdfb9 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -409,7 +409,6 @@ int __net_init seg6_hmac_net_init(struct net *net)
return 0; } -EXPORT_SYMBOL(seg6_hmac_net_init);
void seg6_hmac_exit(void) { diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index bab0e99f6e35..3c92e8cacbba 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -321,9 +321,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ifreq *ifr) kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) : NULL;
- rcu_read_lock(); - - ca = t->prl_count < cmax ? t->prl_count : cmax; + ca = min(t->prl_count, cmax);
if (!kp) { /* We don't try hard to allocate much memory for @@ -338,7 +336,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ifreq *ifr) } }
- c = 0; + rcu_read_lock(); for_each_prl_rcu(t->prl) { if (c >= cmax) break; @@ -350,7 +348,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ifreq *ifr) if (kprl.addr != htonl(INADDR_ANY)) break; } -out: + rcu_read_unlock();
len = sizeof(*kp) * c; @@ -359,7 +357,7 @@ static int ipip6_tunnel_get_prl(struct net_device *dev, struct ifreq *ifr) ret = -EFAULT;
kfree(kp); - +out: return ret; }
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 858c8d4d659a..a5cfb321ae23 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -142,6 +142,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key, /* Another cpu may race to insert the element with the same key */ if (prev) { nft_set_elem_destroy(set, he, true); + atomic_dec(&set->nelems); he = prev; }
@@ -151,6 +152,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
err2: nft_set_elem_destroy(set, he, true); + atomic_dec(&set->nelems); err1: return false; } diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c index b3138fc2e552..f06ddbed3fed 100644 --- a/net/rose/rose_timer.c +++ b/net/rose/rose_timer.c @@ -31,89 +31,89 @@ static void rose_idletimer_expiry(struct timer_list *);
void rose_start_heartbeat(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer);
sk->sk_timer.function = rose_heartbeat_expiry; sk->sk_timer.expires = jiffies + 5 * HZ;
- add_timer(&sk->sk_timer); + sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires); }
void rose_start_t1timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk);
- del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t1;
- add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); }
void rose_start_t2timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk);
- del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t2;
- add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); }
void rose_start_t3timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk);
- del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t3;
- add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); }
void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk);
- del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer);
rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->hb;
- add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); }
void rose_start_idletimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk);
- del_timer(&rose->idletimer); + sk_stop_timer(sk, &rose->idletimer);
if (rose->idle > 0) { rose->idletimer.function = rose_idletimer_expiry; rose->idletimer.expires = jiffies + rose->idle;
- add_timer(&rose->idletimer); + sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires); } }
void rose_stop_heartbeat(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer); }
void rose_stop_timer(struct sock *sk) { - del_timer(&rose_sk(sk)->timer); + sk_stop_timer(sk, &rose_sk(sk)->timer); }
void rose_stop_idletimer(struct sock *sk) { - del_timer(&rose_sk(sk)->idletimer); + sk_stop_timer(sk, &rose_sk(sk)->idletimer); }
static void rose_heartbeat_expiry(struct timer_list *t) @@ -130,6 +130,7 @@ static void rose_heartbeat_expiry(struct timer_list *t) (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { bh_unlock_sock(sk); rose_destroy_socket(sk); + sock_put(sk); return; } break; @@ -152,6 +153,7 @@ static void rose_heartbeat_expiry(struct timer_list *t)
rose_start_heartbeat(sk); bh_unlock_sock(sk); + sock_put(sk); }
static void rose_timer_expiry(struct timer_list *t) @@ -181,6 +183,7 @@ static void rose_timer_expiry(struct timer_list *t) break; } bh_unlock_sock(sk); + sock_put(sk); }
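The rose timer conversion above, with sk_stop_timer()/sk_reset_timer() and the sock_put() added to each expiry handler (including rose_idletimer_expiry() just below), follows the usual rule that a pending timer owns a reference on the socket. A userspace sketch of that ownership, with the "timer" called synchronously and all names illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sock {
        atomic_int refcnt;
        bool timer_pending;
};

static void sock_hold(struct sock *sk) { atomic_fetch_add(&sk->refcnt, 1); }

static void sock_put(struct sock *sk)
{
        if (atomic_fetch_sub(&sk->refcnt, 1) == 1) {
                printf("socket freed\n");
                free(sk);
        }
}

static void timer_start(struct sock *sk)
{
        if (!sk->timer_pending) {
                sk->timer_pending = true;
                sock_hold(sk);          /* the pending timer owns a reference */
        }
}

static void timer_expiry(struct sock *sk)
{
        sk->timer_pending = false;
        /* ... handle the timeout while the timer's reference pins sk ... */
        sock_put(sk);                   /* matches the sock_hold() in timer_start() */
}

int main(void)
{
        struct sock *sk = calloc(1, sizeof(*sk));

        atomic_init(&sk->refcnt, 1);    /* caller's reference */
        timer_start(sk);
        sock_put(sk);                   /* caller drops its reference early */
        timer_expiry(sk);               /* socket survives until the timer fires */
        return 0;
}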
static void rose_idletimer_expiry(struct timer_list *t) @@ -205,4 +208,5 @@ static void rose_idletimer_expiry(struct timer_list *t) sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); + sock_put(sk); } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 7b29aa1a3ce9..4ab9c2a6f650 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -302,7 +302,8 @@ static int tcf_idr_release_unsafe(struct tc_action *p) }
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, - const struct tc_action_ops *ops) + const struct tc_action_ops *ops, + struct netlink_ext_ack *extack) { struct nlattr *nest; int n_i = 0; @@ -318,20 +319,25 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb, if (nla_put_string(skb, TCA_KIND, ops->kind)) goto nla_put_failure;
+ ret = 0; mutex_lock(&idrinfo->lock); idr_for_each_entry_ul(idr, p, tmp, id) { if (IS_ERR(p)) continue; ret = tcf_idr_release_unsafe(p); - if (ret == ACT_P_DELETED) { + if (ret == ACT_P_DELETED) module_put(ops->owner); - n_i++; - } else if (ret < 0) { - mutex_unlock(&idrinfo->lock); - goto nla_put_failure; - } + else if (ret < 0) + break; + n_i++; } mutex_unlock(&idrinfo->lock); + if (ret < 0) { + if (n_i) + NL_SET_ERR_MSG(extack, "Unable to flush all TC actions"); + else + goto nla_put_failure; + }
ret = nla_put_u32(skb, TCA_FCNT, n_i); if (ret) @@ -352,7 +358,7 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, struct tcf_idrinfo *idrinfo = tn->idrinfo;
if (type == RTM_DELACTION) { - return tcf_del_walker(idrinfo, skb, ops); + return tcf_del_walker(idrinfo, skb, ops, extack); } else if (type == RTM_GETACTION) { return tcf_dump_walker(idrinfo, skb, cb); } else { diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index c8ed6d3d5762..d84bb5037bb5 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -752,7 +752,7 @@ static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, */ xdr->p = (void *)p + frag2bytes; space_left = xdr->buf->buflen - xdr->buf->len; - if (space_left - nbytes >= PAGE_SIZE) + if (space_left - frag1bytes >= PAGE_SIZE) xdr->end = (void *)p + PAGE_SIZE; else xdr->end = (void *)p + space_left - frag1bytes; diff --git a/net/tipc/node.c b/net/tipc/node.c index e4452d55851f..60059827563a 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -456,8 +456,8 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id, bool preliminary) { struct tipc_net *tn = net_generic(net, tipc_net_id); + struct tipc_link *l, *snd_l = tipc_bc_sndlink(net); struct tipc_node *n, *temp_node; - struct tipc_link *l; unsigned long intv; int bearer_id; int i; @@ -472,6 +472,16 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id, goto exit; /* A preliminary node becomes "real" now, refresh its data */ tipc_node_write_lock(n); + if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX, + tipc_link_min_win(snd_l), tipc_link_max_win(snd_l), + n->capabilities, &n->bc_entry.inputq1, + &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) { + pr_warn("Broadcast rcv link refresh failed, no memory\n"); + tipc_node_write_unlock_fast(n); + tipc_node_put(n); + n = NULL; + goto exit; + } n->preliminary = false; n->addr = addr; hlist_del_rcu(&n->hash); @@ -551,7 +561,16 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id, n->signature = INVALID_NODE_SIG; n->active_links[0] = INVALID_BEARER_ID; n->active_links[1] = INVALID_BEARER_ID; - n->bc_entry.link = NULL; + if (!preliminary && + !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX, + tipc_link_min_win(snd_l), tipc_link_max_win(snd_l), + n->capabilities, &n->bc_entry.inputq1, + &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) { + pr_warn("Broadcast rcv link creation failed, no memory\n"); + kfree(n); + n = NULL; + goto exit; + } tipc_node_get(n); timer_setup(&n->timer, tipc_node_timeout, 0); /* Start a slow timer anyway, crypto needs it */ @@ -1128,7 +1147,7 @@ void tipc_node_check_dest(struct net *net, u32 addr, bool *respond, bool *dupl_addr) { struct tipc_node *n; - struct tipc_link *l, *snd_l; + struct tipc_link *l; struct tipc_link_entry *le; bool addr_match = false; bool sign_match = false; @@ -1148,22 +1167,6 @@ void tipc_node_check_dest(struct net *net, u32 addr, return;
tipc_node_write_lock(n); - if (unlikely(!n->bc_entry.link)) { - snd_l = tipc_bc_sndlink(net); - if (!tipc_link_bc_create(net, tipc_own_addr(net), - addr, peer_id, U16_MAX, - tipc_link_min_win(snd_l), - tipc_link_max_win(snd_l), - n->capabilities, - &n->bc_entry.inputq1, - &n->bc_entry.namedq, snd_l, - &n->bc_entry.link)) { - pr_warn("Broadcast rcv link creation failed, no mem\n"); - tipc_node_write_unlock_fast(n); - tipc_node_put(n); - return; - } - }
le = &n->links[b->identity];
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh index 80b5d352702e..dc932fd65363 100755 --- a/tools/testing/selftests/net/udpgso_bench.sh +++ b/tools/testing/selftests/net/udpgso_bench.sh @@ -120,7 +120,7 @@ run_all() { run_udp "${ipv4_args}"
echo "ipv6" - run_tcp "${ipv4_args}" + run_tcp "${ipv6_args}" run_udp "${ipv6_args}" }
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile index 2af9d39a9716..215e1067f037 100644 --- a/tools/testing/selftests/rseq/Makefile +++ b/tools/testing/selftests/rseq/Makefile @@ -6,7 +6,7 @@ endif
CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \ $(CLANG_FLAGS) -LDLIBS += -lpthread +LDLIBS += -lpthread -ldl
# Own dependencies because we only want to build against 1st prerequisite, but # still track changes to header files and depend on shared object. diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c index eb3f6db36d36..517756afc2a4 100644 --- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c +++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c @@ -9,10 +9,9 @@ #include <string.h> #include <stddef.h>
+#include "../kselftest.h" #include "rseq.h"
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - struct percpu_lock_entry { intptr_t v; } __attribute__((aligned(128))); @@ -168,7 +167,7 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list, for (;;) { struct percpu_list_node *head; intptr_t *targetptr, expectnot, *load; - off_t offset; + long offset; int ret, cpu;
cpu = rseq_cpu_start(); diff --git a/tools/testing/selftests/rseq/compiler.h b/tools/testing/selftests/rseq/compiler.h new file mode 100644 index 000000000000..876eb6a7f75b --- /dev/null +++ b/tools/testing/selftests/rseq/compiler.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq/compiler.h + * + * Work-around asm goto compiler bugs. + * + * (C) Copyright 2021 - Mathieu Desnoyers mathieu.desnoyers@efficios.com + */ + +#ifndef RSEQ_COMPILER_H +#define RSEQ_COMPILER_H + +/* + * gcc prior to 4.8.2 miscompiles asm goto. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 + * + * gcc prior to 8.1.0 miscompiles asm goto at O1. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103908 + * + * clang prior to version 13.0.1 miscompiles asm goto at O2. + * https://github.com/llvm/llvm-project/issues/52735 + * + * Work around these issues by adding a volatile inline asm with + * memory clobber in the fallthrough after the asm goto and at each + * label target. Emit this for all compilers in case other similar + * issues are found in the future. + */ +#define rseq_after_asm_goto() asm volatile ("" : : : "memory") + +#endif /* RSEQ_COMPILER_H_ */ diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c index 384589095864..e29ecc7158e8 100644 --- a/tools/testing/selftests/rseq/param_test.c +++ b/tools/testing/selftests/rseq/param_test.c @@ -161,7 +161,7 @@ unsigned int yield_mod_cnt, nr_abort; " cbnz " INJECT_ASM_REG ", 222b\n" \ "333:\n"
-#elif __PPC__ +#elif defined(__PPC__)
#define RSEQ_INJECT_INPUT \ , [loop_cnt_1]"m"(loop_cnt[1]) \ @@ -368,9 +368,7 @@ void *test_percpu_spinlock_thread(void *arg) abort(); reps = thread_data->reps; for (i = 0; i < reps; i++) { - int cpu = rseq_cpu_start(); - - cpu = rseq_this_cpu_lock(&data->lock); + int cpu = rseq_this_cpu_lock(&data->lock); data->c[cpu].count++; rseq_percpu_unlock(&data->lock, cpu); #ifndef BENCHMARK @@ -551,7 +549,7 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list, for (;;) { struct percpu_list_node *head; intptr_t *targetptr, expectnot, *load; - off_t offset; + long offset; int ret;
cpu = rseq_cpu_start(); diff --git a/tools/testing/selftests/rseq/rseq-abi.h b/tools/testing/selftests/rseq/rseq-abi.h new file mode 100644 index 000000000000..a8c44d9af71f --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-abi.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _RSEQ_ABI_H +#define _RSEQ_ABI_H + +/* + * rseq-abi.h + * + * Restartable sequences system call API + * + * Copyright (c) 2015-2022 Mathieu Desnoyers mathieu.desnoyers@efficios.com + */ + +#include <linux/types.h> +#include <asm/byteorder.h> + +enum rseq_abi_cpu_id_state { + RSEQ_ABI_CPU_ID_UNINITIALIZED = -1, + RSEQ_ABI_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_abi_flags { + RSEQ_ABI_FLAG_UNREGISTER = (1 << 0), +}; + +enum rseq_abi_cs_flags_bit { + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum rseq_abi_cs_flags { + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT), + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT), + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT), +}; + +/* + * struct rseq_abi_cs is aligned on 4 * 8 bytes to ensure it is always + * contained within a single cache-line. It is usually declared as + * link-time constant data. + */ +struct rseq_abi_cs { + /* Version of this structure. */ + __u32 version; + /* enum rseq_abi_cs_flags */ + __u32 flags; + __u64 start_ip; + /* Offset from start_ip. */ + __u64 post_commit_offset; + __u64 abort_ip; +} __attribute__((aligned(4 * sizeof(__u64)))); + +/* + * struct rseq_abi is aligned on 4 * 8 bytes to ensure it is always + * contained within a single cache-line. + * + * A single struct rseq_abi per thread is allowed. + */ +struct rseq_abi { + /* + * Restartable sequences cpu_id_start field. Updated by the + * kernel. Read by user-space with single-copy atomicity + * semantics. This field should only be read by the thread which + * registered this data structure. Aligned on 32-bit. Always + * contains a value in the range of possible CPUs, although the + * value may not be the actual current CPU (e.g. if rseq is not + * initialized). This CPU number value should always be compared + * against the value of the cpu_id field before performing a rseq + * commit or returning a value read from a data structure indexed + * using the cpu_id_start value. + */ + __u32 cpu_id_start; + /* + * Restartable sequences cpu_id field. Updated by the kernel. + * Read by user-space with single-copy atomicity semantics. This + * field should only be read by the thread which registered this + * data structure. Aligned on 32-bit. Values + * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED + * have a special semantic: the former means "rseq uninitialized", + * and latter means "rseq initialization failed". This value is + * meant to be read within rseq critical sections and compared + * with the cpu_id_start value previously read, before performing + * the commit instruction, or read and compared with the + * cpu_id_start value before returning a value loaded from a data + * structure indexed using the cpu_id_start value. + */ + __u32 cpu_id; + /* + * Restartable sequences rseq_cs field. + * + * Contains NULL when no critical section is active for the current + * thread, or holds a pointer to the currently active struct rseq_cs. 
+ * + * Updated by user-space, which sets the address of the currently + * active rseq_cs at the beginning of assembly instruction sequence + * block, and set to NULL by the kernel when it restarts an assembly + * instruction sequence block, as well as when the kernel detects that + * it is preempting or delivering a signal outside of the range + * targeted by the rseq_cs. Also needs to be set to NULL by user-space + * before reclaiming memory that contains the targeted struct rseq_cs. + * + * Read and set by the kernel. Set by user-space with single-copy + * atomicity semantics. This field should only be updated by the + * thread which registered this data structure. Aligned on 64-bit. + */ + union { + __u64 ptr64; + + /* + * The "arch" field provides architecture accessor for + * the ptr field based on architecture pointer size and + * endianness. + */ + struct { +#ifdef __LP64__ + __u64 ptr; +#elif defined(__BYTE_ORDER) ? (__BYTE_ORDER == __BIG_ENDIAN) : defined(__BIG_ENDIAN) + __u32 padding; /* Initialized to zero. */ + __u32 ptr; +#else + __u32 ptr; + __u32 padding; /* Initialized to zero. */ +#endif + } arch; + } rseq_cs; + + /* + * Restartable sequences flags field. + * + * This field should only be updated by the thread which + * registered this data structure. Read by the kernel. + * Mainly used for single-stepping through rseq critical sections + * with debuggers. + * + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT + * Inhibit instruction sequence block restart on preemption + * for this thread. + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL + * Inhibit instruction sequence block restart on signal + * delivery for this thread. + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE + * Inhibit instruction sequence block restart on migration for + * this thread. + */ + __u32 flags; +} __attribute__((aligned(4 * sizeof(__u64)))); + +#endif /* _RSEQ_ABI_H */ diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h index 5943c816c07c..893a11eca9d5 100644 --- a/tools/testing/selftests/rseq/rseq-arm.h +++ b/tools/testing/selftests/rseq/rseq-arm.h @@ -147,14 +147,11 @@ do { \ teardown \ "b %l[" __rseq_str(cmpfail_label) "]\n\t"
-#define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("") - static inline __attribute__((always_inline)) int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -185,8 +182,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -198,30 +195,31 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif }
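The rseq_cmpeqv_storev() hunk above shows rseq_after_asm_goto() from the new compiler.h being placed on the fallthrough path and at every label the asm goto can reach. A tiny standalone illustration of the same placement, assuming GCC or clang with asm goto support on x86-64 (the asm body and function are invented for the example):

#include <stdio.h>

#define after_asm_goto()        __asm__ volatile ("" : : : "memory")

static int is_zero(int *p)
{
        __asm__ __volatile__ goto ("cmpl $0, %0\n\t"
                                   "jne %l[nonzero]\n\t"
                                   : /* asm goto has no outputs here */
                                   : "m" (*p)
                                   : "cc", "memory"
                                   : nonzero);
        after_asm_goto();       /* fallthrough path */
        return 1;
nonzero:
        after_asm_goto();       /* jump target */
        return 0;
}

int main(void)
{
        int a = 0, b = 5;

        printf("%d %d\n", is_zero(&a), is_zero(&b));    /* 1 0 */
        return 0;
}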
static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -255,8 +253,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -270,19 +268,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -292,7 +292,6 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ #ifdef RSEQ_COMPARE_TWICE @@ -316,8 +315,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [count] "Ir" (count) RSEQ_INJECT_INPUT @@ -328,14 +327,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -347,7 +347,6 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -381,8 +380,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -398,19 +397,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -422,7 +423,6 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -457,8 +457,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -474,19 +474,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -498,7 +500,6 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -537,8 +538,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -554,21 +555,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -582,7 +586,6 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -657,8 +660,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, "8:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -678,21 +681,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -706,7 +709,6 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -782,8 +784,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, "8:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -803,21 +805,21 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: - rseq_workaround_gcc_asm_size_guess(); + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq-arm64.h b/tools/testing/selftests/rseq/rseq-arm64.h index 200dae9e4208..cbe190a4d005 100644 --- a/tools/testing/selftests/rseq/rseq-arm64.h +++ b/tools/testing/selftests/rseq/rseq-arm64.h @@ -230,8 +230,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "Qo" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -242,24 +242,28 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif }
static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9)
@@ -287,8 +291,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "Qo" (*v), [expectnot] "r" (expectnot), [load] "Qo" (*load), @@ -300,16 +304,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -337,8 +346,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "Qo" (*v), [count] "r" (count) RSEQ_INJECT_INPUT @@ -348,12 +357,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -388,8 +400,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [expect] "r" (expect), [v] "Qo" (*v), [newv] "r" (newv), @@ -402,17 +414,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -447,8 +463,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [expect] "r" (expect), [v] "Qo" (*v), [newv] "r" (newv), @@ -461,17 +477,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -508,8 +528,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] 
"m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "Qo" (*v), [expect] "r" (expect), [v2] "Qo" (*v2), @@ -522,19 +542,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -569,8 +594,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [expect] "r" (expect), [v] "Qo" (*v), [newv] "r" (newv), @@ -584,17 +609,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -629,8 +658,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "Qo" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [expect] "r" (expect), [v] "Qo" (*v), [newv] "r" (newv), @@ -644,17 +673,21 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h b/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h new file mode 100644 index 000000000000..38c584661571 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-generic-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers mathieu.desnoyers@efficios.com + */ + +#ifndef _RSEQ_GENERIC_THREAD_POINTER +#define _RSEQ_GENERIC_THREAD_POINTER + +#ifdef __cplusplus +extern "C" { +#endif + +/* Use gcc builtin thread pointer. */ +static inline void *rseq_thread_pointer(void) +{ + return __builtin_thread_pointer(); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h index e989e7c14b09..878739fae2fd 100644 --- a/tools/testing/selftests/rseq/rseq-mips.h +++ b/tools/testing/selftests/rseq/rseq-mips.h @@ -154,14 +154,11 @@ do { \ teardown \ "b %l[" __rseq_str(cmpfail_label) "]\n\t"
-#define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("") - static inline __attribute__((always_inline)) int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -190,8 +187,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -203,14 +200,11 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -222,11 +216,10 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -258,8 +251,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -273,14 +266,11 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -295,7 +285,6 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ #ifdef RSEQ_COMPARE_TWICE @@ -319,8 +308,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [count] "Ir" (count) RSEQ_INJECT_INPUT @@ -331,10 +320,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE @@ -350,7 +337,6 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -382,8 +368,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -399,14 +385,11 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -423,7 +406,6 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -456,8 +438,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -473,14 +455,11 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -497,7 +476,6 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, { RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -532,8 +510,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, "5:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -549,14 +527,11 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: @@ -577,7 +552,6 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -649,8 +623,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, "8:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -670,21 +644,16 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: - rseq_workaround_gcc_asm_size_guess(); rseq_bug("cpu_id comparison failed"); error2: - rseq_workaround_gcc_asm_size_guess(); rseq_bug("expected value comparison failed"); #endif } @@ -698,7 +667,6 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
RSEQ_INJECT_C(9)
- rseq_workaround_gcc_asm_size_guess(); __asm__ __volatile__ goto ( RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) @@ -771,8 +739,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, "8:\n\t" : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -792,21 +760,16 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); - rseq_workaround_gcc_asm_size_guess(); return 0; abort: - rseq_workaround_gcc_asm_size_guess(); RSEQ_INJECT_FAILED return -1; cmpfail: - rseq_workaround_gcc_asm_size_guess(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: - rseq_workaround_gcc_asm_size_guess(); rseq_bug("cpu_id comparison failed"); error2: - rseq_workaround_gcc_asm_size_guess(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h b/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h new file mode 100644 index 000000000000..263eee84fb76 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-ppc-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers mathieu.desnoyers@efficios.com + */ + +#ifndef _RSEQ_PPC_THREAD_POINTER +#define _RSEQ_PPC_THREAD_POINTER + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void *rseq_thread_pointer(void) +{ +#ifdef __powerpc64__ + register void *__result asm ("r13"); +#else + register void *__result asm ("r2"); +#endif + asm ("" : "=r" (__result)); + return __result; +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-ppc.h b/tools/testing/selftests/rseq/rseq-ppc.h index 76be90196fe4..bab8e0b9fb11 100644 --- a/tools/testing/selftests/rseq/rseq-ppc.h +++ b/tools/testing/selftests/rseq/rseq-ppc.h @@ -47,10 +47,13 @@ do { \
#ifdef __PPC64__
-#define STORE_WORD "std " -#define LOAD_WORD "ld " -#define LOADX_WORD "ldx " -#define CMP_WORD "cmpd " +#define RSEQ_STORE_LONG(arg) "std%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_STORE_INT(arg) "stw%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_LOAD_LONG(arg) "ld%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOAD_INT(arg) "lwz%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOADX_LONG "ldx " /* From base register ("b" constraint) */ +#define RSEQ_CMP_LONG "cmpd " +#define RSEQ_CMP_LONG_INT "cmpdi "
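
The single-width STORE_WORD/LOAD_WORD strings are replaced by RSEQ_* macros that distinguish long from int accesses and append the %U/%X output modifiers to the mnemonic, so gcc can emit the update or indexed form (stdu, stdx, ...) whenever it chooses such an addressing mode for the "m" operand. A small stand-alone illustration of the idiom (hypothetical helper, ppc64 only, not part of the patch):

	/* Store a long through an "m" operand; %U/%X add the matching
	 * mnemonic suffix if gcc picks update or indexed addressing. */
	static inline void ppc64_store_long(long *p, long v)
	{
		__asm__ __volatile__ ("std%U[dst]%X[dst] %[val], %[dst]"
				      : [dst] "=m" (*p)
				      : [val] "r" (v));
	}
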
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ start_ip, post_commit_offset, abort_ip) \ @@ -89,10 +92,13 @@ do { \
#else /* #ifdef __PPC64__ */
-#define STORE_WORD "stw " -#define LOAD_WORD "lwz " -#define LOADX_WORD "lwzx " -#define CMP_WORD "cmpw " +#define RSEQ_STORE_LONG(arg) "stw%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_STORE_INT(arg) RSEQ_STORE_LONG(arg) /* To memory ("m" constraint) */ +#define RSEQ_LOAD_LONG(arg) "lwz%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOAD_INT(arg) RSEQ_LOAD_LONG(arg) /* From memory ("m" constraint) */ +#define RSEQ_LOADX_LONG "lwzx " /* From base register ("b" constraint) */ +#define RSEQ_CMP_LONG "cmpw " +#define RSEQ_CMP_LONG_INT "cmpwi "
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ start_ip, post_commit_offset, abort_ip) \ @@ -125,7 +131,7 @@ do { \ RSEQ_INJECT_ASM(1) \ "lis %%r17, (" __rseq_str(cs_label) ")@ha\n\t" \ "addi %%r17, %%r17, (" __rseq_str(cs_label) ")@l\n\t" \ - "stw %%r17, %[" __rseq_str(rseq_cs) "]\n\t" \ + RSEQ_STORE_INT(rseq_cs) "%%r17, %[" __rseq_str(rseq_cs) "]\n\t" \ __rseq_str(label) ":\n\t"
#endif /* #ifdef __PPC64__ */ @@ -136,7 +142,7 @@ do { \
#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ RSEQ_INJECT_ASM(2) \ - "lwz %%r17, %[" __rseq_str(current_cpu_id) "]\n\t" \ + RSEQ_LOAD_INT(current_cpu_id) "%%r17, %[" __rseq_str(current_cpu_id) "]\n\t" \ "cmpw cr7, %[" __rseq_str(cpu_id) "], %%r17\n\t" \ "bne- cr7, " __rseq_str(label) "\n\t"
@@ -153,25 +159,25 @@ do { \ * RSEQ_ASM_OP_* (else): doesn't have hard-code registers(unless cr7) */ #define RSEQ_ASM_OP_CMPEQ(var, expect, label) \ - LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \ - CMP_WORD "cr7, %%r17, %[" __rseq_str(expect) "]\n\t" \ + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ + RSEQ_CMP_LONG "cr7, %%r17, %[" __rseq_str(expect) "]\n\t" \ "bne- cr7, " __rseq_str(label) "\n\t"
#define RSEQ_ASM_OP_CMPNE(var, expectnot, label) \ - LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \ - CMP_WORD "cr7, %%r17, %[" __rseq_str(expectnot) "]\n\t" \ + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ + RSEQ_CMP_LONG "cr7, %%r17, %[" __rseq_str(expectnot) "]\n\t" \ "beq- cr7, " __rseq_str(label) "\n\t"
#define RSEQ_ASM_OP_STORE(value, var) \ - STORE_WORD "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" + RSEQ_STORE_LONG(var) "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t"
/* Load @var to r17 */ #define RSEQ_ASM_OP_R_LOAD(var) \ - LOAD_WORD "%%r17, %[" __rseq_str(var) "]\n\t" + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t"
/* Store r17 to @var */ #define RSEQ_ASM_OP_R_STORE(var) \ - STORE_WORD "%%r17, %[" __rseq_str(var) "]\n\t" + RSEQ_STORE_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t"
/* Add @count to r17 */ #define RSEQ_ASM_OP_R_ADD(count) \ @@ -179,11 +185,11 @@ do { \
/* Load (r17 + voffp) to r17 */ #define RSEQ_ASM_OP_R_LOADX(voffp) \ - LOADX_WORD "%%r17, %[" __rseq_str(voffp) "], %%r17\n\t" + RSEQ_LOADX_LONG "%%r17, %[" __rseq_str(voffp) "], %%r17\n\t"
/* TODO: implement a faster memcpy. */ #define RSEQ_ASM_OP_R_MEMCPY() \ - "cmpdi %%r19, 0\n\t" \ + RSEQ_CMP_LONG_INT "%%r19, 0\n\t" \ "beq 333f\n\t" \ "addi %%r20, %%r20, -1\n\t" \ "addi %%r21, %%r21, -1\n\t" \ @@ -191,16 +197,16 @@ do { \ "lbzu %%r18, 1(%%r20)\n\t" \ "stbu %%r18, 1(%%r21)\n\t" \ "addi %%r19, %%r19, -1\n\t" \ - "cmpdi %%r19, 0\n\t" \ + RSEQ_CMP_LONG_INT "%%r19, 0\n\t" \ "bne 222b\n\t" \ "333:\n\t" \
#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \ - STORE_WORD "%%r17, %[" __rseq_str(var) "]\n\t" \ + RSEQ_STORE_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ __rseq_str(post_commit_label) ":\n\t"
#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \ - STORE_WORD "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" \ + RSEQ_STORE_LONG(var) "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" \ __rseq_str(post_commit_label) ":\n\t"
static inline __attribute__((always_inline)) @@ -235,8 +241,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -248,23 +254,28 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif }
static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9)
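
As on the other architectures, voffp changes from off_t to long: off_t is a file-offset type and can be 64-bit on 32-bit userland (for instance with _FILE_OFFSET_BITS=64), whereas the asm consumes the offset in a single register and adds it to a pointer. long keeps the parameter register-sized on every supported ABI; a compile-time statement of that assumption (illustration only, not from the patch):

	#include <stdint.h>

	/* The property the switch to long relies on. */
	_Static_assert(sizeof(long) == sizeof(intptr_t),
		       "voffp must fit in one register, like a pointer");
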
@@ -301,8 +312,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -316,16 +327,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -359,8 +375,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [count] "r" (count) @@ -372,12 +388,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -419,8 +438,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -436,16 +455,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -489,8 +513,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -506,16 +530,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -560,8 +589,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" 
(__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -577,18 +606,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -635,8 +670,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -653,16 +688,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -711,8 +751,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -729,23 +769,23 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif }
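
Every exit path out of the asm goto blocks (the normal return, abort, cmpfail and the RSEQ_COMPARE_TWICE error labels) now goes through rseq_after_asm_goto(). The helper lives in rseq.h, which is changed elsewhere in this patch; it is presumably just a compiler barrier that works around gcc asm-goto code-generation problems, along the lines of:

	/* Assumed shape of the rseq.h helper, not the literal hunk. */
	static inline void rseq_after_asm_goto(void)
	{
		__asm__ __volatile__ ("" : : : "memory");
	}
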
-#undef STORE_WORD -#undef LOAD_WORD -#undef LOADX_WORD -#undef CMP_WORD - #endif /* !RSEQ_SKIP_FASTPATH */ diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h index 8ef94ad1cbb4..4e6dc5f0cb42 100644 --- a/tools/testing/selftests/rseq/rseq-s390.h +++ b/tools/testing/selftests/rseq/rseq-s390.h @@ -165,8 +165,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -178,16 +178,21 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -198,7 +203,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) */ static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9)
@@ -233,8 +238,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -248,16 +253,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -288,8 +298,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [count] "r" (count) @@ -301,12 +311,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -347,8 +360,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -364,16 +377,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -426,8 +444,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -443,18 +461,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -534,8 +558,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, #endif : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" 
(cpu), - [current_cpu_id] "m" (__rseq_abi.cpu_id), - [rseq_cs] "m" (__rseq_abi.rseq_cs), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -555,16 +579,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq-skip.h b/tools/testing/selftests/rseq/rseq-skip.h index 72750b5905a9..7b53dac1fcdd 100644 --- a/tools/testing/selftests/rseq/rseq-skip.h +++ b/tools/testing/selftests/rseq/rseq-skip.h @@ -13,7 +13,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { return -1; } diff --git a/tools/testing/selftests/rseq/rseq-thread-pointer.h b/tools/testing/selftests/rseq/rseq-thread-pointer.h new file mode 100644 index 000000000000..977c25d758b2 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-thread-pointer.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers mathieu.desnoyers@efficios.com + */ + +#ifndef _RSEQ_THREAD_POINTER +#define _RSEQ_THREAD_POINTER + +#if defined(__x86_64__) || defined(__i386__) +#include "rseq-x86-thread-pointer.h" +#elif defined(__PPC__) +#include "rseq-ppc-thread-pointer.h" +#else +#include "rseq-generic-thread-pointer.h" +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h b/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h new file mode 100644 index 000000000000..d3133587d996 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-x86-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers mathieu.desnoyers@efficios.com + */ + +#ifndef _RSEQ_X86_THREAD_POINTER +#define _RSEQ_X86_THREAD_POINTER + +#include <features.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#if __GNUC_PREREQ (11, 1) +static inline void *rseq_thread_pointer(void) +{ + return __builtin_thread_pointer(); +} +#else +static inline void *rseq_thread_pointer(void) +{ + void *__result; + +# ifdef __x86_64__ + __asm__ ("mov %%fs:0, %0" : "=r" (__result)); +# else + __asm__ ("mov %%gs:0, %0" : "=r" (__result)); +# endif + return __result; +} +#endif /* !GCC 11 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h index 640411518e46..bd01dc41ca13 100644 --- a/tools/testing/selftests/rseq/rseq-x86.h +++ b/tools/testing/selftests/rseq/rseq-x86.h @@ -28,6 +28,8 @@
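
The new rseq-thread-pointer.h dispatcher selects a per-architecture rseq_thread_pointer(): x86 reads the TCB self-pointer at %fs:0 (x86-64) or %gs:0 (i386) unless gcc >= 11.1 already provides __builtin_thread_pointer() there, powerpc reads r13/r2, and every other architecture uses the builtin. A hypothetical smoke test (not part of the patch) is enough to see the value it returns:

	#include <stdio.h>
	#include "rseq-thread-pointer.h"

	int main(void)
	{
		/* The thread pointer anchors initial-exec TLS; rseq_offset
		 * (set up in rseq.c below) is measured from this address. */
		printf("thread pointer: %p\n", rseq_thread_pointer());
		return 0;
	}
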
#ifdef __x86_64__
+#define RSEQ_ASM_TP_SEGMENT %%fs + #define rseq_smp_mb() \ __asm__ __volatile__ ("lock; addl $0,-128(%%rsp)" ::: "memory", "cc") #define rseq_smp_rmb() rseq_barrier() @@ -123,14 +125,14 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpq %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpq %[v], %[expect]\n\t" "jnz %l[error2]\n\t" #endif @@ -141,7 +143,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -152,16 +154,21 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -172,7 +179,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) */ static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9)
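
In the x86-64 fast paths the asm no longer takes &__rseq_abi; it takes the scalar rseq_offset and addresses the fields as RSEQ_ASM_TP_SEGMENT:offset(%[rseq_offset]), i.e. relative to %fs, whose base is the thread pointer (the i386 section further down does the same with %gs). A hypothetical stand-alone reader for the cpu_id field, mirroring RSEQ_ASM_CMP_CPU_ID and assuming cpu_id sits at byte offset 4 of struct rseq_abi (x86-64 only, not part of the patch):

	#include <stddef.h>
	#include <stdint.h>

	extern ptrdiff_t rseq_offset;	/* defined in rseq.c later in this patch */

	static inline uint32_t read_rseq_cpu_id(void)
	{
		uint32_t cpu;

		/* %fs:4(%reg) -- 4 is the assumed offsetof(struct rseq_abi, cpu_id). */
		__asm__ __volatile__ ("movl %%fs:4(%[off]), %[cpu]"
				      : [cpu] "=r" (cpu)
				      : [off] "r" (rseq_offset));
		return cpu;
	}
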
@@ -184,15 +191,15 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movq %[v], %%rbx\n\t" "cmpq %%rbx, %[expectnot]\n\t" "je %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "movq %[v], %%rbx\n\t" "cmpq %%rbx, %[expectnot]\n\t" "je %l[error2]\n\t" @@ -207,7 +214,7 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -220,16 +227,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -245,11 +257,11 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) #endif /* final store */ "addq %[count], %[v]\n\t" @@ -258,7 +270,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [count] "er" (count) @@ -269,12 +281,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -286,7 +301,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) * *pval += inc; */ static inline __attribute__((always_inline)) -int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu) +int rseq_offset_deref_addv(intptr_t *ptr, long off, intptr_t inc, int cpu) { RSEQ_INJECT_C(9)
@@ -296,11 +311,11 @@ int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) #endif /* get p+v */ "movq %[ptr], %%rbx\n\t" @@ -314,7 +329,7 @@ int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [ptr] "m" (*ptr), [off] "er" (off), @@ -351,14 +366,14 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpq %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpq %[v], %[expect]\n\t" "jnz %l[error2]\n\t" #endif @@ -372,7 +387,7 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -387,16 +402,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -426,8 +446,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) #endif /* Start rseq by storing table entry pointer into rseq_cs. 
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpq %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" @@ -436,7 +456,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(5) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpq %[v], %[expect]\n\t" "jnz %l[error2]\n\t" "cmpq %[v2], %[expect2]\n\t" @@ -449,7 +469,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -464,18 +484,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -500,14 +526,14 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, "movq %[dst], %[rseq_scratch1]\n\t" "movq %[len], %[rseq_scratch2]\n\t" /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpq %[v], %[expect]\n\t" "jnz 5f\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) "cmpq %[v], %[expect]\n\t" "jnz 7f\n\t" #endif @@ -555,7 +581,7 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, #endif : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expect] "r" (expect), @@ -574,16 +600,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -600,7 +631,9 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
#endif /* !RSEQ_SKIP_FASTPATH */
-#elif __i386__
+#elif defined(__i386__)
+
+#define RSEQ_ASM_TP_SEGMENT %%gs
#define rseq_smp_mb() \ __asm__ __volatile__ ("lock; addl $0,-128(%%esp)" ::: "memory", "cc") @@ -701,14 +734,14 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpl %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpl %[v], %[expect]\n\t" "jnz %l[error2]\n\t" #endif @@ -719,7 +752,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), [v] "m" (*v), [expect] "r" (expect), [newv] "r" (newv) @@ -730,16 +763,21 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -750,7 +788,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) */ static inline __attribute__((always_inline)) int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, - off_t voffp, intptr_t *load, int cpu) + long voffp, intptr_t *load, int cpu) { RSEQ_INJECT_C(9)
@@ -762,15 +800,15 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movl %[v], %%ebx\n\t" "cmpl %%ebx, %[expectnot]\n\t" "je %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "movl %[v], %%ebx\n\t" "cmpl %%ebx, %[expectnot]\n\t" "je %l[error2]\n\t" @@ -785,7 +823,7 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expectnot] "r" (expectnot), @@ -798,16 +836,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -823,11 +866,11 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) #endif /* final store */ "addl %[count], %[v]\n\t" @@ -836,7 +879,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [count] "ir" (count) @@ -847,12 +890,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu) , error1 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); #endif } @@ -872,14 +918,14 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. 
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpl %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpl %[v], %[expect]\n\t" "jnz %l[error2]\n\t" #endif @@ -894,7 +940,7 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* try store input */ [v2] "m" (*v2), [newv2] "m" (newv2), @@ -909,16 +955,21 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -938,15 +989,15 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movl %[expect], %%eax\n\t" "cmpl %[v], %%eax\n\t" "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "movl %[expect], %%eax\n\t" "cmpl %[v], %%eax\n\t" "jnz %l[error2]\n\t" @@ -962,7 +1013,7 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* try store input */ [v2] "m" (*v2), [newv2] "r" (newv2), @@ -977,16 +1028,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif
@@ -1008,8 +1064,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) #endif /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "cmpl %[v], %[expect]\n\t" "jnz %l[cmpfail]\n\t" @@ -1018,7 +1074,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, "jnz %l[cmpfail]\n\t" RSEQ_INJECT_ASM(5) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) "cmpl %[v], %[expect]\n\t" "jnz %l[error2]\n\t" "cmpl %[expect2], %[v2]\n\t" @@ -1032,7 +1088,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, RSEQ_ASM_DEFINE_ABORT(4, "", abort) : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* cmp2 input */ [v2] "m" (*v2), [expect2] "r" (expect2), @@ -1047,18 +1103,24 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, , error1, error2, error3 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("1st expected value comparison failed"); error3: + rseq_after_asm_goto(); rseq_bug("2nd expected value comparison failed"); #endif } @@ -1084,15 +1146,15 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, "movl %[dst], %[rseq_scratch1]\n\t" "movl %[len], %[rseq_scratch2]\n\t" /* Start rseq by storing table entry pointer into rseq_cs. 
*/ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movl %[expect], %%eax\n\t" "cmpl %%eax, %[v]\n\t" "jnz 5f\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) "movl %[expect], %%eax\n\t" "cmpl %%eax, %[v]\n\t" "jnz 7f\n\t" @@ -1142,7 +1204,7 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, #endif : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expect] "m" (expect), @@ -1161,16 +1223,21 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } @@ -1196,15 +1263,15 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, "movl %[dst], %[rseq_scratch1]\n\t" "movl %[len], %[rseq_scratch2]\n\t" /* Start rseq by storing table entry pointer into rseq_cs. */ - RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi])) - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f) + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) RSEQ_INJECT_ASM(3) "movl %[expect], %%eax\n\t" "cmpl %%eax, %[v]\n\t" "jnz 5f\n\t" RSEQ_INJECT_ASM(4) #ifdef RSEQ_COMPARE_TWICE - RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) "movl %[expect], %%eax\n\t" "cmpl %%eax, %[v]\n\t" "jnz 7f\n\t" @@ -1255,7 +1322,7 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, #endif : /* gcc asm goto does not allow outputs */ : [cpu_id] "r" (cpu), - [rseq_abi] "r" (&__rseq_abi), + [rseq_offset] "r" (rseq_offset), /* final store input */ [v] "m" (*v), [expect] "m" (expect), @@ -1274,16 +1341,21 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, , error1, error2 #endif ); + rseq_after_asm_goto(); return 0; abort: + rseq_after_asm_goto(); RSEQ_INJECT_FAILED return -1; cmpfail: + rseq_after_asm_goto(); return 1; #ifdef RSEQ_COMPARE_TWICE error1: + rseq_after_asm_goto(); rseq_bug("cpu_id comparison failed"); error2: + rseq_after_asm_goto(); rseq_bug("expected value comparison failed"); #endif } diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 7159eb777fd3..986b9458efb2 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -26,131 +26,124 @@ #include <assert.h> #include <signal.h> #include <limits.h> +#include <dlfcn.h> +#include <stddef.h>
+#include "../kselftest.h" #include "rseq.h"
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +static const ptrdiff_t *libc_rseq_offset_p; +static const unsigned int *libc_rseq_size_p; +static const unsigned int *libc_rseq_flags_p;
-__thread volatile struct rseq __rseq_abi = { - .cpu_id = RSEQ_CPU_ID_UNINITIALIZED, -}; +/* Offset from the thread pointer to the rseq area. */ +ptrdiff_t rseq_offset;
-/* - * Shared with other libraries. This library may take rseq ownership if it is - * still 0 when executing the library constructor. Set to 1 by library - * constructor when handling rseq. Set to 0 in destructor if handling rseq. - */ -int __rseq_handled; +/* Size of the registered rseq area. 0 if the registration was + unsuccessful. */ +unsigned int rseq_size = -1U; + +/* Flags used during rseq registration. */ +unsigned int rseq_flags;
-/* Whether this library have ownership of rseq registration. */ static int rseq_ownership;
-static __thread volatile uint32_t __rseq_refcount; +static +__thread struct rseq_abi __rseq_abi __attribute__((tls_model("initial-exec"))) = { + .cpu_id = RSEQ_ABI_CPU_ID_UNINITIALIZED, +};
-static void signal_off_save(sigset_t *oldset) +static int sys_rseq(struct rseq_abi *rseq_abi, uint32_t rseq_len, + int flags, uint32_t sig) { - sigset_t set; - int ret; - - sigfillset(&set); - ret = pthread_sigmask(SIG_BLOCK, &set, oldset); - if (ret) - abort(); + return syscall(__NR_rseq, rseq_abi, rseq_len, flags, sig); }
-static void signal_restore(sigset_t oldset) +int rseq_available(void) { - int ret; + int rc;
- ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL); - if (ret) + rc = sys_rseq(NULL, 0, 0, 0); + if (rc != -1) abort(); -} - -static int sys_rseq(volatile struct rseq *rseq_abi, uint32_t rseq_len, - int flags, uint32_t sig) -{ - return syscall(__NR_rseq, rseq_abi, rseq_len, flags, sig); + switch (errno) { + case ENOSYS: + return 0; + case EINVAL: + return 1; + default: + abort(); + } }
int rseq_register_current_thread(void) { - int rc, ret = 0; - sigset_t oldset; + int rc;
- if (!rseq_ownership) + if (!rseq_ownership) { + /* Treat libc's ownership as a successful registration. */ return 0; - signal_off_save(&oldset); - if (__rseq_refcount == UINT_MAX) { - ret = -1; - goto end; - } - if (__rseq_refcount++) - goto end; - rc = sys_rseq(&__rseq_abi, sizeof(struct rseq), 0, RSEQ_SIG); - if (!rc) { - assert(rseq_current_cpu_raw() >= 0); - goto end; } - if (errno != EBUSY) - __rseq_abi.cpu_id = RSEQ_CPU_ID_REGISTRATION_FAILED; - ret = -1; - __rseq_refcount--; -end: - signal_restore(oldset); - return ret; + rc = sys_rseq(&__rseq_abi, sizeof(struct rseq_abi), 0, RSEQ_SIG); + if (rc) + return -1; + assert(rseq_current_cpu_raw() >= 0); + return 0; }
int rseq_unregister_current_thread(void) { - int rc, ret = 0; - sigset_t oldset; + int rc;
- if (!rseq_ownership) + if (!rseq_ownership) { + /* Treat libc's ownership as a successful unregistration. */ return 0; - signal_off_save(&oldset); - if (!__rseq_refcount) { - ret = -1; - goto end; } - if (--__rseq_refcount) - goto end; - rc = sys_rseq(&__rseq_abi, sizeof(struct rseq), - RSEQ_FLAG_UNREGISTER, RSEQ_SIG); - if (!rc) - goto end; - __rseq_refcount = 1; - ret = -1; -end: - signal_restore(oldset); - return ret; + rc = sys_rseq(&__rseq_abi, sizeof(struct rseq_abi), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG); + if (rc) + return -1; + return 0; }
-int32_t rseq_fallback_current_cpu(void) +static __attribute__((constructor)) +void rseq_init(void) { - int32_t cpu; - - cpu = sched_getcpu(); - if (cpu < 0) { - perror("sched_getcpu()"); - abort(); + libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); + libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); + libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); + if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) { + /* rseq registration owned by glibc */ + rseq_offset = *libc_rseq_offset_p; + rseq_size = *libc_rseq_size_p; + rseq_flags = *libc_rseq_flags_p; + return; } - return cpu; -} - -void __attribute__((constructor)) rseq_init(void) -{ - /* Check whether rseq is handled by another library. */ - if (__rseq_handled) + if (!rseq_available()) return; - __rseq_handled = 1; rseq_ownership = 1; + rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer(); + rseq_size = sizeof(struct rseq_abi); + rseq_flags = 0; }
-void __attribute__((destructor)) rseq_fini(void) +static __attribute__((destructor)) +void rseq_exit(void) { if (!rseq_ownership) return; - __rseq_handled = 0; + rseq_offset = 0; + rseq_size = -1U; rseq_ownership = 0; } + +int32_t rseq_fallback_current_cpu(void) +{ + int32_t cpu; + + cpu = sched_getcpu(); + if (cpu < 0) { + perror("sched_getcpu()"); + abort(); + } + return cpu; +} diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index 3f63eb362b92..9d850b290c2e 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h @@ -16,7 +16,9 @@ #include <errno.h> #include <stdio.h> #include <stdlib.h> -#include <linux/rseq.h> +#include <stddef.h> +#include "rseq-abi.h" +#include "compiler.h"
/* * Empty code injection macros, override when testing. @@ -43,8 +45,20 @@ #define RSEQ_INJECT_FAILED #endif
-extern __thread volatile struct rseq __rseq_abi; -extern int __rseq_handled; +#include "rseq-thread-pointer.h" + +/* Offset from the thread pointer to the rseq area. */ +extern ptrdiff_t rseq_offset; +/* Size of the registered rseq area. 0 if the registration was + unsuccessful. */ +extern unsigned int rseq_size; +/* Flags used during rseq registration. */ +extern unsigned int rseq_flags; + +static inline struct rseq_abi *rseq_get_abi(void) +{ + return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset); +}
#define rseq_likely(x) __builtin_expect(!!(x), 1) #define rseq_unlikely(x) __builtin_expect(!!(x), 0) @@ -108,7 +122,7 @@ int32_t rseq_fallback_current_cpu(void); */ static inline int32_t rseq_current_cpu_raw(void) { - return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id); + return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id); }
/* @@ -124,7 +138,7 @@ static inline int32_t rseq_current_cpu_raw(void) */ static inline uint32_t rseq_cpu_start(void) { - return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id_start); + return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id_start); }
static inline uint32_t rseq_current_cpu(void) @@ -139,11 +153,7 @@ static inline uint32_t rseq_current_cpu(void)
static inline void rseq_clear_rseq_cs(void) { -#ifdef __LP64__ - __rseq_abi.rseq_cs.ptr = 0; -#else - __rseq_abi.rseq_cs.ptr.ptr32 = 0; -#endif + RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0); }
/*
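
For readers following the rseq selftest changes above: after this update the tests no longer hand-roll their own __rseq_abi registration when glibc already owns it; instead they locate the kernel-registered rseq area through the thread pointer plus the offset exported by glibc, which is what the new rseq_get_abi() helper in rseq.h does. The following is a minimal standalone sketch of that access pattern, not part of the patch. It assumes glibc 2.35 or later (which ships <sys/rseq.h> and exports __rseq_offset/__rseq_size) and a compiler that provides __builtin_thread_pointer(); the helper name get_rseq_area() is made up for the illustration.

/*
 * Illustrative only -- not part of the patch above. Assumes glibc 2.35+
 * (exporting __rseq_offset and __rseq_size via <sys/rseq.h>) and a
 * toolchain where __builtin_thread_pointer() is available.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/rseq.h>

static struct rseq *get_rseq_area(void)
{
	/* The rseq area lives at a fixed offset from the thread pointer. */
	return (struct rseq *)((uintptr_t)__builtin_thread_pointer() +
			       __rseq_offset);
}

int main(void)
{
	if (!__rseq_size) {
		puts("rseq not registered by libc");
		return 1;
	}
	/* cpu_id can change under us; read it once, as the selftests do. */
	printf("current cpu: %u\n",
	       __atomic_load_n(&get_rseq_area()->cpu_id, __ATOMIC_RELAXED));
	return 0;
}

This mirrors the patched selftests: rseq_offset is taken from glibc's __rseq_offset when libc performs the registration, and only falls back to the library's own __rseq_abi TLS variable (with rseq_offset computed against the thread pointer) when libc does not.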