I'm announcing the release of the 5.13.6 kernel.
All users of the 5.13 kernel series must upgrade.
The updated 5.13.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-5.13.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
thanks,
greg k-h
------------
Documentation/arm64/tagged-address-abi.rst | 26 - Documentation/driver-api/early-userspace/early_userspace_support.rst | 8 Documentation/filesystems/ramfs-rootfs-initramfs.rst | 2 Documentation/networking/ip-sysctl.rst | 2 Documentation/trace/histogram.rst | 2 Makefile | 2 arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts | 4 arch/arm/configs/multi_v7_defconfig | 2 arch/arm64/kernel/Makefile | 2 arch/arm64/kernel/mte.c | 15 arch/nds32/mm/mmap.c | 2 arch/powerpc/kvm/book3s_hv.c | 2 arch/powerpc/kvm/book3s_hv_nested.c | 20 + arch/powerpc/kvm/book3s_rtas.c | 25 + arch/powerpc/kvm/powerpc.c | 4 arch/riscv/include/asm/efi.h | 4 arch/riscv/mm/init.c | 4 arch/s390/boot/text_dma.S | 19 - arch/s390/include/asm/ftrace.h | 1 arch/s390/kernel/ftrace.c | 2 arch/s390/kernel/mcount.S | 4 arch/s390/net/bpf_jit_comp.c | 2 arch/x86/kvm/cpuid.c | 3 arch/x86/kvm/svm/sev.c | 14 drivers/acpi/Kconfig | 2 drivers/acpi/utils.c | 7 drivers/base/auxiliary.c | 8 drivers/base/core.c | 6 drivers/block/rbd.c | 32 - drivers/bus/mhi/core/main.c | 17 drivers/bus/mhi/pci_generic.c | 45 +- drivers/firmware/arm_scmi/bus.c | 3 drivers/firmware/efi/dev-path-parser.c | 46 -- drivers/firmware/efi/efi.c | 13 drivers/firmware/efi/tpm.c | 8 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3 drivers/gpu/drm/drm_ioctl.c | 3 drivers/gpu/drm/i915/gvt/handlers.c | 15 drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 1 drivers/gpu/drm/ttm/ttm_device.c | 2 drivers/gpu/drm/vc4/vc4_hdmi.c | 49 +- drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 1 drivers/i2c/busses/i2c-mpc.c | 4 drivers/media/pci/intel/ipu3/cio2-bridge.c | 6 drivers/media/pci/ngene/ngene-core.c | 2 drivers/media/pci/ngene/ngene.h | 14 drivers/misc/eeprom/at24.c | 17 drivers/mmc/core/host.c | 20 - drivers/net/bonding/bond_main.c | 183 ++++++++-- drivers/net/dsa/mv88e6xxx/Kconfig | 2 drivers/net/dsa/sja1105/sja1105_main.c | 6 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 63 ++- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 9 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 2 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 18 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 3 drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 16 drivers/net/ethernet/google/gve/gve_main.c | 5 drivers/net/ethernet/hisilicon/hip04_eth.c | 6 drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 6 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 1 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 10 drivers/net/ethernet/ibm/ibmvnic.c | 2 drivers/net/ethernet/intel/e1000e/netdev.c | 1 drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 1 drivers/net/ethernet/intel/iavf/iavf_main.c | 1 drivers/net/ethernet/intel/igb/igb_main.c | 15 drivers/net/ethernet/intel/igc/igc.h | 2 drivers/net/ethernet/intel/igc/igc_main.c | 3 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 drivers/net/ethernet/intel/ixgbevf/ipsec.c | 20 - drivers/net/ethernet/mscc/ocelot_net.c | 9 drivers/net/ethernet/realtek/r8169_main.c | 3 drivers/net/ethernet/sfc/efx_channels.c | 18 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8 drivers/net/phy/marvell10g.c | 40 +- drivers/net/usb/hso.c | 33 + drivers/nvme/host/core.c | 5 drivers/nvme/host/pci.c | 5 drivers/pwm/pwm-sprd.c | 11 drivers/regulator/hi6421-regulator.c | 30 - drivers/scsi/scsi_transport_iscsi.c | 90 +--- drivers/spi/spi-bcm2835.c | 12 drivers/spi/spi-cadence-quadspi.c | 3 drivers/spi/spi-cadence.c | 14 drivers/spi/spi-mt65xx.c | 16 drivers/spi/spi-stm32.c | 9 drivers/target/target_core_sbc.c | 35 - 
drivers/target/target_core_transport.c | 2 drivers/usb/core/hub.c | 120 ++++-- drivers/usb/core/quirks.c | 4 drivers/usb/dwc2/core.h | 4 drivers/usb/dwc2/core_intr.c | 3 drivers/usb/dwc2/gadget.c | 31 + drivers/usb/dwc2/hcd.c | 6 drivers/usb/dwc2/params.c | 1 drivers/usb/gadget/udc/tegra-xudc.c | 1 drivers/usb/host/ehci-hcd.c | 18 drivers/usb/host/max3421-hcd.c | 44 -- drivers/usb/host/xhci-hub.c | 3 drivers/usb/host/xhci-pci-renesas.c | 16 drivers/usb/host/xhci-pci.c | 7 drivers/usb/renesas_usbhs/fifo.c | 7 drivers/usb/serial/cp210x.c | 5 drivers/usb/serial/option.c | 3 drivers/usb/storage/unusual_uas.h | 7 drivers/usb/typec/stusb160x.c | 20 - drivers/usb/typec/tipd/core.c | 9 fs/afs/cmservice.c | 25 - fs/afs/write.c | 18 fs/btrfs/backref.c | 6 fs/btrfs/backref.h | 3 fs/btrfs/delayed-ref.c | 4 fs/btrfs/extent-tree.c | 3 fs/btrfs/qgroup.c | 38 +- fs/btrfs/qgroup.h | 2 fs/btrfs/tests/qgroup-tests.c | 20 - fs/btrfs/tree-log.c | 31 + fs/ceph/mds_client.c | 2 fs/cifs/smb2ops.c | 49 ++ fs/hugetlbfs/inode.c | 2 fs/io_uring.c | 33 + fs/proc/base.c | 2 fs/userfaultfd.c | 26 - include/acpi/acpi_bus.h | 8 include/drm/drm_ioctl.h | 1 include/linux/highmem.h | 2 include/linux/marvell_phy.h | 6 include/linux/memblock.h | 4 include/net/bonding.h | 9 include/net/mptcp.h | 5 include/sound/soc.h | 6 include/trace/events/afs.h | 67 +++ kernel/bpf/verifier.c | 2 kernel/dma/ops_helpers.c | 12 kernel/time/posix-cpu-timers.c | 10 kernel/time/timer.c | 8 kernel/trace/ring_buffer.c | 28 + kernel/trace/trace.c | 4 kernel/trace/trace_events_hist.c | 22 - kernel/trace/trace_synth.h | 2 kernel/tracepoint.c | 2 mm/kfence/core.c | 19 - mm/memblock.c | 3 mm/memory.c | 11 mm/page_alloc.c | 29 - net/bpf/test_run.c | 3 net/caif/caif_socket.c | 3 net/core/dev.c | 28 + net/core/skbuff.c | 1 net/core/skmsg.c | 16 net/decnet/af_decnet.c | 27 - net/ipv4/tcp_bpf.c | 2 net/ipv4/tcp_fastopen.c | 28 + net/ipv4/tcp_input.c | 19 - net/ipv4/tcp_ipv4.c | 2 net/ipv4/udp.c | 25 + net/ipv4/udp_bpf.c | 2 net/ipv6/ip6_output.c | 4 net/ipv6/route.c | 2 net/ipv6/udp.c | 25 + net/mptcp/mib.c | 1 net/mptcp/mib.h | 1 net/mptcp/options.c | 24 - net/mptcp/pm_netlink.c | 10 net/mptcp/protocol.c | 81 ++-- net/mptcp/protocol.h | 14 net/mptcp/subflow.c | 21 - net/mptcp/syncookies.c | 16 net/netrom/nr_timer.c | 20 - net/sched/act_skbmod.c | 12 net/sched/cls_api.c | 2 net/sched/cls_tcindex.c | 5 net/sctp/auth.c | 2 net/sctp/socket.c | 4 samples/bpf/xdpsock_user.c | 28 + scripts/Makefile.build | 2 sound/core/pcm_native.c | 25 - sound/hda/intel-dsp-config.c | 4 sound/isa/sb/sb16_csp.c | 4 sound/pci/hda/patch_hdmi.c | 1 sound/pci/hda/patch_realtek.c | 1 sound/soc/codecs/rt5631.c | 2 sound/soc/codecs/wm_adsp.c | 2 sound/soc/soc-pcm.c | 22 - sound/soc/sof/intel/pci-tgl.c | 1 sound/usb/mixer.c | 10 sound/usb/quirks.c | 3 tools/bpf/bpftool/common.c | 5 tools/perf/builtin-inject.c | 13 tools/perf/builtin-report.c | 33 + tools/perf/builtin-sched.c | 33 + tools/perf/builtin-script.c | 8 tools/perf/tests/event_update.c | 6 tools/perf/tests/maps.c | 2 tools/perf/tests/topology.c | 1 tools/perf/util/data.c | 2 tools/perf/util/dso.c | 4 tools/perf/util/env.c | 2 tools/perf/util/lzma.c | 8 tools/perf/util/map.c | 2 tools/perf/util/probe-event.c | 4 tools/perf/util/probe-file.c | 4 tools/perf/util/sort.c | 2 tools/perf/util/sort.h | 2 tools/testing/selftests/net/icmp_redirect.sh | 5 tools/testing/selftests/net/mptcp/mptcp_join.sh | 2 tools/testing/selftests/vm/userfaultfd.c | 6 209 files changed, 1838 insertions(+), 862 deletions(-)
Adrian Hunter (1): driver core: Prevent warning when removing a device link from unregistered consumer
Alain Volmat (1): spi: stm32: fixes pm_runtime calls in probe/remove
Alan Young (1): ALSA: pcm: Call substream ack() method upon compat mmap commit
Aleksandr Loktionov (1): igb: Check if num of q_vectors is smaller than max before array access
Alexander Egorenkov (1): s390/boot: fix use of expolines in the DMA code
Alexander Potapenko (2):
      kfence: move the size check to the beginning of __kfence_alloc()
      kfence: skip all GFP_ZONEMASK allocations
Alexander Tsoy (1): ALSA: usb-audio: Add registration quirk for JBL Quantum headsets
Alexandru Tachici (1): spi: spi-bcm2835: Fix deadlock
Amelie Delaunay (2):
      usb: typec: stusb160x: register role switch before interrupt registration
      usb: typec: stusb160x: Don't block probing of consumer of "connector" nodes
Anand Jain (1): btrfs: check for missing device in btrfs_trim_fs
Andy Shevchenko (2):
      efi/dev-path-parser: Switch to use for_each_acpi_dev_match()
      ACPI: utils: Fix reference counting in for_each_acpi_dev_match()
Axel Lin (2):
      regulator: hi6421: Use correct variable type for regmap api val argument
      regulator: hi6421: Fix getting wrong drvdata
Bhaumik Bhatt (2):
      bus: mhi: pci_generic: Apply no-op for wake using sideband wake boolean
      bus: mhi: core: Validate channel ID when processing command completions
Bin Meng (1): riscv: Fix 32-bit RISC-V boot failure
Casey Chen (1): nvme-pci: do not call nvme_dev_remove_admin from nvme_remove
Charles Baylis (1): drm: Return -ENOTTY for non-drm ioctls
Charles Keepax (1): ASoC: wm_adsp: Correct wm_coeff_tlv_get handling
Chengwen Feng (1): net: hns3: fix possible mismatches resp of mailbox
Chris Packham (1): i2c: mpc: Poll for MCF
Christoph Hellwig (2):
      nvme: set the PRACT bit when using Write Zeroes with T10 PI
      mm: call flush_dcache_page() in memcpy_to_page() and memzero_page()
Christophe JAILLET (7):
      ixgbe: Fix an error handling path in 'ixgbe_probe()'
      igc: Fix an error handling path in 'igc_probe()'
      igb: Fix an error handling path in 'igb_probe()'
      fm10k: Fix an error handling path in 'fm10k_probe()'
      e1000e: Fix an error handling path in 'e1000_probe()'
      iavf: Fix an error handling path in 'iavf_probe()'
      gve: Fix an error handling path in 'gve_probe()'
Colin Ian King (2):
      liquidio: Fix unintentional sign extension issue on left shift of u16
      s390/bpf: Perform r1 range checking before accessing jit->seen_reg[r1]
Colin Xu (1): drm/i915/gvt: Clear d3_entered on elsp cmd submission.
Daniel Borkmann (1): bpf: Fix tail_call_reachable rejection for interpreter when jit failed
David Disseldorp (1): scsi: target: Fix NULL dereference on XCOPY completion
David Howells (2):
      afs: Fix tracepoint string placement with built-in AFS
      afs: Fix setting of writeback_index
David Jeffery (1): usb: ehci: Prevent missed ehci interrupts with edge-triggered MSI
Dmitry Bogdanov (1): scsi: target: Fix protect handling in WRITE SAME(32)
Dongliang Mu (1): usb: hso: fix error handling code of hso_create_net_device
Eric Dumazet (1): net/tcp_fastopen: fix data races around tfo_active_disable_stamp
Filipe Manana (2):
      btrfs: fix unpersisted i_size on fsync after expanding truncate
      btrfs: fix lock inversion problem when doing qgroup extent tracing
Florian Fainelli (1): skbuff: Fix build with SKB extensions disabled
Frederic Weisbecker (1): posix-cpu-timers: Fix rearm racing against process tick
Geert Uytterhoeven (1): net: dsa: mv88e6xxx: NET_DSA_MV88E6XXX_PTP should depend on NET_DSA_MV88E6XXX
Geliang Tang (1): mptcp: add sk parameter for mptcp_get_options
Greg Kroah-Hartman (2):
      nds32: fix up stack guard gap
      Linux 5.13.6
Greg Thelen (1): usb: xhci: avoid renesas_usb_fw.mem when it's unusable
Gustavo A. R. Silva (1): media: ngene: Fix out-of-bounds bug in ngene_command_config_free_buf()
Hangbin Liu (2):
      selftests: icmp_redirect: remove from checking for IPv6 route get
      selftests: icmp_redirect: IPv6 PMTU info should be cleared after redirect
Haoran Luo (1): tracing: Fix bug in rb_per_cpu_empty() that might cause deadloop.
Heinrich Schuchardt (1): RISC-V: load initrd wherever it fits into memory
Hui Wang (1): ALSA: hda/realtek: Fix pop noise and 2 Front Mic issues on a machine
Ian Ray (1): USB: serial: cp210x: fix comments for GE CS1000
Ilya Dryomov (2):
      rbd: don't hold lock_rwsem while running_list is being drained
      rbd: always kick acquire on "acquired" and "released" notifications
Ioana Ciornei (1): dpaa2-switch: seed the buffer pool after allocating the swp
Jakub Sitnicki (1): bpf, sockmap, udp: sk_prot needs inuse_idx set for proc stats
Jason Ekstrand (1): drm/ttm: Force re-init if ttm_global_init() fails
Jedrzej Jagielski (1): igb: Fix position of assignment to *ring
Jens Axboe (1): io_uring: fix early fdput() of file
Jian Shen (1): net: hns3: fix rx VLAN offload state inconsistent issue
Jianguo Wu (5):
      mptcp: fix warning in __skb_flow_dissect() when do syn cookie for subflow join
      mptcp: remove redundant req destruct in subflow_check_req()
      mptcp: fix syncookie process if mptcp can not_accept new subflow
      mptcp: avoid processing packet if a subflow reset
      selftests: mptcp: fix case multiple subflows limited by server
John Fastabend (2):
      bpf, sockmap: Fix potential memory leak on unlikely error case
      bpf, sockmap, tcp: sk_prot needs inuse_idx set for proc stats
John Keeping (1): USB: serial: cp210x: add ID for CEL EM3588 USB ZigBee stick
Julian Sikorski (1): USB: usb-storage: Add LaCie Rugged USB3-FW to IGNORE_UAS
Jérôme Glisse (1): misc: eeprom: at24: Always append device id even if label property is set.
Kalesh AP (1): bnxt_en: don't disable an already disabled PCI device
Lecopzer Chen (1): Kbuild: lto: fix module versionings mismatch in GNU make 3.X
Like Xu (1): KVM: x86/pmu: Clear anythread deprecated bit when 0xa leaf is unsupported on the SVM
Likun Gao (1): drm/amdgpu: update golden setting for sienna_cichlid
Linus Torvalds (1): ACPI: fix NULL pointer dereference
Loic Poulain (1): bus: mhi: pci_generic: Fix inbound IPCR channel
Luis Henriques (1): ceph: don't WARN if we're still opening a session to an MDS
Mahesh Bandewar (1): bonding: fix build issue
Marc Zyngier (1): firmware/efi: Tell memblock about EFI iomem reservations
Marcelo Henrique Cerri (1): proc: Avoid mixing integer types in mem_rw()
Marco De Marco (1): USB: serial: option: add support for u-blox LARA-R6 family
Marek Behún (1): net: phy: marvell10g: fix differentiation of 88X3310 from 88X3340
Marek Szyprowski (1): usb: dwc2: Skip clock gating on Samsung SoCs
Marek Vasut (1): spi: cadence: Correct initialisation of runtime PM again
Mark Rutland (2):
      arm64: mte: fix restoration of GCR_EL1 from suspend
      arm64: entry: fix KCOV suppression
Mark Tomlinson (1): usb: max-3421: Prevent corruption of freed memory
Markus Boehme (1): ixgbe: Fix packet corruption due to missing DMA sync
Martin Kepplinger (1): usb: typec: tipd: Don't block probing of consumer of "connector" nodes
Mathias Nyman (3):
      xhci: Fix lost USB 2 remote wake
      usb: hub: Disable USB 3 device initiated lpm if exit latency is too high
      usb: hub: Fix link power management max exit latency (MEL) calculations
Matthieu Baerts (1): mptcp: fix 'masking a bool' warning
Maxim Schwalm (1): ASoC: rt5631: Fix regcache sync errors on resume
Maxime Ripard (2):
      drm/vc4: hdmi: Drop devm interrupt handler for CEC interrupts
      drm/panel: raspberrypi-touchscreen: Prevent double-free
Michael Chan (3):
      bnxt_en: Refresh RoCE capabilities in bnxt_ulp_probe()
      bnxt_en: Add missing check for BNXT_STATE_ABORT_ERR in bnxt_fw_rset_task()
      bnxt_en: Validate vlan protocol ID on RX packets
Michal Suchanek (1): efi/tpm: Differentiate missing and invalid final event log table.
Mike Christie (1): scsi: iscsi: Fix iface sysfs attr detection
Mike Kravetz (1): hugetlbfs: fix mount mode command line processing
Mike Rapoport (1): memblock: make for_each_mem_range() traverse MEMBLOCK_HOTPLUG regions
Minas Harutyunyan (2):
      usb: dwc2: gadget: Fix GOUTNAK flow for Slave mode.
      usb: dwc2: gadget: Fix sending zero length packet in DDMA mode.
Mohammad Athari Bin Ismail (1): net: stmmac: Terminate FPE workqueue in suspend
Moritz Fischer (1): Revert "usb: renesas-xhci: Fix handling of unknown ROM state"
Nguyen Dinh Phi (1): netrom: Decrease sock refcount when sock timers expire
Nicholas Piggin (4):
      KVM: PPC: Book3S: Fix CONFIG_TRANSACTIONAL_MEM=n crash
      KVM: PPC: Fix kvm_arch_vcpu_ioctl vcpu_load leak
      KVM: PPC: Book3S: Fix H_RTAS rets buffer overflow
      KVM: PPC: Book3S HV Nested: Sanitise H_ENTER_NESTED TM state
Nicolas Dichtel (1): ipv6: fix 'disable_policy' for fwd packets
Nicolas Saenz Julienne (1): timers: Fix get_next_timer_interrupt() with no timers pending
Olivier Langlois (1): io_uring: Fix race condition when sqp thread goes to sleep
Paolo Abeni (4):
      mptcp: use fast lock for subflows when possible
      mptcp: refine mptcp_cleanup_rbuf
      mptcp: properly account bulk freed memory
      ipv6: fix another slab-out-of-bounds in fib6_nh_flush_exceptions
Paul Blakey (1): skbuff: Release nfct refcount on napi stolen or re-used skbs
Pavel Begunkov (2):
      io_uring: explicitly count entries for poll reqs
      io_uring: remove double poll entry on arm failure
Pavel Skripkin (1): net: sched: fix memory leak in tcindex_partial_destroy_work
Peilin Ye (1): net/sched: act_skbmod: Skip non-Ethernet packets
Peter Collingbourne (2):
      selftest: use mmap instead of posix_memalign to allocate memory
      userfaultfd: do not untag user pointers
Peter Hess (1): spi: mediatek: fix fifo rx mode
Peter Ujfalusi (1): driver core: auxiliary bus: Fix memory leak when driver_register() fail
Pierre-Louis Bossart (1): ALSA: hda: intel-dsp-cfg: add missing ElkhartLake PCI ID
Qi Zheng (1): mm: fix the deadlock in finish_fault()
Randy Dunlap (1): net: hisilicon: rename CACHE_LINE_MASK to avoid redefinition
Riccardo Mancini (17):
      perf inject: Fix dso->nsinfo refcounting
      perf map: Fix dso->nsinfo refcounting
      perf probe: Fix dso->nsinfo refcounting
      perf env: Fix sibling_dies memory leak
      perf test session_topology: Delete session->evlist
      perf test event_update: Fix memory leak of evlist
      perf test event_update: Fix memory leak of unit
      perf dso: Fix memory leak in dso__new_map()
      perf test maps__merge_in: Fix memory leak of maps
      perf env: Fix memory leak of cpu_pmu_caps
      perf report: Free generated help strings for sort option
      perf script: Release zstd data
      perf script: Fix memory 'threads' and 'cpus' leaks on exit
      perf lzma: Close lzma stream on exit
      perf probe-file: Delete namelist in del_events() on the error path
      perf data: Close all files in close_dir()
      perf inject: Close inject.output on exit
Robert Richter (2):
      ACPI: Kconfig: Fix table override from built-in initrd
      Documentation: Fix intiramfs script name
Roman Skakun (1): dma-mapping: handle vmalloc addresses in dma_common_{mmap,get_sgtable}
Ronnie Sahlberg (2):
      cifs: only write 64kb at a time when fallocating a small region of a file
      cifs: fix fallocate when trying to allocate a hole.
Sathya Prakash M R (1): ASoC: SOF: Intel: Update ADL descriptor to use ACPI power states
Sayanta Pattanayak (1): r8169: Avoid duplicate sysfs entry creation error
Sean Christopherson (2):
      KVM: SVM: Return -EFAULT if copy_to_user() for SEV mig packet header fails
      KVM: SVM: Fix sev_pin_memory() error checks in SEV migration utilities
Sergei Trofimovich (1): mm: page_alloc: fix page_poison=1 / INIT_ON_ALLOC_DEFAULT_ON interaction
Shahjada Abul Husain (1): cxgb4: fix IRQ free race during driver unload
Somnath Kotur (2):
      bnxt_en: fix error path of FW reset
      bnxt_en: Check abort error state in bnxt_half_open_nic()
Stefan Wahren (1): ARM: multi_v7_defconfig: Make NOP_USB_XCEIV driver built-in
Stephen Boyd (1): mmc: core: Don't allocate IDA for OF aliases
Steven Rostedt (VMware) (3):
      tracepoints: Update static_call before tp_funcs when adding a tracepoint
      tracing/histogram: Rename "cpu" to "common_cpu"
      tracing: Synthetic event field_pos is an index not a boolean
Sudeep Holla (1): firmware: arm_scmi: Ensure drivers provide a probe function
Sukadev Bhattiprolu (1): ibmvnic: Remove the proper scrq flush
Taehee Yoo (8):
      bonding: fix suspicious RCU usage in bond_ipsec_add_sa()
      bonding: fix null dereference in bond_ipsec_add_sa()
      ixgbevf: use xso.real_dev instead of xso.dev in callback functions of struct xfrmdev_ops
      bonding: fix suspicious RCU usage in bond_ipsec_del_sa()
      bonding: disallow setting nested bonding + ipsec offload
      bonding: Add struct bond_ipesc to manage SA
      bonding: fix suspicious RCU usage in bond_ipsec_offload_ok()
      bonding: fix incorrect return value of bond_ipsec_offload_ok()
Takashi Iwai (4):
      ALSA: usb-audio: Add missing proc text entry for BESPOKEN type
      ALSA: sb: Fix potential ABBA deadlock in CSP driver
      ALSA: hdmi: Expose all pins on MSI MS-7C94 board
      ALSA: pcm: Fix mmap capability check
Tao Zhou (1): drm/amdgpu: update gc golden setting for dimgrey_cavefish
Tobias Klauser (1): bpftool: Check malloc return value in mount_bpffs_for_pin
Tom Rix (2):
      igc: change default return of igc_read_phy_reg()
      afs: check function return
Uwe Kleine-König (1): pwm: sprd: Ensure configuring period and duty_cycle isn't wrongly skipped
Vadim Fedorenko (1): udp: check encap socket in __udp_lib_err
Vasily Gorbik (1): s390/ftrace: fix ftrace_update_ftrace_func implementation
Vijendar Mukunda (1): ASoC: soc-pcm: add a flag to reverse the stop sequence
Vincent Palatin (1): Revert "USB: quirks: ignore remote wake-up on Fibocom L850-GL LTE modem"
Vinicius Costa Gomes (2):
      igc: Fix use-after-free error during reset
      igb: Fix use-after-free error during reset
Vladimir Oltean (2):
      net: ocelot: fix switchdev objects synced for wrong netdev with LAG offload
      net: dsa: sja1105: make VID 4095 a bridge VLAN too
Wang Hai (1): bpf, samples: Fix xdpsock with '-M' parameter missing unload process
Wei Wang (1): tcp: disable TFO blackhole logic by default
Xiaojian Du (1): drm/amdgpu: update the golden setting for vangogh
Xin Long (2):
      sctp: trim optlen when it's a huge value in sctp_setsockopt
      sctp: update active_key for asoc when old key is being replaced
Xuan Zhuo (2):
      bpf, test: fix NULL pointer dereference on invalid expected_attach_type
      xdp, net: Fix use-after-free in bpf_xdp_link_release
Yajun Deng (2):
      net: decnet: Fix sleeping inside in af_decnet
      net: sched: cls_api: Fix the the wrong parameter
Yang Jihong (1): perf sched: Fix record failure when CONFIG_SCHEDSTATS is not set
Yang Yingliang (1): io_uring: fix memleak in io_init_wq_offload()
Yoshihiro Shimoda (1): usb: renesas_usbhs: Fix superfluous irqs happen after usb_pkt_pop()
Yoshitaka Ikeda (3):
      spi: spi-cadence-quadspi: Fix division by zero warning
      spi: spi-cadence-quadspi: Revert "Fix division by zero warning"
      spi: spi-cadence-quadspi: Fix division by zero warning - try2
YueHaibing (1): stmmac: platform: Fix signedness bug in stmmac_probe_config_dt()
Zack Rusin (1): drm/vmwgfx: Fix a bad merge in otable batch takedown
Zev Weiss (1): ARM: dts: aspeed: Update e3c246d4i vuart properties
Zhang Qilong (1): usb: gadget: Fix Unbalanced pm_runtime_enable in tegra_xudc_probe
Zhihao Cheng (1): nvme-pci: don't WARN_ON in nvme_reset_work if ctrl.state is not RESETTING
Ziyang Xuan (1): net: fix uninit-value in caif_seqpkt_sendmsg
Íñigo Huguet (2):
      sfc: fix lack of XDP TX queues - error XDP TX failed (-22)
      sfc: ensure correct number of XDP queues
diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst index 459e6b66ff68..0c9120ec58ae 100644 --- a/Documentation/arm64/tagged-address-abi.rst +++ b/Documentation/arm64/tagged-address-abi.rst @@ -45,14 +45,24 @@ how the user addresses are used by the kernel:
1. User addresses not accessed by the kernel but used for address space management (e.g. ``mprotect()``, ``madvise()``). The use of valid - tagged pointers in this context is allowed with the exception of - ``brk()``, ``mmap()`` and the ``new_address`` argument to - ``mremap()`` as these have the potential to alias with existing - user addresses. - - NOTE: This behaviour changed in v5.6 and so some earlier kernels may - incorrectly accept valid tagged pointers for the ``brk()``, - ``mmap()`` and ``mremap()`` system calls. + tagged pointers in this context is allowed with these exceptions: + + - ``brk()``, ``mmap()`` and the ``new_address`` argument to + ``mremap()`` as these have the potential to alias with existing + user addresses. + + NOTE: This behaviour changed in v5.6 and so some earlier kernels may + incorrectly accept valid tagged pointers for the ``brk()``, + ``mmap()`` and ``mremap()`` system calls. + + - The ``range.start``, ``start`` and ``dst`` arguments to the + ``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from + ``userfaultfd()``, as fault addresses subsequently obtained by reading + the file descriptor will be untagged, which may otherwise confuse + tag-unaware programs. + + NOTE: This behaviour changed in v5.14 and so some earlier kernels may + incorrectly accept valid tagged pointers for this system call.
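[Editor's aside, not part of the patch: a minimal userspace sketch of the ABI rule documented just above. It assumes a 4 KiB page size and a kernel with userfaultfd enabled; the only point is that range.start is passed as a plain, untagged address — on kernels with this change a tagged pointer here is expected to be rejected rather than silently untagged.]

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = { 0 };
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	void *area;

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return 1;

	area = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return 1;

	/*
	 * range.start is a plain, untagged address.  Per the ABI text
	 * above, a tagged pointer here is expected to be rejected on
	 * kernels with this change, since fault addresses later read
	 * from the userfaultfd descriptor are reported untagged.
	 */
	reg.range.start = (unsigned long)area;
	reg.range.len = 4096;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		perror("UFFDIO_REGISTER");

	return 0;
}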
2. User addresses accessed by the kernel (e.g. ``write()``). This ABI relaxation is disabled by default and the application thread needs to diff --git a/Documentation/driver-api/early-userspace/early_userspace_support.rst b/Documentation/driver-api/early-userspace/early_userspace_support.rst index 8a58c61932ff..61bdeac1bae5 100644 --- a/Documentation/driver-api/early-userspace/early_userspace_support.rst +++ b/Documentation/driver-api/early-userspace/early_userspace_support.rst @@ -69,17 +69,17 @@ early userspace image can be built by an unprivileged user.
As a technical note, when directories and files are specified, the entire CONFIG_INITRAMFS_SOURCE is passed to -usr/gen_initramfs_list.sh. This means that CONFIG_INITRAMFS_SOURCE +usr/gen_initramfs.sh. This means that CONFIG_INITRAMFS_SOURCE can really be interpreted as any legal argument to -gen_initramfs_list.sh. If a directory is specified as an argument then +gen_initramfs.sh. If a directory is specified as an argument then the contents are scanned, uid/gid translation is performed, and usr/gen_init_cpio file directives are output. If a directory is -specified as an argument to usr/gen_initramfs_list.sh then the +specified as an argument to usr/gen_initramfs.sh then the contents of the file are simply copied to the output. All of the output directives from directory scanning and file contents copying are processed by usr/gen_init_cpio.
-See also 'usr/gen_initramfs_list.sh -h'. +See also 'usr/gen_initramfs.sh -h'.
Where's this all leading? ========================= diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.rst b/Documentation/filesystems/ramfs-rootfs-initramfs.rst index 4598b0d90b60..164960631925 100644 --- a/Documentation/filesystems/ramfs-rootfs-initramfs.rst +++ b/Documentation/filesystems/ramfs-rootfs-initramfs.rst @@ -170,7 +170,7 @@ Documentation/driver-api/early-userspace/early_userspace_support.rst for more de The kernel does not depend on external cpio tools. If you specify a directory instead of a configuration file, the kernel's build infrastructure creates a configuration file from that directory (usr/Makefile calls -usr/gen_initramfs_list.sh), and proceeds to package up that directory +usr/gen_initramfs.sh), and proceeds to package up that directory using the config file (by feeding it to usr/gen_init_cpio, which is created from usr/gen_init_cpio.c). The kernel's build-time cpio creation code is entirely self-contained, and the kernel's boot-time extractor is also diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index c2ecc9894fd0..9a57e972dae4 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -772,7 +772,7 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER initial value when the blackhole issue goes away. 0 to disable the blackhole detection.
- By default, it is set to 1hr. + By default, it is set to 0 (feature is disabled).
tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs The list consists of a primary key and an optional backup key. The diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index b71e09f745c3..f99be8062bc8 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -191,7 +191,7 @@ Documentation written by Tom Zanussi with the event, in nanoseconds. May be modified by .usecs to have timestamps interpreted as microseconds. - cpu int the cpu on which the event occurred. + common_cpu int the cpu on which the event occurred. ====================== ==== =======================================
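[Editor's aside, not part of the patch: a small sketch of the renamed histogram key in use. It assumes tracefs is mounted at /sys/kernel/tracing and uses the sched_switch event purely as an example; the generic CPU field is requested as "common_cpu" after this rename, where older kernels documented it as "cpu".]

#include <stdio.h>

int main(void)
{
	/*
	 * Illustration only: key a hist trigger on the generic CPU field,
	 * which the documentation hunk above renames to "common_cpu".
	 */
	FILE *f = fopen("/sys/kernel/tracing/events/sched/sched_switch/trigger", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fprintf(f, "hist:keys=common_cpu\n") < 0)
		perror("fprintf");
	return fclose(f) ? 1 : 0;
}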
Extended error information diff --git a/Makefile b/Makefile index 41be12f806e0..96967f895193 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 13 -SUBLEVEL = 5 +SUBLEVEL = 6 EXTRAVERSION = NAME = Opossums on Parade
diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts index dcab6e78dfa4..8be40c8283af 100644 --- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts +++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts @@ -4,6 +4,7 @@ #include "aspeed-g5.dtsi" #include <dt-bindings/gpio/aspeed-gpio.h> #include <dt-bindings/i2c/i2c.h> +#include <dt-bindings/interrupt-controller/irq.h>
/{ model = "ASRock E3C246D4I BMC"; @@ -73,7 +74,8 @@ &uart5 {
&vuart { status = "okay"; - aspeed,sirq-active-high; + aspeed,lpc-io-reg = <0x2f8>; + aspeed,lpc-interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; };
&mac0 { diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 52a0400fdd92..d9abaae118dd 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -821,7 +821,7 @@ CONFIG_USB_ISP1760=y CONFIG_USB_HSIC_USB3503=y CONFIG_AB8500_USB=y CONFIG_KEYSTONE_USB_PHY=m -CONFIG_NOP_USB_XCEIV=m +CONFIG_NOP_USB_XCEIV=y CONFIG_AM335X_PHY_USB=m CONFIG_TWL6030_USB=m CONFIG_USB_GPIO_VBUS=y diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 787c3c83edd7..bea3c5e125ee 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -17,7 +17,7 @@ CFLAGS_syscall.o += -fno-stack-protector # It's not safe to invoke KCOV when portions of the kernel environment aren't # available or are out-of-sync with HW state. Since `noinstr` doesn't always # inhibit KCOV instrumentation, disable it for the entire compilation unit. -KCOV_INSTRUMENT_entry.o := n +KCOV_INSTRUMENT_entry-common.o := n
# Object file lists. obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 125a10e413e9..23e9879a6e78 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -185,18 +185,6 @@ void mte_check_tfsr_el1(void) } #endif
-static void update_gcr_el1_excl(u64 excl) -{ - - /* - * Note that the mask controlled by the user via prctl() is an - * include while GCR_EL1 accepts an exclude mask. - * No need for ISB since this only affects EL0 currently, implicit - * with ERET. - */ - sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl); -} - static void set_gcr_el1_excl(u64 excl) { current->thread.gcr_user_excl = excl; @@ -257,7 +245,8 @@ void mte_suspend_exit(void) if (!system_supports_mte()) return;
- update_gcr_el1_excl(gcr_kernel_excl); + sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl); + isb(); }
long set_mte_ctrl(struct task_struct *task, unsigned long arg) diff --git a/arch/nds32/mm/mmap.c b/arch/nds32/mm/mmap.c index c206b31ce07a..1bdf5e7d1b43 100644 --- a/arch/nds32/mm/mmap.c +++ b/arch/nds32/mm/mmap.c @@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 67cc164c4ac1..395f98158e81 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2445,8 +2445,10 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX; if (cpu_has_feature(CPU_FTR_HVMODE)) { vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) vcpu->arch.hfscr |= HFSCR_TM; +#endif } if (cpu_has_feature(CPU_FTR_TM_COMP)) vcpu->arch.hfscr |= HFSCR_TM; diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 1b3ff0af1264..5252107d2f7a 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -301,6 +301,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) if (vcpu->kvm->arch.l1_ptcr == 0) return H_NOT_AVAILABLE;
+ if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) + return H_BAD_MODE; + /* copy parameters in */ hv_ptr = kvmppc_get_gpr(vcpu, 4); regs_ptr = kvmppc_get_gpr(vcpu, 5); @@ -321,6 +324,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) if (l2_hv.vcpu_token >= NR_CPUS) return H_PARAMETER;
+ /* + * L1 must have set up a suspended state to enter the L2 in a + * transactional state, and only in that case. These have to be + * filtered out here to prevent causing a TM Bad Thing in the + * host HRFID. We could synthesize a TM Bad Thing back to the L1 + * here but there doesn't seem like much point. + */ + if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) { + if (!MSR_TM_ACTIVE(l2_regs.msr)) + return H_BAD_MODE; + } else { + if (l2_regs.msr & MSR_TS_MASK) + return H_BAD_MODE; + if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK)) + return H_BAD_MODE; + } + /* translate lpid */ l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true); if (!l2) diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index c5e677508d3b..0f847f1e5ddd 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -242,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) * value so we can restore it on the way out. */ orig_rets = args.rets; + if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) { + /* + * Don't overflow our args array: ensure there is room for + * at least rets[0] (even if the call specifies 0 nret). + * + * Each handler must then check for the correct nargs and nret + * values, but they may always return failure in rets[0]. + */ + rc = -EINVAL; + goto fail; + } args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->arch.rtas_token_lock); @@ -269,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) fail: /* * We only get here if the guest has called RTAS with a bogus - * args pointer. That means we can't get to the args, and so we - * can't fail the RTAS call. So fail right out to userspace, - * which should kill the guest. + * args pointer or nargs/nret values that would overflow the + * array. That means we can't get to the args, and so we can't + * fail the RTAS call. So fail right out to userspace, which + * should kill the guest. + * + * SLOF should actually pass the hcall return value from the + * rtas handler call in r3, so enter_rtas could be modified to + * return a failure indication in r3 and we could return such + * errors to the guest rather than failing to host userspace. + * However old guests that don't test for failure could then + * continue silently after errors, so for now we won't do this. */ return rc; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index a2a68a958fa0..6e4f03c02a0a 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -2045,9 +2045,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, { struct kvm_enable_cap cap; r = -EFAULT; - vcpu_load(vcpu); if (copy_from_user(&cap, argp, sizeof(cap))) goto out; + vcpu_load(vcpu); r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); vcpu_put(vcpu); break; @@ -2071,9 +2071,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_DIRTY_TLB: { struct kvm_dirty_tlb dirty; r = -EFAULT; - vcpu_load(vcpu); if (copy_from_user(&dirty, argp, sizeof(dirty))) goto out; + vcpu_load(vcpu); r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); vcpu_put(vcpu); break; diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h index 6d98cd999680..7b3483ba2e84 100644 --- a/arch/riscv/include/asm/efi.h +++ b/arch/riscv/include/asm/efi.h @@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
-/* Load initrd at enough distance from DRAM start */ +/* Load initrd anywhere in system RAM */ static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) { - return image_addr + SZ_256M; + return ULONG_MAX; }
#define alloc_screen_info(x...) (&screen_info) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 4c4c92ce0bb8..9b23b95c50cf 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -123,7 +123,7 @@ void __init setup_bootmem(void) { phys_addr_t vmlinux_end = __pa_symbol(&_end); phys_addr_t vmlinux_start = __pa_symbol(&_start); - phys_addr_t dram_end = memblock_end_of_DRAM(); + phys_addr_t dram_end; phys_addr_t max_mapped_addr = __pa(~(ulong)0);
#ifdef CONFIG_XIP_KERNEL @@ -146,6 +146,8 @@ void __init setup_bootmem(void) #endif memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+ dram_end = memblock_end_of_DRAM(); + /* * memblock allocator is not aware of the fact that last 4K bytes of * the addressable memory can not be mapped because of IS_ERR_VALUE diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S index f7c77cd518f2..5ff5fee02801 100644 --- a/arch/s390/boot/text_dma.S +++ b/arch/s390/boot/text_dma.S @@ -9,16 +9,6 @@ #include <asm/errno.h> #include <asm/sigp.h>
-#ifdef CC_USING_EXPOLINE - .pushsection .dma.text.__s390_indirect_jump_r14,"axG" -__dma__s390_indirect_jump_r14: - larl %r1,0f - ex 0,0(%r1) - j . -0: br %r14 - .popsection -#endif - .section .dma.text,"ax" /* * Simplified version of expoline thunk. The normal thunks can not be used here, @@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14: * affects a few functions that are not performance-relevant. */ .macro BR_EX_DMA_r14 -#ifdef CC_USING_EXPOLINE - jg __dma__s390_indirect_jump_r14 -#else - br %r14 -#endif + larl %r1,0f + ex 0,0(%r1) + j . +0: br %r14 .endm
/* diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 695c61989f97..345cbe982a8b 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -19,6 +19,7 @@ void ftrace_caller(void);
extern char ftrace_graph_caller_end; extern unsigned long ftrace_plt; +extern void *ftrace_func;
struct dyn_arch_ftrace { };
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index c6ddeb5029b4..2d8f595d9196 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -40,6 +40,7 @@ * trampoline (ftrace_plt), which clobbers also r1. */
+void *ftrace_func __read_mostly = ftrace_stub; unsigned long ftrace_plt;
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, @@ -85,6 +86,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_update_ftrace_func(ftrace_func_t func) { + ftrace_func = func; return 0; }
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index faf64c2f90f5..6b13797143a7 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -59,13 +59,13 @@ ENTRY(ftrace_caller) #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES aghik %r2,%r0,-MCOUNT_INSN_SIZE lgrl %r4,function_trace_op - lgrl %r1,ftrace_trace_function + lgrl %r1,ftrace_func #else lgr %r2,%r0 aghi %r2,-MCOUNT_INSN_SIZE larl %r4,function_trace_op lg %r4,0(%r4) - larl %r1,ftrace_trace_function + larl %r1,ftrace_func lg %r1,0(%r1) #endif lgr %r3,%r14 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 63cae0476bb4..2ae419f5115a 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -112,7 +112,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) { u32 r1 = reg2hex[b1];
- if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15) + if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1]) jit->seen_reg[r1] = 1; }
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index ca7866d63e98..739be5da3bca 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -765,7 +765,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS); edx.split.bit_width_fixed = cap.bit_width_fixed; - edx.split.anythread_deprecated = 1; + if (cap.version) + edx.split.anythread_deprecated = 1; edx.split.reserved1 = 0; edx.split.reserved2 = 0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 8d36f0c73071..02d60d7f903d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1271,8 +1271,8 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) /* Pin guest memory */ guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, PAGE_SIZE, &n, 0); - if (!guest_page) - return -EFAULT; + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page);
/* allocate memory for header and transport buffer */ ret = -ENOMEM; @@ -1309,8 +1309,9 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) }
/* Copy packet header to userspace. */ - ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, - params.hdr_len); + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + ret = -EFAULT;
e_free_trans_data: kfree(trans_data); @@ -1462,11 +1463,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) data.trans_len = params.trans_len;
/* Pin guest memory */ - ret = -EFAULT; guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, PAGE_SIZE, &n, 0); - if (!guest_page) + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); goto e_free_trans; + }
/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index eedec61e3476..226f849fe7dc 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -370,7 +370,7 @@ config ACPI_TABLE_UPGRADE config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD bool "Override ACPI tables from built-in initrd" depends on ACPI_TABLE_UPGRADE - depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION="" + depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION_NONE help This option provides functionality to override arbitrary ACPI tables from built-in uncompressed initrd. diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 3b54b8fd7396..27ec9d57f3b8 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -846,11 +846,9 @@ EXPORT_SYMBOL(acpi_dev_present); * Return the next match of ACPI device if another matching device was present * at the moment of invocation, or NULL otherwise. * - * FIXME: The function does not tolerate the sudden disappearance of @adev, e.g. - * in the case of a hotplug event. That said, the caller should ensure that - * this will never happen. - * * The caller is responsible for invoking acpi_dev_put() on the returned device. + * On the other hand the function invokes acpi_dev_put() on the given @adev + * assuming that its reference counter had been increased beforehand. * * See additional information in acpi_dev_present() as well. */ @@ -866,6 +864,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, start, &match, acpi_dev_match_cb); + acpi_dev_put(adev); return dev ? to_acpi_device(dev) : NULL; } EXPORT_SYMBOL(acpi_dev_get_next_match_dev); diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index adc199dfba3c..6a30264ab2ba 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -231,6 +231,8 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device); int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner, const char *modname) { + int ret; + if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) return -EINVAL;
@@ -246,7 +248,11 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, auxdrv->driver.bus = &auxiliary_bus_type; auxdrv->driver.mod_name = modname;
- return driver_register(&auxdrv->driver); + ret = driver_register(&auxdrv->driver); + if (ret) + kfree(auxdrv->driver.name); + + return ret; } EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
diff --git a/drivers/base/core.c b/drivers/base/core.c index 54ba506e5a89..042b13d88f17 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -574,8 +574,10 @@ static void devlink_remove_symlinks(struct device *dev, return; }
- snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); - sysfs_remove_link(&con->kobj, buf); + if (device_is_registered(con)) { + snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); + sysfs_remove_link(&con->kobj, buf); + } snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); sysfs_remove_link(&sup->kobj, buf); kfree(buf); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index bbb88eb009e0..7af1c53c5cf7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4100,8 +4100,6 @@ static void rbd_acquire_lock(struct work_struct *work)
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) { - bool need_wait; - dout("%s rbd_dev %p\n", __func__, rbd_dev); lockdep_assert_held_write(&rbd_dev->lock_rwsem);
@@ -4113,11 +4111,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) */ rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; rbd_assert(!completion_done(&rbd_dev->releasing_wait)); - need_wait = !list_empty(&rbd_dev->running_list); - downgrade_write(&rbd_dev->lock_rwsem); - if (need_wait) - wait_for_completion(&rbd_dev->releasing_wait); - up_read(&rbd_dev->lock_rwsem); + if (list_empty(&rbd_dev->running_list)) + return true; + + up_write(&rbd_dev->lock_rwsem); + wait_for_completion(&rbd_dev->releasing_wait);
down_write(&rbd_dev->lock_rwsem); if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) @@ -4203,15 +4201,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v, if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { down_write(&rbd_dev->lock_rwsem); if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { - /* - * we already know that the remote client is - * the owner - */ - up_write(&rbd_dev->lock_rwsem); - return; + dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n", + __func__, rbd_dev, cid.gid, cid.handle); + } else { + rbd_set_owner_cid(rbd_dev, &cid); } - - rbd_set_owner_cid(rbd_dev, &cid); downgrade_write(&rbd_dev->lock_rwsem); } else { down_read(&rbd_dev->lock_rwsem); @@ -4236,14 +4230,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v, if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { down_write(&rbd_dev->lock_rwsem); if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { - dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n", + dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n", __func__, rbd_dev, cid.gid, cid.handle, rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle); - up_write(&rbd_dev->lock_rwsem); - return; + } else { + rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); } - - rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); downgrade_write(&rbd_dev->lock_rwsem); } else { down_read(&rbd_dev->lock_rwsem); diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 22acde118bc3..fc9196f11cb7 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -773,11 +773,18 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - write_lock_bh(&mhi_chan->lock); - mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); - complete(&mhi_chan->completion); - write_unlock_bh(&mhi_chan->lock); + + if (chan < mhi_cntrl->max_chan && + mhi_cntrl->mhi_chan[chan].configured) { + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } else { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Completion packet for invalid channel ID: %d\n", chan); + }
mhi_del_ring_element(mhi_cntrl, mhi_ring); } diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c index ca3bc40427f8..4dd1077354af 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/pci_generic.c @@ -32,6 +32,8 @@ * @edl: emergency download mode firmware path (if any) * @bar_num: PCI base address register to use for MHI MMIO register space * @dma_data_width: DMA transfer word size (32 or 64 bits) + * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead + * of inband wake support (such as sdx24) */ struct mhi_pci_dev_info { const struct mhi_controller_config *config; @@ -40,6 +42,7 @@ struct mhi_pci_dev_info { const char *edl; unsigned int bar_num; unsigned int dma_data_width; + bool sideband_wake; };
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ @@ -72,6 +75,22 @@ struct mhi_pci_dev_info { .doorbell_mode_switch = false, \ }
+#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + .auto_queue = true, \ + } + #define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \ { \ .num_elements = el_count, \ @@ -210,7 +229,7 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), - MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), @@ -242,7 +261,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { .edl = "qcom/sdx65m/edl.mbn", .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = false, };
static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { @@ -251,7 +271,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { .edl = "qcom/sdx55m/edl.mbn", .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = false, };
static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { @@ -259,7 +280,8 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = { .edl = "qcom/prog_firehose_sdx24.mbn", .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = true, };
static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = { @@ -301,7 +323,8 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { .edl = "qcom/prog_firehose_sdx24.mbn", .config = &modem_quectel_em1xx_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = true, };
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { @@ -339,7 +362,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .edl = "qcom/sdx55m/edl.mbn", .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, - .dma_data_width = 32 + .dma_data_width = 32, + .sideband_wake = false, };
static const struct pci_device_id mhi_pci_id_table[] = { @@ -640,9 +664,12 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mhi_cntrl->status_cb = mhi_pci_status_cb; mhi_cntrl->runtime_get = mhi_pci_runtime_get; mhi_cntrl->runtime_put = mhi_pci_runtime_put; - mhi_cntrl->wake_get = mhi_pci_wake_get_nop; - mhi_cntrl->wake_put = mhi_pci_wake_put_nop; - mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; + + if (info->sideband_wake) { + mhi_cntrl->wake_get = mhi_pci_wake_get_nop; + mhi_cntrl->wake_put = mhi_pci_wake_put_nop; + mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; + }
err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); if (err) diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 784cf0027da3..9184a0d5acbe 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -139,6 +139,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner, { int retval;
+ if (!driver->probe) + return -EINVAL; + retval = scmi_protocol_device_request(driver->id_table); if (retval) return retval; diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index 5c9625e552f4..eb9c65f97841 100644 --- a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -12,52 +12,38 @@ #include <linux/efi.h> #include <linux/pci.h>
-struct acpi_hid_uid { - struct acpi_device_id hid[2]; - char uid[11]; /* UINT_MAX + null byte */ -}; - -static int __init match_acpi_dev(struct device *dev, const void *data) -{ - struct acpi_hid_uid hid_uid = *(const struct acpi_hid_uid *)data; - struct acpi_device *adev = to_acpi_device(dev); - - if (acpi_match_device_ids(adev, hid_uid.hid)) - return 0; - - if (adev->pnp.unique_id) - return !strcmp(adev->pnp.unique_id, hid_uid.uid); - else - return !strcmp("0", hid_uid.uid); -} - static long __init parse_acpi_path(const struct efi_dev_path *node, struct device *parent, struct device **child) { - struct acpi_hid_uid hid_uid = {}; + char hid[ACPI_ID_LEN], uid[11]; /* UINT_MAX + null byte */ + struct acpi_device *adev; struct device *phys_dev;
if (node->header.length != 12) return -EINVAL;
- sprintf(hid_uid.hid[0].id, "%c%c%c%04X", + sprintf(hid, "%c%c%c%04X", 'A' + ((node->acpi.hid >> 10) & 0x1f) - 1, 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1, 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1, node->acpi.hid >> 16); - sprintf(hid_uid.uid, "%u", node->acpi.uid); + sprintf(uid, "%u", node->acpi.uid);
- *child = bus_find_device(&acpi_bus_type, NULL, &hid_uid, - match_acpi_dev); - if (!*child) + for_each_acpi_dev_match(adev, hid, NULL, -1) { + if (adev->pnp.unique_id && !strcmp(adev->pnp.unique_id, uid)) + break; + if (!adev->pnp.unique_id && node->acpi.uid == 0) + break; + } + if (!adev) return -ENODEV;
- phys_dev = acpi_get_first_physical_node(to_acpi_device(*child)); + phys_dev = acpi_get_first_physical_node(adev); if (phys_dev) { - get_device(phys_dev); - put_device(*child); - *child = phys_dev; - } + *child = get_device(phys_dev); + acpi_dev_put(adev); + } else + *child = &adev->dev;
return 0; } diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 4b7ee3fa9224..847f33ffc4ae 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -896,6 +896,7 @@ static int __init efi_memreserve_map_root(void) static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) { struct resource *res, *parent; + int ret;
res = kzalloc(sizeof(struct resource), GFP_ATOMIC); if (!res) @@ -908,7 +909,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
/* we expect a conflict with a 'System RAM' region */ parent = request_resource_conflict(&iomem_resource, res); - return parent ? request_resource(parent, res) : 0; + ret = parent ? request_resource(parent, res) : 0; + + /* + * Given that efi_mem_reserve_iomem() can be called at any + * time, only call memblock_reserve() if the architecture + * keeps the infrastructure around. + */ + if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret) + memblock_reserve(addr, size); + + return ret; }
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index c1955d320fec..8f665678e9e3 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void) tbl_size = sizeof(*log_tbl) + log_tbl->size; memblock_reserve(efi.tpm_log, tbl_size);
- if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || - log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { - pr_warn(FW_BUG "TPM Final Events table missing or invalid\n"); + if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) { + pr_info("TPM Final Events table not present\n"); + goto out; + } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { + pr_warn(FW_BUG "TPM Final Events table invalid\n"); goto out; }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 9ea5b4d2fe8b..ef65c20feda7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3290,6 +3290,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) }; @@ -3369,6 +3370,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000020), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1_Vangogh, 0xffffffff, 0x00070103), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQG_CONFIG, 0x000017ff, 0x00001000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00400000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), @@ -3411,6 +3413,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSX_DEBUG_1, 0x00010000, 0x00010020), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x01030000, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x03a00000, 0x00a00000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 495a4767a443..94591c1a748f 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -827,6 +827,9 @@ long drm_ioctl(struct file *filp, if (drm_dev_is_unplugged(dev)) return -ENODEV;
+ if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE) + return -ENOTTY; + is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
if (is_driver_ioctl) { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index dda320749c65..2358c92733b0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1977,6 +1977,21 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, if (drm_WARN_ON(&i915->drm, !engine)) return -EINVAL;
+ /* + * d3_entered is used to indicate that PPGTT invalidation should be skipped + * on vGPU reset; it is set on the D0->D3 PCI config write and cleared after + * a vGPU reset while resuming. + * On S0ix exit the device power state also transitions from D3 to D0, as in + * S3 resume, but no vGPU reset is triggered by the QEMU device model, and + * after S0ix exit all engines continue to work. d3_entered therefore remains + * set, which would break the next vGPU reset (the expected PPGTT invalidation + * would be missed). + * Engines can only work in D0, so the first elsp write gives GVT a + * chance to clear d3_entered. + */ + if (vgpu->d3_entered) + vgpu->d3_entered = false; + execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index 5e9ccefb88f6..bbdd086be7f5 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -447,7 +447,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c) drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi); - kfree(ts->dsi);
return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 3d9c62b93e29..ef6e0c042bb1 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -100,6 +100,8 @@ static int ttm_global_init(void) debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root, &glob->bo_count); out: + if (ret) + --ttm_glob_use_count; mutex_unlock(&ttm_global_mutex); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 188b74c9e9ff..edee565334d8 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1690,38 +1690,46 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (vc4_hdmi->variant->external_irq_controller) { - ret = devm_request_threaded_irq(&pdev->dev, - platform_get_irq_byname(pdev, "cec-rx"), - vc4_cec_irq_handler_rx_bare, - vc4_cec_irq_handler_rx_thread, 0, - "vc4 hdmi cec rx", vc4_hdmi); + ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"), + vc4_cec_irq_handler_rx_bare, + vc4_cec_irq_handler_rx_thread, 0, + "vc4 hdmi cec rx", vc4_hdmi); if (ret) goto err_delete_cec_adap;
- ret = devm_request_threaded_irq(&pdev->dev, - platform_get_irq_byname(pdev, "cec-tx"), - vc4_cec_irq_handler_tx_bare, - vc4_cec_irq_handler_tx_thread, 0, - "vc4 hdmi cec tx", vc4_hdmi); + ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"), + vc4_cec_irq_handler_tx_bare, + vc4_cec_irq_handler_tx_thread, 0, + "vc4 hdmi cec tx", vc4_hdmi); if (ret) - goto err_delete_cec_adap; + goto err_remove_cec_rx_handler; } else { HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
- ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0), - vc4_cec_irq_handler, - vc4_cec_irq_handler_thread, 0, - "vc4 hdmi cec", vc4_hdmi); + ret = request_threaded_irq(platform_get_irq(pdev, 0), + vc4_cec_irq_handler, + vc4_cec_irq_handler_thread, 0, + "vc4 hdmi cec", vc4_hdmi); if (ret) goto err_delete_cec_adap; }
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev); if (ret < 0) - goto err_delete_cec_adap; + goto err_remove_handlers;
return 0;
+err_remove_handlers: + if (vc4_hdmi->variant->external_irq_controller) + free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi); + else + free_irq(platform_get_irq(pdev, 0), vc4_hdmi); + +err_remove_cec_rx_handler: + if (vc4_hdmi->variant->external_irq_controller) + free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi); + err_delete_cec_adap: cec_delete_adapter(vc4_hdmi->cec_adap);
@@ -1730,6 +1738,15 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) { + struct platform_device *pdev = vc4_hdmi->pdev; + + if (vc4_hdmi->variant->external_irq_controller) { + free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi); + free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi); + } else { + free_irq(platform_get_irq(pdev, 0), vc4_hdmi); + } + cec_unregister_adapter(vc4_hdmi->cec_adap); } #else diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 5648664f71bc..f2d625415458 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -354,7 +354,6 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ttm_bo_unpin(bo); ttm_bo_unreserve(bo);
- ttm_bo_unpin(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; } diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 6d5014ebaab5..a6ea1eb1394e 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -635,8 +635,8 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
status = readb(i2c->base + MPC_I2C_SR); if (status & CSR_MIF) { - /* Read again to allow register to stabilise */ - status = readb(i2c->base + MPC_I2C_SR); + /* Wait up to 100us for transfer to properly complete */ + readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100); writeb(0, i2c->base + MPC_I2C_SR); mpc_i2c_do_intr(i2c, status); return IRQ_HANDLED; diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c index 4657e99df033..59a36f922675 100644 --- a/drivers/media/pci/intel/ipu3/cio2-bridge.c +++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c @@ -173,10 +173,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg, int ret;
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) { - if (!adev->status.enabled) { - acpi_dev_put(adev); + if (!adev->status.enabled) continue; - }
if (bridge->n_sensors >= CIO2_NUM_PORTS) { acpi_dev_put(adev); @@ -185,7 +183,6 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg, }
sensor = &bridge->sensors[bridge->n_sensors]; - sensor->adev = adev; strscpy(sensor->name, cfg->hid, sizeof(sensor->name));
ret = cio2_bridge_read_acpi_buffer(adev, "SSDB", @@ -215,6 +212,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg, goto err_free_swnodes; }
+ sensor->adev = acpi_dev_get(adev); adev->fwnode.secondary = fwnode;
dev_info(&cio2->dev, "Found supported sensor %s\n", diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index 07f342db6701..7481f553f959 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER; com.cmd.hdr.Length = 6; - memcpy(&com.cmd.ConfigureBuffers.config, config, 6); + memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6); com.in_len = 6; com.out_len = 0;
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h index 84f04e0e0cb9..3d296f1998a1 100644 --- a/drivers/media/pci/ngene/ngene.h +++ b/drivers/media/pci/ngene/ngene.h @@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS {
struct FW_CONFIGURE_FREE_BUFFERS { struct FW_HEADER hdr; - u8 UVI1_BufferLength; - u8 UVI2_BufferLength; - u8 TVO_BufferLength; - u8 AUD1_BufferLength; - u8 AUD2_BufferLength; - u8 TVA_BufferLength; + struct { + u8 UVI1_BufferLength; + u8 UVI2_BufferLength; + u8 TVO_BufferLength; + u8 AUD1_BufferLength; + u8 AUD2_BufferLength; + u8 TVA_BufferLength; + } __packed config; } __attribute__ ((__packed__));
struct FW_CONFIGURE_UART { diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 7a6f01ace78a..305ffad131a2 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -714,23 +714,20 @@ static int at24_probe(struct i2c_client *client) }
/* - * If the 'label' property is not present for the AT24 EEPROM, - * then nvmem_config.id is initialised to NVMEM_DEVID_AUTO, - * and this will append the 'devid' to the name of the NVMEM - * device. This is purely legacy and the AT24 driver has always - * defaulted to this. However, if the 'label' property is - * present then this means that the name is specified by the - * firmware and this name should be used verbatim and so it is - * not necessary to append the 'devid'. + * We initialize nvmem_config.id to NVMEM_DEVID_AUTO even if the + * label property is set, because some platforms have multiple EEPROMs + * with the same label and we cannot register each of them under the + * same name. Failing to register those EEPROMs triggers a cascade of + * failures on such platforms. */ + nvmem_config.id = NVMEM_DEVID_AUTO; + if (device_property_present(dev, "label")) { - nvmem_config.id = NVMEM_DEVID_NONE; err = device_property_read_string(dev, "label", &nvmem_config.name); if (err) return err; } else { - nvmem_config.id = NVMEM_DEVID_AUTO; nvmem_config.name = dev_name(dev); }
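The at24 hunk above works because of how the nvmem core names devices, which the removed comment already hinted at: with NVMEM_DEVID_AUTO the core appends a per-device id to the configured name, while NVMEM_DEVID_NONE registers the name verbatim. A minimal sketch of the relevant configuration (illustrative only; the literal name below is made up):

        struct nvmem_config cfg = {
                .id   = NVMEM_DEVID_AUTO,       /* core appends a unique id, e.g. "eeprom0" */
                .name = "eeprom",               /* may be identical across EEPROM instances */
        };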
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 0b0577990ddc..8375d4381d47 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -75,7 +75,8 @@ static void mmc_host_classdev_release(struct device *dev) { struct mmc_host *host = cls_dev_to_mmc_host(dev); wakeup_source_unregister(host->ws); - ida_simple_remove(&mmc_host_ida, host->index); + if (of_alias_get_id(host->parent->of_node, "mmc") < 0) + ida_simple_remove(&mmc_host_ida, host->index); kfree(host); }
@@ -499,7 +500,7 @@ static int mmc_first_nonreserved_index(void) */ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) { - int err; + int index; struct mmc_host *host; int alias_id, min_idx, max_idx;
@@ -512,20 +513,19 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
alias_id = of_alias_get_id(dev->of_node, "mmc"); if (alias_id >= 0) { - min_idx = alias_id; - max_idx = alias_id + 1; + index = alias_id; } else { min_idx = mmc_first_nonreserved_index(); max_idx = 0; - }
- err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL); - if (err < 0) { - kfree(host); - return NULL; + index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL); + if (index < 0) { + kfree(host); + return NULL; + } }
- host->index = err; + host->index = index;
dev_set_name(&host->class_dev, "mmc%d", host->index); host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev)); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c5a646d06102..9a184c99fbe4 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, static int bond_ipsec_add_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; + struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave; + int err;
if (!bond_dev) return -EINVAL;
+ rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); - xs->xso.real_dev = slave->dev; - bond->xs = xs; + if (!slave) { + rcu_read_unlock(); + return -ENODEV; + }
- if (!(slave->dev->xfrmdev_ops - && slave->dev->xfrmdev_ops->xdo_dev_state_add)) { + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_add || + netif_is_bond_master(slave->dev)) { slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n"); + rcu_read_unlock(); return -EINVAL; }
- return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC); + if (!ipsec) { + rcu_read_unlock(); + return -ENOMEM; + } + xs->xso.real_dev = slave->dev; + + err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + if (!err) { + ipsec->xs = xs; + INIT_LIST_HEAD(&ipsec->list); + spin_lock_bh(&bond->ipsec_lock); + list_add(&ipsec->list, &bond->ipsec_list); + spin_unlock_bh(&bond->ipsec_lock); + } else { + kfree(ipsec); + } + rcu_read_unlock(); + return err; +} + +static void bond_ipsec_add_sa_all(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_ipsec *ipsec; + struct slave *slave; + + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) + goto out; + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_add || + netif_is_bond_master(slave->dev)) { + spin_lock_bh(&bond->ipsec_lock); + if (!list_empty(&bond->ipsec_list)) + slave_warn(bond_dev, slave->dev, + "%s: no slave xdo_dev_state_add\n", + __func__); + spin_unlock_bh(&bond->ipsec_lock); + goto out; + } + + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + ipsec->xs->xso.real_dev = slave->dev; + if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) { + slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__); + ipsec->xs->xso.real_dev = NULL; + } + } + spin_unlock_bh(&bond->ipsec_lock); +out: + rcu_read_unlock(); }
/** @@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) static void bond_ipsec_del_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; + struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave;
if (!bond_dev) return;
+ rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave);
if (!slave) - return; + goto out;
- xs->xso.real_dev = slave->dev; + if (!xs->xso.real_dev) + goto out; + + WARN_ON(xs->xso.real_dev != slave->dev);
- if (!(slave->dev->xfrmdev_ops - && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_delete || + netif_is_bond_master(slave->dev)) { slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); - return; + goto out; }
slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); +out: + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (ipsec->xs == xs) { + list_del(&ipsec->list); + kfree(ipsec); + break; + } + } + spin_unlock_bh(&bond->ipsec_lock); + rcu_read_unlock(); +} + +static void bond_ipsec_del_sa_all(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_ipsec *ipsec; + struct slave *slave; + + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) { + rcu_read_unlock(); + return; + } + + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (!ipsec->xs->xso.real_dev) + continue; + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_delete || + netif_is_bond_master(slave->dev)) { + slave_warn(bond_dev, slave->dev, + "%s: no slave xdo_dev_state_delete\n", + __func__); + } else { + slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); + } + ipsec->xs->xso.real_dev = NULL; + } + spin_unlock_bh(&bond->ipsec_lock); + rcu_read_unlock(); }
/** @@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; - struct bonding *bond = netdev_priv(bond_dev); - struct slave *curr_active = rcu_dereference(bond->curr_active_slave); - struct net_device *slave_dev = curr_active->dev; + struct net_device *real_dev; + struct slave *curr_active; + struct bonding *bond; + int err;
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) - return true; + bond = netdev_priv(bond_dev); + rcu_read_lock(); + curr_active = rcu_dereference(bond->curr_active_slave); + real_dev = curr_active->dev;
- if (!(slave_dev->xfrmdev_ops - && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { - slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); - return false; + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { + err = false; + goto out; }
- xs->xso.real_dev = slave_dev; - return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); + if (!xs->xso.real_dev) { + err = false; + goto out; + } + + if (!real_dev->xfrmdev_ops || + !real_dev->xfrmdev_ops->xdo_dev_offload_ok || + netif_is_bond_master(real_dev)) { + err = false; + goto out; + } + + err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); +out: + rcu_read_unlock(); + return err; }
static const struct xfrmdev_ops bond_xfrmdev_ops = { @@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) return;
#ifdef CONFIG_XFRM_OFFLOAD - if (old_active && bond->xs) - bond_ipsec_del_sa(bond->xs); + bond_ipsec_del_sa_all(bond); #endif /* CONFIG_XFRM_OFFLOAD */
if (new_active) { @@ -1067,10 +1193,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) }
#ifdef CONFIG_XFRM_OFFLOAD - if (new_active && bond->xs) { - xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); - bond_ipsec_add_sa(bond->xs); - } + bond_ipsec_add_sa_all(bond); #endif /* CONFIG_XFRM_OFFLOAD */
/* resend IGMP joins since active slave has changed or @@ -3319,6 +3442,9 @@ static int bond_master_netdev_event(unsigned long event, return bond_event_changename(event_bond); case NETDEV_UNREGISTER: bond_remove_proc_entry(event_bond); +#ifdef CONFIG_XFRM_OFFLOAD + xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true); +#endif /* CONFIG_XFRM_OFFLOAD */ break; case NETDEV_REGISTER: bond_create_proc_entry(event_bond); @@ -4882,7 +5008,8 @@ void bond_setup(struct net_device *bond_dev) #ifdef CONFIG_XFRM_OFFLOAD /* set up xfrm device ops (only supported in active-backup right now) */ bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; - bond->xs = NULL; + INIT_LIST_HEAD(&bond->ipsec_list); + spin_lock_init(&bond->ipsec_lock); #endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */ diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 05af632b0f59..634a48e6616b 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -12,7 +12,7 @@ config NET_DSA_MV88E6XXX config NET_DSA_MV88E6XXX_PTP bool "PTP support for Marvell 88E6xxx" default n - depends on PTP_1588_CLOCK + depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK help Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch chips that support it. diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index ebe4d33cda27..6e5dbe9f3892 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -378,6 +378,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv) if (dsa_is_cpu_port(ds, port)) v->pvid = true; list_add(&v->list, &priv->dsa_8021q_vlans); + + v = kmemdup(v, sizeof(*v), GFP_KERNEL); + if (!v) + return -ENOMEM; + + list_add(&v->list, &priv->bridge_vlans); }
((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index aef3fccc27a9..3c3aa9467310 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1640,11 +1640,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { - u16 vlan_proto = tpa_info->metadata >> - RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(tpa_info->metadata >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + return NULL; + } }
skb_checksum_none_assert(skb); @@ -1865,9 +1870,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; - u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(meta_data >> + RX_CMP_FLAGS2_METADATA_TPID_SFT);
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + goto next_rx; + } }
skb_checksum_none_assert(skb); @@ -10093,6 +10104,12 @@ int bnxt_half_open_nic(struct bnxt *bp) { int rc = 0;
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); + rc = -ENODEV; + goto half_open_err; + } + rc = bnxt_alloc_mem(bp, false); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); @@ -11849,10 +11866,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp) (bp->fw_reset_max_dsecs * HZ / 10)); }
+static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) +{ + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { + bnxt_ulp_start(bp, rc); + bnxt_dl_health_status_update(bp, false); + } + bp->fw_reset_state = 0; + dev_close(bp->dev); +} + static void bnxt_fw_reset_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); - int rc; + int rc = 0;
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); @@ -11882,6 +11910,11 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->fw_reset_timestamp = jiffies; rtnl_lock(); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + return; + } bnxt_fw_reset_close(bp); if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; @@ -11929,6 +11962,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) if (val == 0xffff) { if (bnxt_fw_reset_timeout(bp)) { netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); + rc = -ETIMEDOUT; goto fw_reset_abort; } bnxt_queue_fw_reset_work(bp, HZ / 1000); @@ -11938,6 +11972,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); if (pci_enable_device(bp->pdev)) { netdev_err(bp->dev, "Cannot re-enable PCI device\n"); + rc = -ENODEV; goto fw_reset_abort; } pci_set_master(bp->pdev); @@ -11964,9 +11999,10 @@ static void bnxt_fw_reset_task(struct work_struct *work) } rc = bnxt_open(bp->dev); if (rc) { - netdev_err(bp->dev, "bnxt_open_nic() failed\n"); - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - dev_close(bp->dev); + netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + return; }
bp->fw_reset_state = 0; @@ -11993,12 +12029,8 @@ static void bnxt_fw_reset_task(struct work_struct *work) netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); } fw_reset_abort: - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) - bnxt_dl_health_status_update(bp, false); - bp->fw_reset_state = 0; rtnl_lock(); - dev_close(bp->dev); + bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); }
@@ -13315,7 +13347,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) bnxt_close(netdev);
- pci_disable_device(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index a918e374f3c5..187ff643ad2a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev) if (!edev) return ERR_PTR(-ENOMEM); edev->en_ops = &bnxt_en_ops_tbl; - if (bp->flags & BNXT_FLAG_ROCEV1_CAP) - edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; - if (bp->flags & BNXT_FLAG_ROCEV2_CAP) - edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; edev->net = dev; edev->pdev = bp->pdev; edev->l2_db_size = bp->db_size; edev->l2_db_size_nc = bp->db_size; bp->edev = edev; } + edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP; + if (bp->flags & BNXT_FLAG_ROCEV1_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; + if (bp->flags & BNXT_FLAG_ROCEV2_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; return bp->edev; } EXPORT_SYMBOL(bnxt_ulp_probe); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 4cddd628d41b..9ed3d1ab2ca5 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) * bits 32:47 indicate the PVF num. */ for (q_no = 0; q_no < ern; q_no++) { - reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; + reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
/* for VF assigned queues. */ if (q_no < oct->sriov_info.pf_srn) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 762113a04dde..9f62ffe64781 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap) { unsigned int i;
+ if (!is_uld(adap)) + return; + mutex_lock(&uld_mutex); list_del(&adap->list_node);
@@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev) */ destroy_workqueue(adapter->workq);
- if (is_uld(adapter)) { - detach_ulds(adapter); - t4_uld_clean_up(adapter); - } + detach_ulds(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]->reg_state == NETREG_REGISTERED) + unregister_netdev(adapter->port[i]); + + t4_uld_clean_up(adapter);
adap_free_hma_mem(adapter);
@@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev)
cxgb4_free_mps_ref_entries(adapter);
- for_each_port(adapter, i) - if (adapter->port[i]->reg_state == NETREG_REGISTERED) - unregister_netdev(adapter->port[i]); - debugfs_remove_recursive(adapter->debugfs_root);
if (!is_t4(adapter->params.chip)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 743af9e654aa..17faac715882 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap) { unsigned int i;
+ if (!is_uld(adap)) + return; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 05de37c3b64c..87321b7239cf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -2770,32 +2770,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) if (err) return err;
- err = dpaa2_switch_seed_bp(ethsw); - if (err) - goto err_free_dpbp; - err = dpaa2_switch_alloc_rings(ethsw); if (err) - goto err_drain_dpbp; + goto err_free_dpbp;
err = dpaa2_switch_setup_dpio(ethsw); if (err) goto err_destroy_rings;
+ err = dpaa2_switch_seed_bp(ethsw); + if (err) + goto err_deregister_dpio; + err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); if (err) { dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); - goto err_deregister_dpio; + goto err_drain_dpbp; }
return 0;
+err_drain_dpbp: + dpaa2_switch_drain_bp(ethsw); err_deregister_dpio: dpaa2_switch_free_dpio(ethsw); err_destroy_rings: dpaa2_switch_destroy_rings(ethsw); -err_drain_dpbp: - dpaa2_switch_drain_bp(ethsw); err_free_dpbp: dpaa2_switch_free_dpbp(ethsw);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 79cefe85a799..b43c6ff07614 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1349,13 +1349,16 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(dev); if (err) - goto abort_with_wq; + goto abort_with_gve_init;
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str); gve_clear_probe_in_progress(priv); queue_work(priv->gve_wq, &priv->service_task); return 0;
+abort_with_gve_init: + gve_teardown_priv_resources(priv); + abort_with_wq: destroy_workqueue(priv->gve_wq);
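The gve fix above, like several probe-path fixes further down (hso, e1000e, igb, igc, ixgbe), restores the usual goto-based unwind: labels run in reverse order of acquisition, so a failure at any step releases exactly what was already set up. A compact sketch of the pattern for a hypothetical PCI driver, not taken from any patch in this release:

static int example_probe(struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = pci_request_regions(pdev, "example");
        if (err)
                goto err_disable_device;

        err = example_setup_resources(pdev);    /* hypothetical helper */
        if (err)
                goto err_release_regions;

        return 0;

err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
        return err;
}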
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 12f6c2442a7a..e53512f6878a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -131,7 +131,7 @@ /* buf unit size is cache_line_size, which is 64, so the shift is 6 */ #define PPE_BUF_SIZE_SHIFT 6 #define PPE_TX_BUF_HOLD BIT(31) -#define CACHE_LINE_MASK 0x3F +#define SOC_CACHE_LINE_MASK 0x3F #else #define PPE_CFG_QOS_VMID_GRP_SHIFT 8 #define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11 @@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) #if defined(CONFIG_HI13X1_GMAC) desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT); - desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK); - desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK); + desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK); + desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK); #else desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV); desc->send_addr = (__force u32)cpu_to_be32(phys); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index a2c17af57fde..d283beec9f66 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -135,7 +135,8 @@ struct hclge_mbx_vf_to_pf_cmd { u8 mbx_need_resp; u8 rsv1[1]; u8 msg_len; - u8 rsv2[3]; + u8 rsv2; + u16 match_id; struct hclge_vf_to_pf_msg msg; };
@@ -145,7 +146,8 @@ struct hclge_mbx_pf_to_vf_cmd { u8 dest_vfid; u8 rsv[3]; u8 msg_len; - u8 rsv1[3]; + u8 rsv1; + u16 match_id; struct hclge_pf_to_vf_msg msg; };
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f1c9f4ada348..38b601031db4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP; resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 0db51ef15ef6..fe03c8419890 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2621,6 +2621,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) { + struct hnae3_handle *nic = &hdev->nic; + int ret; + + ret = hclgevf_en_hw_strip_rxvtag(nic, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to enable rx vlan offload, ret = %d\n", ret); + return ret; + } + return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, false); } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index efc98903c0b7..5b4a7ef7dffa 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1707,7 +1707,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; }
@@ -1729,6 +1728,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) dev_kfree_skb_any(skb); tx_send_failed++; tx_dropped++; + ibmvnic_tx_scrq_flush(adapter, tx_scrq); ret = NETDEV_TX_OK; goto out; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index dc0ded7e5e61..86b7778dc9b4 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7664,6 +7664,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 9e3103fae723..caedf24c24c1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2227,6 +2227,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_netdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index e612c24fa384..44bafedd09f2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3798,6 +3798,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7b1885f9ce03..b0e900d1eae2 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) **/ static int igb_request_msix(struct igb_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0;
@@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter) if (err) goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i];
vector++; @@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter) **/ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) { - struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; u32 tqavcc, tqavctrl; u16 value;
WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue];
/* If any of the Qav features is enabled, configure queues as SR and * with HIGH PRIO. If none is, then configure them with LOW PRIO and @@ -3615,6 +3623,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: @@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) DMA_TO_DEVICE); }
+ tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 25871351730b..58e842cbf6ef 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -560,7 +560,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) if (hw->phy.ops.read_reg) return hw->phy.ops.read_reg(hw, offset, data);
- return 0; + return -EOPNOTSUPP; }
void igc_reinit_locked(struct igc_adapter *); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index f1adf154ec4a..a8d5f196fdbd 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -217,6 +217,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) DMA_TO_DEVICE); }
+ tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; @@ -5594,6 +5596,7 @@ static int igc_probe(struct pci_dev *pdev, err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2ac5b82676f3..42d57a73ce1a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { if (ring_uses_build_skb(rx_ring)) { - unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; + unsigned long offset = (unsigned long)(skb->data) & mask;
dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, @@ -11069,6 +11070,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index caaea2c920a6..e3e4676af9e4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, **/ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *dev = xs->xso.real_dev; + struct ixgbevf_adapter *adapter; + struct ixgbevf_ipsec *ipsec; u16 sa_idx; int ret;
+ adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", xs->id.proto); @@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) **/ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *dev = xs->xso.real_dev; + struct ixgbevf_adapter *adapter; + struct ixgbevf_ipsec *ipsec; u16 sa_idx;
+ adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index aad33d22c33f..3dc577183a40 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1287,6 +1287,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev, }
static int ocelot_netdevice_changeupper(struct net_device *dev, + struct net_device *brport_dev, struct netdev_notifier_changeupper_info *info) { struct netlink_ext_ack *extack; @@ -1296,11 +1297,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) - err = ocelot_netdevice_bridge_join(dev, dev, + err = ocelot_netdevice_bridge_join(dev, brport_dev, info->upper_dev, extack); else - err = ocelot_netdevice_bridge_leave(dev, dev, + err = ocelot_netdevice_bridge_leave(dev, brport_dev, info->upper_dev); } if (netif_is_lag_master(info->upper_dev)) { @@ -1335,7 +1336,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev, if (ocelot_port->bond != dev) return NOTIFY_OK;
- err = ocelot_netdevice_changeupper(lower, info); + err = ocelot_netdevice_changeupper(lower, dev, info); if (err) return notifier_from_errno(err); } @@ -1374,7 +1375,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct netdev_notifier_changeupper_info *info = ptr;
if (ocelot_netdevice_dev_check(dev)) - return ocelot_netdevice_changeupper(dev, info); + return ocelot_netdevice_changeupper(dev, dev, info);
if (netif_is_lag_master(dev)) return ocelot_netdevice_lag_changeupper(dev, info); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a0d4e052a79e..b8eb1b2a8de3 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5085,7 +5085,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp) new_bus->priv = tp; new_bus->parent = &pdev->dev; new_bus->irq[0] = PHY_MAC_INTERRUPT; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev)); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg; new_bus->write = r8169_mdio_write_reg; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index a3ca406a3561..bb48a139dd15 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, * maximum size. */ tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx); + tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL); n_xdp_tx = num_possible_cpus(); n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
@@ -181,7 +182,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, efx->xdp_tx_queue_count = 0; } else { efx->n_xdp_channels = n_xdp_ev; - efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL; + efx->xdp_tx_per_channel = tx_per_ev; efx->xdp_tx_queue_count = n_xdp_tx; n_channels += n_xdp_ev; netif_dbg(efx, drv, efx->net_dev, @@ -891,18 +892,20 @@ int efx_set_channels(struct efx_nic *efx) if (efx_channel_is_xdp_tx(channel)) { efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->queue = next_queue++; - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); + /* We may have a few left-over XDP TX * queues owing to xdp_tx_queue_count * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL. * We still allocate and probe those * TXQs, but never use them. */ - if (xdp_queue_number < efx->xdp_tx_queue_count) + if (xdp_queue_number < efx->xdp_tx_queue_count) { + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); efx->xdp_tx_queues[xdp_queue_number] = tx_queue; - xdp_queue_number++; + xdp_queue_number++; + } } } else { efx_for_each_channel_tx_queue(tx_queue, channel) { @@ -914,8 +917,7 @@ int efx_set_channels(struct efx_nic *efx) } } } - if (xdp_queue_number) - efx->xdp_tx_queue_count = xdp_queue_number; + WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 91cd5073ddb2..980a60477b02 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7170,6 +7170,7 @@ int stmmac_suspend(struct device *dev) priv->plat->rx_queues_to_use, false);
stmmac_fpe_handshake(priv, false); + stmmac_fpe_stop_wq(priv); }
priv->speed = SPEED_UNKNOWN; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index a696ada013eb..cad9e466353f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -399,6 +399,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + int phy_mode; void *ret; int rc;
@@ -414,10 +415,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) eth_zero_addr(mac); }
- plat->phy_interface = device_get_phy_mode(&pdev->dev); - if (plat->phy_interface < 0) - return ERR_PTR(plat->phy_interface); + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) + return ERR_PTR(phy_mode);
+ plat->phy_interface = phy_mode; plat->interface = stmmac_of_get_mac_mode(np); if (plat->interface < 0) plat->interface = plat->phy_interface; diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index bbbc6ac8fa82..53a433442803 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -78,6 +78,11 @@ enum { /* Temperature read register (88E2110 only) */ MV_PCS_TEMP = 0x8042,
+ /* Number of ports on the device */ + MV_PCS_PORT_INFO = 0xd00d, + MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380, + MV_PCS_PORT_INFO_NPORTS_SHIFT = 7, + /* These registers appear at 0x800X and 0xa00X - the 0xa00X control * registers appear to set themselves to the 0x800X when AN is * restarted, but status registers appear readable from either. @@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = { #endif };
+static int mv3310_get_number_of_ports(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO); + if (ret < 0) + return ret; + + ret &= MV_PCS_PORT_INFO_NPORTS_MASK; + ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT; + + return ret + 1; +} + +static int mv3310_match_phy_device(struct phy_device *phydev) +{ + return mv3310_get_number_of_ports(phydev) == 1; +} + +static int mv3340_match_phy_device(struct phy_device *phydev) +{ + return mv3310_get_number_of_ports(phydev) == 4; +} + static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g) { int val; @@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev) static struct phy_driver mv3310_drivers[] = { { .phy_id = MARVELL_PHY_ID_88X3310, - .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv3310_match_phy_device, .name = "mv88x3310", .driver_data = &mv3310_type, .get_features = mv3310_get_features, @@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = { .set_loopback = genphy_c45_loopback, }, { - .phy_id = MARVELL_PHY_ID_88X3340, - .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .phy_id = MARVELL_PHY_ID_88X3310, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv3340_match_phy_device, .name = "mv88x3340", .driver_data = &mv3340_type, .get_features = mv3310_get_features, @@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = { module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK }, - { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK }, + { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK }, { }, }; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 5c779cc0ea11..28ebf4955b83 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2496,7 +2496,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, hso_net_init); if (!net) { dev_err(&interface->dev, "Unable to create ethernet device\n"); - goto exit; + goto err_hso_dev; }
hso_net = netdev_priv(net); @@ -2509,13 +2509,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, USB_DIR_IN); if (!hso_net->in_endp) { dev_err(&interface->dev, "Can't find BULK IN endpoint\n"); - goto exit; + goto err_net; } hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT); if (!hso_net->out_endp) { dev_err(&interface->dev, "Can't find BULK OUT endpoint\n"); - goto exit; + goto err_net; } SET_NETDEV_DEV(net, &interface->dev); SET_NETDEV_DEVTYPE(net, &hso_type); @@ -2524,18 +2524,18 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_rx_urb_pool[i]) - goto exit; + goto err_mux_bulk_rx; hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) - goto exit; + goto err_mux_bulk_rx; } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_tx_urb) - goto exit; + goto err_mux_bulk_rx; hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) - goto exit; + goto err_free_tx_urb;
add_net_device(hso_dev);
@@ -2543,7 +2543,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, result = register_netdev(net); if (result) { dev_err(&interface->dev, "Failed to register device\n"); - goto exit; + goto err_free_tx_buf; }
hso_log_port(hso_dev); @@ -2551,8 +2551,21 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, hso_create_rfkill(hso_dev, interface);
return hso_dev; -exit: - hso_free_net_device(hso_dev, true); + +err_free_tx_buf: + remove_net_device(hso_dev); + kfree(hso_net->mux_bulk_tx_buf); +err_free_tx_urb: + usb_free_urb(hso_net->mux_bulk_tx_urb); +err_mux_bulk_rx: + for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { + usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); + kfree(hso_net->mux_bulk_rx_buf_pool[i]); + } +err_net: + free_netdev(net); +err_hso_dev: + kfree(hso_dev); return NULL; }
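The hso rework above replaces a single catch-all error label with granular unwinding; one reason this matters is the netdev lifetime rule: until register_netdev() has succeeded the allocation is torn down with free_netdev() alone, and only after successful registration does teardown go through unregister_netdev() first. In sketch form (hypothetical driver, not code from this release):

        err = register_netdev(net);
        if (err) {
                free_netdev(net);       /* not yet visible to the stack: plain free */
                return err;
        }
        /* ... later, on remove ... */
        unregister_netdev(net);
        free_netdev(net);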
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 66973bb56305..148e756857a8 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -880,7 +880,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); cmnd->write_zeroes.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); - cmnd->write_zeroes.control = 0; + if (nvme_ns_has_pi(ns)) + cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT); + else + cmnd->write_zeroes.control = 0; return BLK_STS_OK; }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 42ad75ff1348..fb1c5ae0da39 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2591,7 +2591,9 @@ static void nvme_reset_work(struct work_struct *work) bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); int result;
- if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) { + if (dev->ctrl.state != NVME_CTRL_RESETTING) { + dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", + dev->ctrl.state); result = -ENODEV; goto out; } @@ -2998,7 +3000,6 @@ static void nvme_remove(struct pci_dev *pdev) if (!pci_device_is_present(pdev)) { nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); nvme_dev_disable(dev, true); - nvme_dev_remove_admin(dev); }
flush_work(&dev->ctrl.reset_work); diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c index 98c479dfae31..3041f0b3bbb6 100644 --- a/drivers/pwm/pwm-sprd.c +++ b/drivers/pwm/pwm-sprd.c @@ -183,13 +183,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, } }
- if (state->period != cstate->period || - state->duty_cycle != cstate->duty_cycle) { - ret = sprd_pwm_config(spc, pwm, state->duty_cycle, - state->period); - if (ret) - return ret; - } + ret = sprd_pwm_config(spc, pwm, state->duty_cycle, + state->period); + if (ret) + return ret;
sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1); } else if (cstate->enabled) { diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c index dc631c1a46b4..d144a4bdb76d 100644 --- a/drivers/regulator/hi6421-regulator.c +++ b/drivers/regulator/hi6421-regulator.c @@ -366,9 +366,8 @@ static struct hi6421_regulator_info
static int hi6421_regulator_enable(struct regulator_dev *rdev) { - struct hi6421_regulator_pdata *pdata; + struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev);
- pdata = dev_get_drvdata(rdev->dev.parent); /* hi6421 spec requires regulator enablement must be serialized: * - Because when BUCK, LDO switching from off to on, it will have * a huge instantaneous current; so you can not turn on two or @@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); - u32 reg_val; + struct hi6421_regulator_info *info; + unsigned int reg_val;
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc); regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val); if (reg_val & info->mode_mask) return REGULATOR_MODE_IDLE; @@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); - u32 reg_val; + struct hi6421_regulator_info *info; + unsigned int reg_val;
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc); regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val); if (reg_val & info->mode_mask) return REGULATOR_MODE_STANDBY; @@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); - u32 new_mode; + struct hi6421_regulator_info *info; + unsigned int new_mode;
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc); switch (mode) { case REGULATOR_MODE_NORMAL: new_mode = 0; @@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); - u32 new_mode; + struct hi6421_regulator_info *info; + unsigned int new_mode;
+ info = container_of(rdev->desc, struct hi6421_regulator_info, desc); switch (mode) { case REGULATOR_MODE_NORMAL: new_mode = 0; @@ -459,7 +462,9 @@ static unsigned int hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; + + info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
if (load_uA > info->eco_microamp) return REGULATOR_MODE_NORMAL; @@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev) if (!pdata) return -ENOMEM; mutex_init(&pdata->lock); - platform_set_drvdata(pdev, pdata);
for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) { /* assign per-regulator data */ info = &hi6421_regulator_info[i];
config.dev = pdev->dev.parent; - config.driver_data = info; + config.driver_data = pdata; config.regmap = pmic->regmap;
rdev = devm_regulator_register(&pdev->dev, &info->desc, diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b07105ae7c91..d8b05d8b5470 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -439,39 +439,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct iscsi_transport *t = iface->transport; - int param; - int param_type; + int param = -1;
if (attr == &dev_attr_iface_enabled.attr) param = ISCSI_NET_PARAM_IFACE_ENABLE; - else if (attr == &dev_attr_iface_vlan_id.attr) - param = ISCSI_NET_PARAM_VLAN_ID; - else if (attr == &dev_attr_iface_vlan_priority.attr) - param = ISCSI_NET_PARAM_VLAN_PRIORITY; - else if (attr == &dev_attr_iface_vlan_enabled.attr) - param = ISCSI_NET_PARAM_VLAN_ENABLED; - else if (attr == &dev_attr_iface_mtu.attr) - param = ISCSI_NET_PARAM_MTU; - else if (attr == &dev_attr_iface_port.attr) - param = ISCSI_NET_PARAM_PORT; - else if (attr == &dev_attr_iface_ipaddress_state.attr) - param = ISCSI_NET_PARAM_IPADDR_STATE; - else if (attr == &dev_attr_iface_delayed_ack_en.attr) - param = ISCSI_NET_PARAM_DELAYED_ACK_EN; - else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) - param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; - else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) - param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; - else if (attr == &dev_attr_iface_tcp_wsf.attr) - param = ISCSI_NET_PARAM_TCP_WSF; - else if (attr == &dev_attr_iface_tcp_timer_scale.attr) - param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; - else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) - param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; - else if (attr == &dev_attr_iface_cache_id.attr) - param = ISCSI_NET_PARAM_CACHE_ID; - else if (attr == &dev_attr_iface_redirect_en.attr) - param = ISCSI_NET_PARAM_REDIRECT_EN; else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_iface_header_digest.attr) @@ -508,6 +479,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; else if (attr == &dev_attr_iface_initiator_name.attr) param = ISCSI_IFACE_PARAM_INITIATOR_NAME; + + if (param != -1) + return t->attr_is_visible(ISCSI_IFACE_PARAM, param); + + if (attr == &dev_attr_iface_vlan_id.attr) + param = ISCSI_NET_PARAM_VLAN_ID; + else if (attr == &dev_attr_iface_vlan_priority.attr) + param = ISCSI_NET_PARAM_VLAN_PRIORITY; + else if (attr == &dev_attr_iface_vlan_enabled.attr) + param = ISCSI_NET_PARAM_VLAN_ENABLED; + else if (attr == &dev_attr_iface_mtu.attr) + param = ISCSI_NET_PARAM_MTU; + else if (attr == &dev_attr_iface_port.attr) + param = ISCSI_NET_PARAM_PORT; + else if (attr == &dev_attr_iface_ipaddress_state.attr) + param = ISCSI_NET_PARAM_IPADDR_STATE; + else if (attr == &dev_attr_iface_delayed_ack_en.attr) + param = ISCSI_NET_PARAM_DELAYED_ACK_EN; + else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) + param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) + param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf.attr) + param = ISCSI_NET_PARAM_TCP_WSF; + else if (attr == &dev_attr_iface_tcp_timer_scale.attr) + param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) + param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_iface_cache_id.attr) + param = ISCSI_NET_PARAM_CACHE_ID; + else if (attr == &dev_attr_iface_redirect_en.attr) + param = ISCSI_NET_PARAM_REDIRECT_EN; else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { if (attr == &dev_attr_ipv4_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV4_ADDR; @@ -598,32 +601,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, return 0; }
- switch (param) { - case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: - case ISCSI_IFACE_PARAM_HDRDGST_EN: - case ISCSI_IFACE_PARAM_DATADGST_EN: - case ISCSI_IFACE_PARAM_IMM_DATA_EN: - case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: - case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: - case ISCSI_IFACE_PARAM_PDU_INORDER_EN: - case ISCSI_IFACE_PARAM_ERL: - case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: - case ISCSI_IFACE_PARAM_FIRST_BURST: - case ISCSI_IFACE_PARAM_MAX_R2T: - case ISCSI_IFACE_PARAM_MAX_BURST: - case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: - case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: - case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: - case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: - case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: - case ISCSI_IFACE_PARAM_INITIATOR_NAME: - param_type = ISCSI_IFACE_PARAM; - break; - default: - param_type = ISCSI_NET_PARAM; - } - - return t->attr_is_visible(param_type, param); + return t->attr_is_visible(ISCSI_NET_PARAM, param); }
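The reworked iscsi_iface_attr_is_visible() above classifies an attribute in two passes: anything recognised in the first block is reported with ISCSI_IFACE_PARAM and returns early, and whatever falls through is treated as an ISCSI_NET_PARAM. The userspace sketch below (not part of the patch; the attribute names and parameter values are invented) shows the same classify-then-dispatch shape:

#include <stdio.h>
#include <string.h>

enum param_type { IFACE_PARAM, NET_PARAM };

/* hypothetical stand-in for t->attr_is_visible() */
static int attr_is_visible(enum param_type type, int param)
{
	printf("query type=%d param=%d\n", type, param);
	return 1;
}

static int iface_attr_visible(const char *attr)
{
	int param = -1;

	/* first pass: attributes that are interface-level parameters */
	if (!strcmp(attr, "enabled"))
		param = 0;
	else if (!strcmp(attr, "initiator_name"))
		param = 1;

	if (param != -1)
		return attr_is_visible(IFACE_PARAM, param);

	/* second pass: everything else is a network-level parameter */
	if (!strcmp(attr, "mtu"))
		param = 2;
	else if (!strcmp(attr, "vlan_id"))
		param = 3;
	else
		return 0;

	return attr_is_visible(NET_PARAM, param);
}

int main(void)
{
	iface_attr_visible("initiator_name");	/* reported as IFACE_PARAM */
	iface_attr_visible("mtu");		/* reported as NET_PARAM */
	return 0;
}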
static struct attribute *iscsi_iface_attrs[] = { diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index fe40626e45aa..61cbcc7e2121 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -84,6 +84,7 @@ MODULE_PARM_DESC(polling_limit_us, * struct bcm2835_spi - BCM2835 SPI controller * @regs: base address of register map * @clk: core clock, divided to calculate serial clock + * @clk_hz: core clock cached speed * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full * @tfr: SPI transfer currently processed * @ctlr: SPI controller reverse lookup @@ -124,6 +125,7 @@ MODULE_PARM_DESC(polling_limit_us, struct bcm2835_spi { void __iomem *regs; struct clk *clk; + unsigned long clk_hz; int irq; struct spi_transfer *tfr; struct spi_controller *ctlr; @@ -1082,19 +1084,18 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, struct spi_transfer *tfr) { struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); - unsigned long spi_hz, clk_hz, cdiv; + unsigned long spi_hz, cdiv; unsigned long hz_per_byte, byte_limit; u32 cs = bs->prepare_cs[spi->chip_select];
/* set clock */ spi_hz = tfr->speed_hz; - clk_hz = clk_get_rate(bs->clk);
- if (spi_hz >= clk_hz / 2) { + if (spi_hz >= bs->clk_hz / 2) { cdiv = 2; /* clk_hz/2 is the fastest we can go */ } else if (spi_hz) { /* CDIV must be a multiple of two */ - cdiv = DIV_ROUND_UP(clk_hz, spi_hz); + cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz); cdiv += (cdiv % 2);
if (cdiv >= 65536) @@ -1102,7 +1103,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, } else { cdiv = 0; /* 0 is the slowest we can go */ } - tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536); + tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536); bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
/* handle all the 3-wire mode */ @@ -1320,6 +1321,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) return bs->irq ? bs->irq : -ENODEV;
clk_prepare_enable(bs->clk); + bs->clk_hz = clk_get_rate(bs->clk);
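With the core clock rate cached in bs->clk_hz at probe time, bcm2835_spi_transfer_one() only has to derive the divider per transfer. As a standalone illustration of that arithmetic, and not driver code, the sketch below rounds CDIV up to an even value, clamps it at 65536 and reports the resulting effective rate; the clock rates are made up:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long effective_hz(unsigned long clk_hz, unsigned long spi_hz)
{
	unsigned long cdiv;

	if (spi_hz >= clk_hz / 2) {
		cdiv = 2;			/* clk_hz/2 is the fastest we can go */
	} else if (spi_hz) {
		cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
		cdiv += (cdiv % 2);		/* CDIV must be a multiple of two */
		if (cdiv >= 65536)
			cdiv = 0;		/* 0 selects the slowest rate */
	} else {
		cdiv = 0;
	}

	return cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
}

int main(void)
{
	/* 250 MHz core clock, 10 MHz requested -> CDIV 26 -> ~9.6 MHz */
	printf("%lu\n", effective_hz(250000000UL, 10000000UL));
	return 0;
}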
err = bcm2835_dma_init(ctlr, &pdev->dev, bs); if (err) diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 7a00346ff9b9..d62d69dd72b9 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -309,6 +309,9 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr) { unsigned int dummy_clk;
+ if (!op->dummy.nbytes) + return 0; + dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth); if (dtr) dummy_clk /= 2; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index a3afd1b9ac56..ceb16e70d235 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_apb; }
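The cqspi_calc_dummy() guard above simply returns zero cycles when the operation has no dummy bytes; otherwise the cycle count is nbytes * 8 / buswidth, halved for DTR. A quick userspace check of that formula (illustrative only, with arbitrary inputs):

#include <stdio.h>

static unsigned int calc_dummy(unsigned int nbytes, unsigned int buswidth, int dtr)
{
	unsigned int dummy_clk;

	if (!nbytes)
		return 0;		/* no dummy phase at all */

	dummy_clk = nbytes * (8 / buswidth);
	if (dtr)
		dummy_clk /= 2;		/* two bits per clock in DTR mode */

	return dummy_clk;
}

int main(void)
{
	printf("%u\n", calc_dummy(0, 4, 0));	/* 0 */
	printf("%u\n", calc_dummy(1, 1, 0));	/* 8 */
	printf("%u\n", calc_dummy(2, 4, 1));	/* 2 */
	return 0;
}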
+ pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); if (ret < 0) master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; @@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev) /* SPI controller initializations */ cdns_spi_init_hw(xspi);
- pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); - irq = platform_get_irq(pdev, 0); if (irq <= 0) { ret = -ENXIO; @@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
+ pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + ret = spi_register_master(master); if (ret) { dev_err(&pdev->dev, "spi_register_master failed\n"); diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 976f73b9e299..8d5fa7f1e506 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -427,13 +427,23 @@ static int mtk_spi_fifo_transfer(struct spi_master *master, mtk_spi_setup_packet(master);
cnt = xfer->len / 4; - iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); + if (xfer->tx_buf) + iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); + + if (xfer->rx_buf) + ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
remainder = xfer->len % 4; if (remainder > 0) { reg_val = 0; - memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder); - writel(reg_val, mdata->base + SPI_TX_DATA_REG); + if (xfer->tx_buf) { + memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder); + writel(reg_val, mdata->base + SPI_TX_DATA_REG); + } + if (xfer->rx_buf) { + reg_val = readl(mdata->base + SPI_RX_DATA_REG); + memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder); + } }
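The spi-mt65xx hunks above make the FIFO path handle TX-only, RX-only and duplex transfers by guarding the word copies on tx_buf/rx_buf and moving any 1-3 trailing bytes through a temporary 32-bit value. A userspace model of the trailing-byte handling against a plain in-memory "register" (buffer contents invented, not driver code):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const uint8_t tx[7] = { 1, 2, 3, 4, 5, 6, 7 };
	uint8_t rx[7] = { 0 };
	uint32_t fifo_reg;		/* stand-in for SPI_TX/RX_DATA_REG */
	size_t len = sizeof(tx);
	size_t cnt = len / 4;		/* full 32-bit words */
	size_t remainder = len % 4;	/* 1..3 trailing bytes */

	/* full words would go through iowrite32_rep()/ioread32_rep() */
	memcpy(rx, tx, cnt * 4);

	if (remainder > 0) {
		/* TX side: pack the tail into one register write */
		fifo_reg = 0;
		memcpy(&fifo_reg, tx + cnt * 4, remainder);

		/* RX side: read one register and keep only the tail bytes */
		memcpy(rx + cnt * 4, &fifo_reg, remainder);
	}

	printf("last byte: %u (expected 7)\n", rx[6]);
	return 0;
}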
mtk_spi_enable_transfer(master); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 8ffcffbb8157..a92a28933edb 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -1925,6 +1925,7 @@ static int stm32_spi_probe(struct platform_device *pdev) master->can_dma = stm32_spi_can_dma;
pm_runtime_set_active(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); pm_runtime_enable(&pdev->dev);
ret = spi_register_master(master); @@ -1940,6 +1941,8 @@ static int stm32_spi_probe(struct platform_device *pdev)
err_pm_disable: pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); err_dma_release: if (spi->dma_tx) dma_release_channel(spi->dma_tx); @@ -1956,9 +1959,14 @@ static int stm32_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct stm32_spi *spi = spi_master_get_devdata(master);
+ pm_runtime_get_sync(&pdev->dev); + spi_unregister_master(master); spi->cfg->disable(spi);
+ pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); if (master->dma_tx) dma_release_channel(master->dma_tx); if (master->dma_rx) @@ -1966,7 +1974,6 @@ static int stm32_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(spi->clk);
- pm_runtime_disable(&pdev->dev);
pinctrl_pm_select_sleep_state(&pdev->dev);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 7b07e557dc8d..6594bb0b9df0 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -25,7 +25,7 @@ #include "target_core_alua.h"
static sense_reason_t -sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); +sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool); static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
static sense_reason_t @@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) }
static sense_reason_t -sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) +sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) { struct se_device *dev = cmd->se_dev; sector_t end_lba = dev->transport->get_blocks(dev) + 1; unsigned int sectors = sbc_get_write_same_sectors(cmd); sense_reason_t ret;
- if ((flags[0] & 0x04) || (flags[0] & 0x02)) { + if ((flags & 0x04) || (flags & 0x02)) { pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); @@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o }
/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ - if (flags[0] & 0x10) { + if (flags & 0x10) { pr_warn("WRITE SAME with ANCHOR not supported\n"); return TCM_INVALID_CDB_FIELD; } @@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting * translated into block discard requests within backend code. */ - if (flags[0] & 0x08) { + if (flags & 0x08) { if (!ops->execute_unmap) return TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o if (!ops->execute_write_same) return TCM_UNSUPPORTED_SCSI_OPCODE;
- ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); + ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true); if (ret) return ret;
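After the target_core_sbc changes above, callers hand sbc_check_prot() the already-extracted PROTECT field (the top three bits of CDB byte 1) and sbc_setup_write_same() receives the flags byte by value, testing PBDATA/LBDATA/ANCHOR/UNMAP as plain masks. A tiny standalone illustration of those bit extractions, with a made-up CDB byte:

#include <stdio.h>

int main(void)
{
	unsigned char cdb1 = 0xA8;		/* example WRITE byte 1, invented */
	unsigned char protect = cdb1 >> 5;	/* PROTECT field, bits 7..5 */

	printf("PROTECT = %u\n", protect);		/* 0xA8 >> 5 == 5 */
	printf("UNMAP   = %u\n", !!(cdb1 & 0x08));
	printf("ANCHOR  = %u\n", !!(cdb1 & 0x10));
	printf("PBDATA/LBDATA set: %u\n", !!(cdb1 & 0x04) || !!(cdb1 & 0x02));
	return 0;
}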
@@ -717,10 +717,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_ }
static sense_reason_t -sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, u32 sectors, bool is_write) { - u8 protect = cdb[1] >> 5; int sp_ops = cmd->se_sess->sup_prot_ops; int pi_prot_type = dev->dev_attrib.pi_prot_type; bool fabric_prot = false; @@ -768,7 +767,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, fallthrough; default: pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " - "PROTECT: 0x%02x\n", cdb[0], protect); + "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect); return TCM_INVALID_CDB_FIELD; }
@@ -843,7 +842,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret;
@@ -857,7 +856,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret;
@@ -871,7 +870,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); if (ret) return ret;
@@ -892,7 +891,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret;
@@ -906,7 +905,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret;
@@ -921,7 +920,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD;
- ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); if (ret) return ret;
@@ -980,7 +979,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- ret = sbc_setup_write_same(cmd, &cdb[10], ops); + ret = sbc_setup_write_same(cmd, cdb[10], ops); if (ret) return ret; break; @@ -1079,7 +1078,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- ret = sbc_setup_write_same(cmd, &cdb[1], ops); + ret = sbc_setup_write_same(cmd, cdb[1], ops); if (ret) return ret; break; @@ -1097,7 +1096,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) * Follow sbcr26 with WRITE_SAME (10) and check for the existence * of byte 1 bit 3 UNMAP instead of original reserved field */ - ret = sbc_setup_write_same(cmd, &cdb[1], ops); + ret = sbc_setup_write_same(cmd, cdb[1], ops); if (ret) return ret; break; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7e35eddd9eb7..26ceabe34de5 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -886,7 +886,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) INIT_WORK(&cmd->work, success ? target_complete_ok_work : target_complete_failure_work);
- if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) + if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) cpu = cmd->cpuid; else cpu = wwn->cmd_compl_affinity; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index df8e69e60aaf..4e123336e410 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -48,6 +48,7 @@
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */ #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */ +#define USB_PING_RESPONSE_TIME 400 /* ns */
/* Protect struct usb_device->state and ->children members * Note: Both are also protected by ->dev.sem, except that ->state can @@ -182,8 +183,9 @@ int usb_device_supports_lpm(struct usb_device *udev) }
/* - * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from - * either U1 or U2. + * Set the Maximum Exit Latency (MEL) for the host to wake up the path from + * U1/U2, send a PING to the device and receive a PING_RESPONSE. + * See USB 3.1 section C.1.5.2 */ static void usb_set_lpm_mel(struct usb_device *udev, struct usb3_lpm_parameters *udev_lpm_params, @@ -193,35 +195,37 @@ static void usb_set_lpm_mel(struct usb_device *udev, unsigned int hub_exit_latency) { unsigned int total_mel; - unsigned int device_mel; - unsigned int hub_mel;
/* - * Calculate the time it takes to transition all links from the roothub - * to the parent hub into U0. The parent hub must then decode the - * packet (hub header decode latency) to figure out which port it was - * bound for. - * - * The Hub Header decode latency is expressed in 0.1us intervals (0x1 - * means 0.1us). Multiply that by 100 to get nanoseconds. + * tMEL1. time to transition path from host to device into U0. + * MEL for parent already contains the delay up to parent, so only add + * the exit latency for the last link (pick the slower exit latency), + * and the hub header decode latency. See USB 3.1 section C 2.2.1 + * Store MEL in nanoseconds */ total_mel = hub_lpm_params->mel + - (hub->descriptor->u.ss.bHubHdrDecLat * 100); + max(udev_exit_latency, hub_exit_latency) * 1000 + + hub->descriptor->u.ss.bHubHdrDecLat * 100;
/* - * How long will it take to transition the downstream hub's port into - * U0? The greater of either the hub exit latency or the device exit - * latency. - * - * The BOS U1/U2 exit latencies are expressed in 1us intervals. - * Multiply that by 1000 to get nanoseconds. + * tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for + * each link + wHubDelay for each hub. Add only for last link. + * tMEL4, the time for PING_RESPONSE to traverse upstream is similar. + * Multiply by 2 to include it as well. */ - device_mel = udev_exit_latency * 1000; - hub_mel = hub_exit_latency * 1000; - if (device_mel > hub_mel) - total_mel += device_mel; - else - total_mel += hub_mel; + total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) + + USB_TP_TRANSMISSION_DELAY) * 2; + + /* + * tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE + * after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4 + * to cover the delay if the PING_RESPONSE is queued behind a Max Packet + * Size DP. + * Note these delays should be added only once for the entire path, so + * add them to the MEL of the device connected to the roothub. + */ + if (!hub->hdev->parent) + total_mel += USB_PING_RESPONSE_TIME + 2100;
udev_lpm_params->mel = total_mel; } @@ -4090,6 +4094,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev, return 0; }
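The rewritten usb_set_lpm_mel() accumulates tMEL1 (path exit latency plus hub header decode), tMEL2 and tMEL4 (PING down and PING_RESPONSE back up, hence the factor of two) and, for the device attached to the root hub, tMEL3 plus the 2100 ns allowance. The sketch below redoes that arithmetic in plain C with invented descriptor values; it is an illustration of the formula, not the hub code:

#include <stdio.h>

#define USB_TP_TRANSMISSION_DELAY	40	/* ns */
#define USB_PING_RESPONSE_TIME		400	/* ns */

static unsigned int calc_mel(unsigned int parent_mel_ns,
			     unsigned int udev_el_us, unsigned int hub_el_us,
			     unsigned int hdr_dec_lat, unsigned int hub_delay_ns,
			     int connected_to_roothub)
{
	unsigned int max_el = udev_el_us > hub_el_us ? udev_el_us : hub_el_us;
	unsigned int mel;

	/* tMEL1: bring the whole path to U0, plus hub header decode (0.1 us units) */
	mel = parent_mel_ns + max_el * 1000 + hdr_dec_lat * 100;

	/* tMEL2 + tMEL4: PING down and PING_RESPONSE back up */
	mel += (hub_delay_ns + USB_TP_TRANSMISSION_DELAY) * 2;

	/* tMEL3 plus the 2100 ns allowance, counted once for the whole path */
	if (connected_to_roothub)
		mel += USB_PING_RESPONSE_TIME + 2100;

	return mel;
}

int main(void)
{
	/* arbitrary example: 2 us device EL, 1 us hub EL, hdr decode 0.5 us */
	printf("MEL = %u ns\n", calc_mel(0, 2, 1, 5, 0, 1));
	return 0;
}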
+/* + * Don't allow device initiated U1/U2 if the system exit latency + one bus + * interval is greater than the minimum service interval of any active + * periodic endpoint. See USB 3.2 section 9.4.9 + */ +static bool usb_device_may_initiate_lpm(struct usb_device *udev, + enum usb3_link_state state) +{ + unsigned int sel; /* us */ + int i, j; + + if (state == USB3_LPM_U1) + sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); + else if (state == USB3_LPM_U2) + sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); + else + return false; + + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { + struct usb_interface *intf; + struct usb_endpoint_descriptor *desc; + unsigned int interval; + + intf = udev->actconfig->interface[i]; + if (!intf) + continue; + + for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) { + desc = &intf->cur_altsetting->endpoint[j].desc; + + if (usb_endpoint_xfer_int(desc) || + usb_endpoint_xfer_isoc(desc)) { + interval = (1 << (desc->bInterval - 1)) * 125; + if (sel + 125 > interval) + return false; + } + } + } + return true; +} + /* * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated * U1/U2 entry. @@ -4162,20 +4207,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, * U1/U2_ENABLE */ if (udev->actconfig && - usb_set_device_initiated_lpm(udev, state, true) == 0) { - if (state == USB3_LPM_U1) - udev->usb3_lpm_u1_enabled = 1; - else if (state == USB3_LPM_U2) - udev->usb3_lpm_u2_enabled = 1; - } else { - /* Don't request U1/U2 entry if the device - * cannot transition to U1/U2. - */ - usb_set_lpm_timeout(udev, state, 0); - hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); + usb_device_may_initiate_lpm(udev, state)) { + if (usb_set_device_initiated_lpm(udev, state, true)) { + /* + * Request to enable device initiated U1/U2 failed, + * better to turn off lpm in this case. + */ + usb_set_lpm_timeout(udev, state, 0); + hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); + return; + } } -}
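usb_device_may_initiate_lpm() above refuses device-initiated U1/U2 whenever the system exit latency plus one 125 us bus interval would overrun the service interval of an active periodic endpoint, where the interval is (1 << (bInterval - 1)) * 125 us. A minimal standalone version of that comparison, with endpoint values chosen only for the example:

#include <stdio.h>

/* returns 1 if device-initiated LPM may be enabled for this endpoint */
static int lpm_ok(unsigned int sel_us, unsigned int bInterval)
{
	unsigned int interval_us = (1u << (bInterval - 1)) * 125;

	/* exit latency plus one 125 us bus interval must fit in the interval */
	return sel_us + 125 <= interval_us;
}

int main(void)
{
	printf("%d\n", lpm_ok(200, 3));	/* interval 500 us -> allowed */
	printf("%d\n", lpm_ok(500, 3));	/* interval 500 us -> denied  */
	return 0;
}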
+ if (state == USB3_LPM_U1) + udev->usb3_lpm_u1_enabled = 1; + else if (state == USB3_LPM_U2) + udev->usb3_lpm_u2_enabled = 1; +} /* * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated * U1/U2 entry. diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 21e7522655ac..a54a735b6384 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -502,10 +502,6 @@ static const struct usb_device_id usb_quirk_list[] = { /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
- /* Fibocom L850-GL LTE Modem */ - { USB_DEVICE(0x2cb7, 0x0007), .driver_info = - USB_QUIRK_IGNORE_REMOTE_WAKEUP }, - /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index ab6b815e0089..483de2bbfaab 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -383,6 +383,9 @@ enum dwc2_ep0_state { * 0 - No (default) * 1 - Partial power down * 2 - Hibernation + * @no_clock_gating: Specifies whether to avoid clock gating feature. + * 0 - No (use clock gating) + * 1 - Yes (avoid it) * @lpm: Enable LPM support. * 0 - No * 1 - Yes @@ -480,6 +483,7 @@ struct dwc2_core_params { #define DWC2_POWER_DOWN_PARAM_NONE 0 #define DWC2_POWER_DOWN_PARAM_PARTIAL 1 #define DWC2_POWER_DOWN_PARAM_HIBERNATION 2 + bool no_clock_gating;
bool lpm; bool lpm_clock_gating; diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index a5ab03808da6..a5c52b237e72 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -556,7 +556,8 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg) * If neither hibernation nor partial power down are supported, * clock gating is used to save power. */ - dwc2_gadget_enter_clock_gating(hsotg); + if (!hsotg->params.no_clock_gating) + dwc2_gadget_enter_clock_gating(hsotg); }
/* diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 184964174dc0..a9b63af5e498 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2749,12 +2749,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, return; }
- /* Zlp for all endpoints, for ep0 only in DATA IN stage */ + /* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */ if (hs_ep->send_zlp) { - dwc2_hsotg_program_zlp(hsotg, hs_ep); hs_ep->send_zlp = 0; - /* transfer will be completed on next complete interrupt */ - return; + if (!using_desc_dma(hsotg)) { + dwc2_hsotg_program_zlp(hsotg, hs_ep); + /* transfer will be completed on next complete interrupt */ + return; + } }
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) { @@ -3900,9 +3902,27 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, __func__); } } else { + /* Mask GINTSTS_GOUTNAKEFF interrupt */ + dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF); + if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF)) dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
+ if (!using_dma(hsotg)) { + /* Wait for GINTSTS_RXFLVL interrupt */ + if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, + GINTSTS_RXFLVL, 100)) { + dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n", + __func__); + } else { + /* + * Pop GLOBAL OUT NAK status packet from RxFIFO + * to assert GOUTNAKEFF interrupt + */ + dwc2_readl(hsotg, GRXSTSP); + } + } + /* Wait for global nak to take effect */ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_GOUTNAKEFF, 100)) @@ -4348,6 +4368,9 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now) epctl = dwc2_readl(hs, epreg);
if (value) { + /* Unmask GOUTNAKEFF interrupt */ + dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF); + if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF)) dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK); // STALL bit will be set in GOUTNAKEFF interrupt handler diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 035d4911a3c3..2a7828971d05 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -3338,7 +3338,8 @@ int dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex) * If not hibernation nor partial power down are supported, * clock gating is used to save power. */ - dwc2_host_enter_clock_gating(hsotg); + if (!hsotg->params.no_clock_gating) + dwc2_host_enter_clock_gating(hsotg); break; }
@@ -4402,7 +4403,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) * If not hibernation nor partial power down are supported, * clock gating is used to save power. */ - dwc2_host_enter_clock_gating(hsotg); + if (!hsotg->params.no_clock_gating) + dwc2_host_enter_clock_gating(hsotg);
/* After entering suspend, hardware is not accessible */ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 7a6089fa81e1..1b67d7baccda 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -76,6 +76,7 @@ static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg) struct dwc2_core_params *p = &hsotg->params;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE; + p->no_clock_gating = true; p->phy_utmi_width = 8; }
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index 2319c9737c2b..f3f112b08c9b 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -3861,6 +3861,7 @@ static int tegra_xudc_probe(struct platform_device *pdev) return 0;
free_eps: + pm_runtime_disable(&pdev->dev); tegra_xudc_free_eps(xudc); free_event_ring: tegra_xudc_free_event_ring(xudc); diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 94b5e64ae9a2..f214aa628592 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -703,24 +703,28 @@ EXPORT_SYMBOL_GPL(ehci_setup); static irqreturn_t ehci_irq (struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); - u32 status, masked_status, pcd_status = 0, cmd; + u32 status, current_status, masked_status, pcd_status = 0; + u32 cmd; int bh;
spin_lock(&ehci->lock);
- status = ehci_readl(ehci, &ehci->regs->status); + status = 0; + current_status = ehci_readl(ehci, &ehci->regs->status); +restart:
/* e.g. cardbus physical eject */ - if (status == ~(u32) 0) { + if (current_status == ~(u32) 0) { ehci_dbg (ehci, "device removed\n"); goto dead; } + status |= current_status;
/* * We don't use STS_FLR, but some controllers don't like it to * remain on, so mask it out along with the other status bits. */ - masked_status = status & (INTR_MASK | STS_FLR); + masked_status = current_status & (INTR_MASK | STS_FLR);
/* Shared IRQ? */ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { @@ -730,6 +734,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* clear (just) interrupts */ ehci_writel(ehci, masked_status, &ehci->regs->status); + + /* For edge interrupts, don't race with an interrupt bit being raised */ + current_status = ehci_readl(ehci, &ehci->regs->status); + if (current_status & INTR_MASK) + goto restart; + cmd = ehci_readl(ehci, &ehci->regs->command); bh = 0;
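The ehci_irq() change re-reads the status register after acknowledging it and loops while new interrupt bits keep appearing, OR-ing everything seen into a running status so edge-triggered events are not dropped. Below is a small userspace model of that accumulate-and-restart loop with a fake register that raises one extra event after the first ack; it only mimics the control flow:

#include <stdio.h>
#include <stdint.h>

#define INTR_MASK	0x3fu

static uint32_t fake_reg = 0x01;	/* pretend hardware status register */
static int extra_events = 1;		/* raise one more bit after first ack */

static uint32_t read_status(void) { return fake_reg; }

static void ack_status(uint32_t bits)
{
	fake_reg &= ~bits;
	if (extra_events--)
		fake_reg |= 0x02;	/* new event arrives while we ack */
}

int main(void)
{
	uint32_t status = 0, current_status, masked_status;

	current_status = read_status();
restart:
	status |= current_status;
	masked_status = current_status & INTR_MASK;
	ack_status(masked_status);

	/* don't race with a bit being raised between the read and the ack */
	current_status = read_status();
	if (current_status & INTR_MASK)
		goto restart;

	printf("handled status 0x%02x\n", status);	/* 0x03 */
	return 0;
}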
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index afd9174d83b1..abceca1c9c0f 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -153,8 +153,6 @@ struct max3421_hcd { */ struct urb *curr_urb; enum scheduling_pass sched_pass; - struct usb_device *loaded_dev; /* dev that's loaded into the chip */ - int loaded_epnum; /* epnum whose toggles are loaded */ int urb_done; /* > 0 -> no errors, < 0: errno */ size_t curr_len; u8 hien; @@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev) * Caller must NOT hold HCD spinlock. */ static void -max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, - int force_toggles) +max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum) { - struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); - int old_epnum, same_ep, rcvtog, sndtog; - struct usb_device *old_dev; + int rcvtog, sndtog; u8 hctl;
- old_dev = max3421_hcd->loaded_dev; - old_epnum = max3421_hcd->loaded_epnum; - - same_ep = (dev == old_dev && epnum == old_epnum); - if (same_ep && !force_toggles) - return; - - if (old_dev && !same_ep) { - /* save the old end-points toggles: */ - u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); - - rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; - sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; - - /* no locking: HCD (i.e., we) own toggles, don't we? */ - usb_settoggle(old_dev, old_epnum, 0, rcvtog); - usb_settoggle(old_dev, old_epnum, 1, sndtog); - } /* setup new endpoint's toggle bits: */ rcvtog = usb_gettoggle(dev, epnum, 0); sndtog = usb_gettoggle(dev, epnum, 1); hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
- max3421_hcd->loaded_epnum = epnum; spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
/* @@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, * address-assignment so it's best to just always load the * address whenever the end-point changed/was forced. */ - max3421_hcd->loaded_dev = dev; spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); }
@@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); struct urb *urb, *curr_urb = NULL; struct max3421_ep *max3421_ep; - int epnum, force_toggles = 0; + int epnum; struct usb_host_endpoint *ep; struct list_head *pos; unsigned long flags; @@ -777,7 +752,6 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) usb_settoggle(urb->dev, epnum, 0, 1); usb_settoggle(urb->dev, epnum, 1, 1); max3421_ep->pkt_state = PKT_STATE_SETUP; - force_toggles = 1; } else max3421_ep->pkt_state = PKT_STATE_TRANSFER; } @@ -785,7 +759,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) spin_unlock_irqrestore(&max3421_hcd->lock, flags);
max3421_ep->last_active = max3421_hcd->frame_number; - max3421_set_address(hcd, urb->dev, epnum, force_toggles); + max3421_set_address(hcd, urb->dev, epnum); max3421_set_speed(hcd, urb->dev); max3421_next_transfer(hcd, 0); return 1; @@ -1380,6 +1354,16 @@ max3421_urb_done(struct usb_hcd *hcd) status = 0; urb = max3421_hcd->curr_urb; if (urb) { + /* save the old end-points toggles: */ + u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); + int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; + int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; + int epnum = usb_endpoint_num(&urb->ep->desc); + + /* no locking: HCD (i.e., we) own toggles, don't we? */ + usb_settoggle(urb->dev, epnum, 0, rcvtog); + usb_settoggle(urb->dev, epnum, 1, sndtog); + max3421_hcd->curr_urb = NULL; spin_lock_irqsave(&max3421_hcd->lock, flags); usb_hcd_unlink_urb_from_ep(hcd, urb); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e9b18fc17617..151e93c4bd57 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1638,11 +1638,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) * Inform the usbcore about resume-in-progress by returning * a non-zero value even if there are no status changes. */ + spin_lock_irqsave(&xhci->lock, flags); + status = bus_state->resuming_ports;
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
- spin_lock_irqsave(&xhci->lock, flags); /* For each port, did anything change? If so, set that bit in buf. */ for (i = 0; i < max_ports; i++) { temp = readl(ports[i]->addr); diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c index 431213cdf9e0..f97ac9f52bf4 100644 --- a/drivers/usb/host/xhci-pci-renesas.c +++ b/drivers/usb/host/xhci-pci-renesas.c @@ -207,8 +207,7 @@ static int renesas_check_rom_state(struct pci_dev *pdev) return 0;
case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */ - dev_dbg(&pdev->dev, "Unknown ROM status ...\n"); - break; + return 0;
case RENESAS_ROM_STATUS_ERROR: /* Error State */ default: /* All other states are marked as "Reserved states" */ @@ -225,12 +224,13 @@ static int renesas_fw_check_running(struct pci_dev *pdev) u8 fw_state; int err;
- /* - * Only if device has ROM and loaded FW we can skip loading and - * return success. Otherwise (even unknown state), attempt to load FW. - */ - if (renesas_check_rom(pdev) && !renesas_check_rom_state(pdev)) - return 0; + /* Check if the device has a ROM with firmware loaded; if so, skip everything */ + err = renesas_check_rom(pdev); + if (err) { /* we have rom */ + err = renesas_check_rom_state(pdev); + if (!err) + return err; + }
/* * Test if the device is actually needing the firmware. As most diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 18c2bbddf080..1c9a7957c45c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -636,7 +636,14 @@ static const struct pci_device_id pci_ids[] = { { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, pci_ids); + +/* + * Without CONFIG_USB_XHCI_PCI_RENESAS renesas_xhci_check_request_fw() won't + * load firmware, so don't encumber the xhci-pci driver with it. + */ +#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) MODULE_FIRMWARE("renesas_usb_fw.mem"); +#endif
/* pci driver glue; this is a "new style" PCI driver module */ static struct pci_driver xhci_pci_driver = { diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index b5e7991dc7d9..a3c2b01ccf7b 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -101,6 +101,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo, #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); +static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable); +static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable); struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) { struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); @@ -123,6 +125,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) if (chan) { dmaengine_terminate_all(chan); usbhsf_dma_unmap(pkt); + } else { + if (usbhs_pipe_is_dir_in(pipe)) + usbhsf_rx_irq_ctrl(pipe, 0); + else + usbhsf_tx_irq_ctrl(pipe, 0); }
usbhs_pipe_clear_without_sequence(pipe, 0, 0); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fcb812bc832c..ea2e2d925a96 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ + { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ @@ -202,8 +203,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ - { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */ - { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */ + { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */ + { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 7608584ef4fe..0fbe253dc570 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_UC15 0x9090 /* These u-blox products use Qualcomm's vendor ID */ #define UBLOX_PRODUCT_R410M 0x90b2 +#define UBLOX_PRODUCT_R6XX 0x90fa /* These Yuga products use Qualcomm's vendor ID */ #define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -1101,6 +1102,8 @@ static const struct usb_device_id option_ids[] = { /* u-blox products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX), + .driver_info = RSVD(3) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index f9677a5ec31b..c35a6db993f1 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -45,6 +45,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Julian Sikorski <belegdol@gmail.com> */ +UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, + "LaCie", + "Rugged USB3-FW", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c index 6eaeba9b096e..e7745d1c2a5c 100644 --- a/drivers/usb/typec/stusb160x.c +++ b/drivers/usb/typec/stusb160x.c @@ -685,6 +685,15 @@ static int stusb160x_probe(struct i2c_client *client) if (!fwnode) return -ENODEV;
+ /* + * This fwnode has a "compatible" property, but is never populated as a + * struct device. Instead we simply parse it to read the properties. + * This breaks fw_devlink=on. To maintain backward compatibility + * with existing DT files, we work around this by deleting any + * fwnode_links to/from this fwnode. + */ + fw_devlink_purge_absent_suppliers(fwnode); + /* * When both VDD and VSYS power supplies are present, the low power * supply VSYS is selected when VSYS voltage is above 3.1 V. @@ -739,10 +748,6 @@ static int stusb160x_probe(struct i2c_client *client) typec_set_pwr_opmode(chip->port, chip->pwr_opmode);
if (client->irq) { - ret = stusb160x_irq_init(chip, client->irq); - if (ret) - goto port_unregister; - chip->role_sw = fwnode_usb_role_switch_get(fwnode); if (IS_ERR(chip->role_sw)) { ret = PTR_ERR(chip->role_sw); @@ -752,6 +757,10 @@ static int stusb160x_probe(struct i2c_client *client) ret); goto port_unregister; } + + ret = stusb160x_irq_init(chip, client->irq); + if (ret) + goto role_sw_put; } else { /* * If Source or Dual power role, need to enable VDD supply @@ -775,6 +784,9 @@ static int stusb160x_probe(struct i2c_client *client)
return 0;
+role_sw_put: + if (chip->role_sw) + usb_role_switch_put(chip->role_sw); port_unregister: typec_unregister_port(chip->port); all_reg_disable: diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 938219bc1b4b..21b3ae25c76d 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -629,6 +629,15 @@ static int tps6598x_probe(struct i2c_client *client) if (!fwnode) return -ENODEV;
+ /* + * This fwnode has a "compatible" property, but is never populated as a + * struct device. Instead we simply parse it to read the properties. + * This breaks fw_devlink=on. To maintain backward compatibility + * with existing DT files, we work around this by deleting any + * fwnode_links to/from this fwnode. + */ + fw_devlink_purge_absent_suppliers(fwnode); + tps->role_sw = fwnode_usb_role_switch_get(fwnode); if (IS_ERR(tps->role_sw)) { ret = PTR_ERR(tps->role_sw); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index d3c6bb22c5f4..a3f5de28be79 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -29,16 +29,11 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
static int afs_deliver_yfs_cb_callback(struct afs_call *);
-#define CM_NAME(name) \ - char afs_SRXCB##name##_name[] __tracepoint_string = \ - "CB." #name - /* * CB.CallBack operation type */ -static CM_NAME(CallBack); static const struct afs_call_type afs_SRXCBCallBack = { - .name = afs_SRXCBCallBack_name, + .name = "CB.CallBack", .deliver = afs_deliver_cb_callback, .destructor = afs_cm_destructor, .work = SRXAFSCB_CallBack, @@ -47,9 +42,8 @@ static const struct afs_call_type afs_SRXCBCallBack = { /* * CB.InitCallBackState operation type */ -static CM_NAME(InitCallBackState); static const struct afs_call_type afs_SRXCBInitCallBackState = { - .name = afs_SRXCBInitCallBackState_name, + .name = "CB.InitCallBackState", .deliver = afs_deliver_cb_init_call_back_state, .destructor = afs_cm_destructor, .work = SRXAFSCB_InitCallBackState, @@ -58,9 +52,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = { /* * CB.InitCallBackState3 operation type */ -static CM_NAME(InitCallBackState3); static const struct afs_call_type afs_SRXCBInitCallBackState3 = { - .name = afs_SRXCBInitCallBackState3_name, + .name = "CB.InitCallBackState3", .deliver = afs_deliver_cb_init_call_back_state3, .destructor = afs_cm_destructor, .work = SRXAFSCB_InitCallBackState, @@ -69,9 +62,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = { /* * CB.Probe operation type */ -static CM_NAME(Probe); static const struct afs_call_type afs_SRXCBProbe = { - .name = afs_SRXCBProbe_name, + .name = "CB.Probe", .deliver = afs_deliver_cb_probe, .destructor = afs_cm_destructor, .work = SRXAFSCB_Probe, @@ -80,9 +72,8 @@ static const struct afs_call_type afs_SRXCBProbe = { /* * CB.ProbeUuid operation type */ -static CM_NAME(ProbeUuid); static const struct afs_call_type afs_SRXCBProbeUuid = { - .name = afs_SRXCBProbeUuid_name, + .name = "CB.ProbeUuid", .deliver = afs_deliver_cb_probe_uuid, .destructor = afs_cm_destructor, .work = SRXAFSCB_ProbeUuid, @@ -91,9 +82,8 @@ static const struct afs_call_type afs_SRXCBProbeUuid = { /* * CB.TellMeAboutYourself operation type */ -static CM_NAME(TellMeAboutYourself); static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { - .name = afs_SRXCBTellMeAboutYourself_name, + .name = "CB.TellMeAboutYourself", .deliver = afs_deliver_cb_tell_me_about_yourself, .destructor = afs_cm_destructor, .work = SRXAFSCB_TellMeAboutYourself, @@ -102,9 +92,8 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { /* * YFS CB.CallBack operation type */ -static CM_NAME(YFS_CallBack); static const struct afs_call_type afs_SRXYFSCB_CallBack = { - .name = afs_SRXCBYFS_CallBack_name, + .name = "YFSCB.CallBack", .deliver = afs_deliver_yfs_cb_callback, .destructor = afs_cm_destructor, .work = SRXAFSCB_CallBack, diff --git a/fs/afs/write.c b/fs/afs/write.c index 3104b62c2082..c0534697268e 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -771,14 +771,20 @@ int afs_writepages(struct address_space *mapping, if (wbc->range_cyclic) { start = mapping->writeback_index * PAGE_SIZE; ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next); - if (start > 0 && wbc->nr_to_write > 0 && ret == 0) - ret = afs_writepages_region(mapping, wbc, 0, start, - &next); - mapping->writeback_index = next / PAGE_SIZE; + if (ret == 0) { + mapping->writeback_index = next / PAGE_SIZE; + if (start > 0 && wbc->nr_to_write > 0) { + ret = afs_writepages_region(mapping, wbc, 0, + start, &next); + if (ret == 0) + mapping->writeback_index = + next / PAGE_SIZE; + } + } } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { ret = 
afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next); - if (wbc->nr_to_write > 0) - mapping->writeback_index = next; + if (wbc->nr_to_write > 0 && ret == 0) + mapping->writeback_index = next / PAGE_SIZE; } else { ret = afs_writepages_region(mapping, wbc, wbc->range_start, wbc->range_end, &next); diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 117d423fdb93..e656134cadaa 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1488,15 +1488,15 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, int btrfs_find_all_roots(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 time_seq, struct ulist **roots, - bool ignore_offset) + bool ignore_offset, bool skip_commit_root_sem) { int ret;
- if (!trans) + if (!trans && !skip_commit_root_sem) down_read(&fs_info->commit_root_sem); ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr, time_seq, roots, ignore_offset); - if (!trans) + if (!trans && !skip_commit_root_sem) up_read(&fs_info->commit_root_sem); return ret; } diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 17abde7f794c..ff5f07f9940b 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -47,7 +47,8 @@ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans, const u64 *extent_item_pos, bool ignore_offset); int btrfs_find_all_roots(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, - u64 time_seq, struct ulist **roots, bool ignore_offset); + u64 time_seq, struct ulist **roots, bool ignore_offset, + bool skip_commit_root_sem); char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, u32 name_len, unsigned long name_off, struct extent_buffer *eb_in, u64 parent, diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index c92d9d4f5f46..6689cac64c15 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -1000,7 +1000,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans, kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
if (qrecord_inserted) - btrfs_qgroup_trace_extent_post(fs_info, record); + btrfs_qgroup_trace_extent_post(trans, record);
return 0; } @@ -1095,7 +1095,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
if (qrecord_inserted) - return btrfs_qgroup_trace_extent_post(fs_info, record); + return btrfs_qgroup_trace_extent_post(trans, record); return 0; }
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d2f39a122d89..7e79467c08ff 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6034,6 +6034,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) mutex_lock(&fs_info->fs_devices->device_list_mutex); devices = &fs_info->fs_devices->devices; list_for_each_entry(device, devices, dev_list) { + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) + continue; + ret = btrfs_trim_free_extents(device, &group_trimmed); if (ret) { dev_failed++; diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 3ded812f522c..dde8b8334d29 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1704,17 +1704,39 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, return 0; }
-int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, +int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, struct btrfs_qgroup_extent_record *qrecord) { struct ulist *old_root; u64 bytenr = qrecord->bytenr; int ret;
- ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false); + /* + * We are always called in a context where we are already holding a + * transaction handle. Often we are called when adding a data delayed + * reference from btrfs_truncate_inode_items() (truncating or unlinking), + * in which case we will be holding a write lock on extent buffer from a + * subvolume tree. In this case we can't allow btrfs_find_all_roots() to + * acquire fs_info->commit_root_sem, because that is a higher level lock + * that must be acquired before locking any extent buffers. + * + * So we want btrfs_find_all_roots() to not acquire the commit_root_sem + * but we can't pass it a non-NULL transaction handle, because otherwise + * it would not use commit roots and would lock extent buffers, causing + * a deadlock if it ends up trying to read lock the same extent buffer + * that was previously write locked at btrfs_truncate_inode_items(). + * + * So pass a NULL transaction handle to btrfs_find_all_roots() and + * explicitly tell it to not acquire the commit_root_sem - if we are + * holding a transaction handle we don't need its protection. + */ + ASSERT(trans != NULL); + + ret = btrfs_find_all_roots(NULL, trans->fs_info, bytenr, 0, &old_root, + false, true); if (ret < 0) { - fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; - btrfs_warn(fs_info, + trans->fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; + btrfs_warn(trans->fs_info, "error accounting new delayed refs extent (err code: %d), quota inconsistent", ret); return 0; @@ -1758,7 +1780,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr, kfree(record); return 0; } - return btrfs_qgroup_trace_extent_post(fs_info, record); + return btrfs_qgroup_trace_extent_post(trans, record); }
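The new skip_commit_root_sem argument lets btrfs_qgroup_trace_extent_post() call btrfs_find_all_roots() with a NULL transaction handle yet without taking fs_info->commit_root_sem, avoiding the lock-ordering problem described in the comment above. The generic shape of such a conditionally-taken shared lock, sketched with pthreads purely for illustration (none of this is btrfs code):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t commit_root_sem = PTHREAD_RWLOCK_INITIALIZER;

/* stand-in for btrfs_find_all_roots_safe() */
static int find_all_roots_safe(void) { return 0; }

/*
 * Take the shared lock only when the caller is neither covered by a
 * transaction handle nor explicitly opting out of the lock.
 */
static int find_all_roots(int have_trans, int skip_commit_root_sem)
{
	int ret;

	if (!have_trans && !skip_commit_root_sem)
		pthread_rwlock_rdlock(&commit_root_sem);

	ret = find_all_roots_safe();

	if (!have_trans && !skip_commit_root_sem)
		pthread_rwlock_unlock(&commit_root_sem);

	return ret;
}

int main(void)
{
	find_all_roots(0, 0);	/* qgroup accounting path: take the lock */
	find_all_roots(0, 1);	/* delayed-ref tracing path: skip it */
	printf("done\n");
	return 0;
}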
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, @@ -2629,7 +2651,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) /* Search commit root to find old_roots */ ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0, - &record->old_roots, false); + &record->old_roots, false, false); if (ret < 0) goto cleanup; } @@ -2645,7 +2667,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) * current root. It's safe inside commit_transaction(). */ ret = btrfs_find_all_roots(trans, fs_info, - record->bytenr, BTRFS_SEQ_LAST, &new_roots, false); + record->bytenr, BTRFS_SEQ_LAST, &new_roots, false, false); if (ret < 0) goto cleanup; if (qgroup_to_skip) { @@ -3179,7 +3201,7 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans, num_bytes = found.offset;
ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0, - &roots, false); + &roots, false, false); if (ret < 0) goto out; /* For rescan, just pass old_roots as NULL */ diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h index 7283e4f549af..880e9df0dac1 100644 --- a/fs/btrfs/qgroup.h +++ b/fs/btrfs/qgroup.h @@ -298,7 +298,7 @@ int btrfs_qgroup_trace_extent_nolock( * using current root, then we can move all expensive backref walk out of * transaction committing, but not now as qgroup accounting will be wrong again. */ -int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, +int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans, struct btrfs_qgroup_extent_record *qrecord);
/* diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index f3137285a9e2..98b5aaba46f1 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -224,7 +224,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root, * quota. */ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -237,7 +237,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root, return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); @@ -261,7 +261,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root, new_roots = NULL;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -273,7 +273,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root, return -EINVAL;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); @@ -325,7 +325,7 @@ static int test_multiple_refs(struct btrfs_root *root, }
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -338,7 +338,7 @@ static int test_multiple_refs(struct btrfs_root *root, return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); @@ -360,7 +360,7 @@ static int test_multiple_refs(struct btrfs_root *root, }
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -373,7 +373,7 @@ static int test_multiple_refs(struct btrfs_root *root, return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); @@ -401,7 +401,7 @@ static int test_multiple_refs(struct btrfs_root *root, }
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, - false); + false, false); if (ret) { ulist_free(old_roots); test_err("couldn't find old roots: %d", ret); @@ -414,7 +414,7 @@ static int test_multiple_refs(struct btrfs_root *root, return ret;
ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, - false); + false, false); if (ret) { ulist_free(old_roots); ulist_free(new_roots); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index b5ed0a699ee5..f08375cb871e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -5515,16 +5515,29 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, spin_lock(&inode->lock); inode->logged_trans = trans->transid; /* - * Don't update last_log_commit if we logged that an inode exists - * after it was loaded to memory (full_sync bit set). - * This is to prevent data loss when we do a write to the inode, - * then the inode gets evicted after all delalloc was flushed, - * then we log it exists (due to a rename for example) and then - * fsync it. This last fsync would do nothing (not logging the - * extents previously written). + * Don't update last_log_commit if we logged that an inode exists. + * We do this for two reasons: + * + * 1) We might have had buffered writes to this inode that were + * flushed and had their ordered extents completed in this + * transaction, but we did not previously log the inode with + * LOG_INODE_ALL. Later the inode was evicted and after that + * it was loaded again and this LOG_INODE_EXISTS log operation + * happened. We must make sure that if an explicit fsync against + * the inode is performed later, it logs the new extents, an + * updated inode item, etc, and syncs the log. The same logic + * applies to direct IO writes instead of buffered writes. + * + * 2) When we log the inode with LOG_INODE_EXISTS, its inode item + * is logged with an i_size of 0 or whatever value was logged + * before. If later the i_size of the inode is increased by a + * truncate operation, the log is synced through an fsync of + * some other inode and then finally an explicit fsync against + * this inode is made, we must make sure this fsync logs the + * inode with the new i_size, the hole between old i_size and + * the new i_size, and syncs the log. */ - if (inode_only != LOG_INODE_EXISTS || - !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags)) + if (inode_only != LOG_INODE_EXISTS) inode->last_log_commit = inode->last_sub_trans; spin_unlock(&inode->lock); } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index e5af591d3bd4..86f09b1110a2 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4468,7 +4468,7 @@ bool check_session_state(struct ceph_mds_session *s) break; case CEPH_MDS_SESSION_CLOSING: /* Should never reach this when we're unmounting */ - WARN_ON_ONCE(true); + WARN_ON_ONCE(s->s_ttl); fallthrough; case CEPH_MDS_SESSION_NEW: case CEPH_MDS_SESSION_RESTARTING: diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 903de7449aa3..64cad843ce72 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -3613,7 +3613,7 @@ static int smb3_simple_fallocate_write_range(unsigned int xid, char *buf) { struct cifs_io_parms io_parms = {0}; - int nbytes; + int rc, nbytes; struct kvec iov[2];
io_parms.netfid = cfile->fid.netfid; @@ -3621,13 +3621,25 @@ static int smb3_simple_fallocate_write_range(unsigned int xid, io_parms.tcon = tcon; io_parms.persistent_fid = cfile->fid.persistent_fid; io_parms.volatile_fid = cfile->fid.volatile_fid; - io_parms.offset = off; - io_parms.length = len;
- /* iov[0] is reserved for smb header */ - iov[1].iov_base = buf; - iov[1].iov_len = io_parms.length; - return SMB2_write(xid, &io_parms, &nbytes, iov, 1); + while (len) { + io_parms.offset = off; + io_parms.length = len; + if (io_parms.length > SMB2_MAX_BUFFER_SIZE) + io_parms.length = SMB2_MAX_BUFFER_SIZE; + /* iov[0] is reserved for smb header */ + iov[1].iov_base = buf; + iov[1].iov_len = io_parms.length; + rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1); + if (rc) + break; + if (nbytes > len) + return -EINVAL; + buf += nbytes; + off += nbytes; + len -= nbytes; + } + return rc; }
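smb3_simple_fallocate_write_range() now issues the zero-filled buffer in chunks no larger than SMB2_MAX_BUFFER_SIZE rather than one oversized SMB2_write(). The chunking pattern in isolation, with a stubbed write call and an invented chunk size, might look like the following userspace sketch:

#include <stdio.h>
#include <string.h>

#define MAX_CHUNK	65536	/* stand-in for SMB2_MAX_BUFFER_SIZE */

/* stub: pretend the server accepted the whole request */
static int do_write(long long off, size_t len, const char *buf, size_t *nbytes)
{
	(void)off; (void)buf;
	*nbytes = len;
	return 0;
}

static int write_range(long long off, size_t len, const char *buf)
{
	size_t nbytes, chunk;
	int rc = 0;

	while (len) {
		chunk = len > MAX_CHUNK ? MAX_CHUNK : len;
		rc = do_write(off, chunk, buf, &nbytes);
		if (rc)
			break;
		if (nbytes > chunk)
			return -1;	/* server claims more than we asked: bail out */
		buf += nbytes;
		off += nbytes;
		len -= nbytes;
	}
	return rc;
}

int main(void)
{
	static char buf[1024 * 1024];	/* zero-filled, like the kzalloc'd buffer */
	printf("rc = %d\n", write_range(0, sizeof(buf), buf));
	return 0;
}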
static int smb3_simple_fallocate_range(unsigned int xid, @@ -3651,11 +3663,6 @@ static int smb3_simple_fallocate_range(unsigned int xid, (char **)&out_data, &out_data_len); if (rc) goto out; - /* - * It is already all allocated - */ - if (out_data_len == 0) - goto out;
buf = kzalloc(1024 * 1024, GFP_KERNEL); if (buf == NULL) { @@ -3778,6 +3785,24 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, goto out; }
+ if (keep_size == true) { + /* + * We can not preallocate pages beyond the end of the file + * in SMB2 + */ + if (off >= i_size_read(inode)) { + rc = 0; + goto out; + } + /* + * For fallocates that are partially beyond the end of file, + * clamp len so we only fallocate up to the end of file. + */ + if (off + len > i_size_read(inode)) { + len = i_size_read(inode) - off; + } + } + if ((keep_size == true) || (i_size_read(inode) >= off + len)) { /* * At this point, we are trying to fallocate an internal diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 30dee68458c7..f7b80307b8f8 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -77,7 +77,7 @@ enum hugetlb_param { static const struct fs_parameter_spec hugetlb_fs_parameters[] = { fsparam_u32 ("gid", Opt_gid), fsparam_string("min_size", Opt_min_size), - fsparam_u32 ("mode", Opt_mode), + fsparam_u32oct("mode", Opt_mode), fsparam_string("nr_inodes", Opt_nr_inodes), fsparam_string("pagesize", Opt_pagesize), fsparam_string("size", Opt_size), diff --git a/fs/io_uring.c b/fs/io_uring.c index eeea6b8c8bee..df4288776815 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4805,6 +4805,7 @@ IO_NETOP_FN(recv); struct io_poll_table { struct poll_table_struct pt; struct io_kiocb *req; + int nr_entries; int error; };
@@ -5002,11 +5003,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, struct io_kiocb *req = pt->req;
/* - * If poll->head is already set, it's because the file being polled - * uses multiple waitqueues for poll handling (eg one for read, one - * for write). Setup a separate io_poll_iocb if this happens. + * The file being polled uses multiple waitqueues for poll handling + * (e.g. one for read, one for write). Setup a separate io_poll_iocb + * if this happens. */ - if (unlikely(poll->head)) { + if (unlikely(pt->nr_entries)) { struct io_poll_iocb *poll_one = poll;
/* already have a 2nd entry, fail a third attempt */ @@ -5034,7 +5035,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, *poll_ptr = poll; }
- pt->error = 0; + pt->nr_entries++; poll->head = head;
if (poll->events & EPOLLEXCLUSIVE) @@ -5112,11 +5113,16 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
ipt->pt._key = mask; ipt->req = req; - ipt->error = -EINVAL; + ipt->error = 0; + ipt->nr_entries = 0;
mask = vfs_poll(req->file, &ipt->pt) & poll->events; + if (unlikely(!ipt->nr_entries) && !ipt->error) + ipt->error = -EINVAL;
spin_lock_irq(&ctx->completion_lock); + if (ipt->error) + io_poll_remove_double(req); if (likely(poll->head)) { spin_lock(&poll->head->lock); if (unlikely(list_empty(&poll->wait.entry))) { @@ -6876,7 +6882,8 @@ static int io_sq_thread(void *data) }
prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE); - if (!test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) { + if (!test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) && + !io_run_task_work()) { list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) io_ring_set_wakeup_flag(ctx);
@@ -7859,15 +7866,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, struct io_wq_data data; unsigned int concurrency;
+ mutex_lock(&ctx->uring_lock); hash = ctx->hash_map; if (!hash) { hash = kzalloc(sizeof(*hash), GFP_KERNEL); - if (!hash) + if (!hash) { + mutex_unlock(&ctx->uring_lock); return ERR_PTR(-ENOMEM); + } refcount_set(&hash->refs, 1); init_waitqueue_head(&hash->wait); ctx->hash_map = hash; } + mutex_unlock(&ctx->uring_lock);
data.hash = hash; data.task = task; @@ -7942,9 +7953,11 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx, f = fdget(p->wq_fd); if (!f.file) return -ENXIO; - fdput(f); - if (f.file->f_op != &io_uring_fops) + if (f.file->f_op != &io_uring_fops) { + fdput(f); return -EINVAL; + } + fdput(f); } if (ctx->flags & IORING_SETUP_SQPOLL) { struct task_struct *tsk; diff --git a/fs/proc/base.c b/fs/proc/base.c index 9cbd915025ad..a0a2fc1c9da2 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -854,7 +854,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
while (count > 0) { - int this_len = min_t(int, count, PAGE_SIZE); + size_t this_len = min_t(size_t, count, PAGE_SIZE);
if (write && copy_from_user(page, buf, this_len)) { copied = -EFAULT; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 14f92285d04f..8fc8bbf9635b 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1236,23 +1236,21 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, }
static __always_inline int validate_range(struct mm_struct *mm, - __u64 *start, __u64 len) + __u64 start, __u64 len) { __u64 task_size = mm->task_size;
- *start = untagged_addr(*start); - - if (*start & ~PAGE_MASK) + if (start & ~PAGE_MASK) return -EINVAL; if (len & ~PAGE_MASK) return -EINVAL; if (!len) return -EINVAL; - if (*start < mmap_min_addr) + if (start < mmap_min_addr) return -EINVAL; - if (*start >= task_size) + if (start >= task_size) return -EINVAL; - if (len > task_size - *start) + if (len > task_size - start) return -EINVAL; return 0; } @@ -1313,7 +1311,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, vm_flags |= VM_UFFD_MINOR; }
- ret = validate_range(mm, &uffdio_register.range.start, + ret = validate_range(mm, uffdio_register.range.start, uffdio_register.range.len); if (ret) goto out; @@ -1519,7 +1517,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) goto out;
- ret = validate_range(mm, &uffdio_unregister.start, + ret = validate_range(mm, uffdio_unregister.start, uffdio_unregister.len); if (ret) goto out; @@ -1668,7 +1666,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx, if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) goto out;
- ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len); + ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); if (ret) goto out;
@@ -1708,7 +1706,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx, sizeof(uffdio_copy)-sizeof(__s64))) goto out;
- ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len); + ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); if (ret) goto out; /* @@ -1765,7 +1763,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, sizeof(uffdio_zeropage)-sizeof(__s64))) goto out;
- ret = validate_range(ctx->mm, &uffdio_zeropage.range.start, + ret = validate_range(ctx->mm, uffdio_zeropage.range.start, uffdio_zeropage.range.len); if (ret) goto out; @@ -1815,7 +1813,7 @@ static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, sizeof(struct uffdio_writeprotect))) return -EFAULT;
- ret = validate_range(ctx->mm, &uffdio_wp.range.start, + ret = validate_range(ctx->mm, uffdio_wp.range.start, uffdio_wp.range.len); if (ret) return ret; @@ -1863,7 +1861,7 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) sizeof(uffdio_continue) - (sizeof(__s64)))) goto out;
- ret = validate_range(ctx->mm, &uffdio_continue.range.start, + ret = validate_range(ctx->mm, uffdio_continue.range.start, uffdio_continue.range.len); if (ret) goto out; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 3a82faac5767..394eb42d0ec1 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -698,11 +698,6 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); * @hrv: Hardware Revision of the device, pass -1 to not check _HRV * * The caller is responsible for invoking acpi_dev_put() on the returned device. - * - * FIXME: Due to above requirement there is a window that may invalidate @adev - * and next iteration will use a dangling pointer, e.g. in the case of a - * hotplug event. That said, the caller should ensure that this will never - * happen. */ #define for_each_acpi_dev_match(adev, hid, uid, hrv) \ for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \ @@ -716,7 +711,8 @@ static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev)
static inline void acpi_dev_put(struct acpi_device *adev) { - put_device(&adev->dev); + if (adev) + put_device(&adev->dev); } #else /* CONFIG_ACPI */
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index 10100a4bbe2a..afb27cb6a7bd 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, unsigned long arg);
#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n) #define DRM_MAJOR 226
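[ Note: the new DRM_IOCTL_TYPE() helper exposes the _IOC_TYPE bits of an ioctl command the same way DRM_IOCTL_NR() exposes the number bits; the drm_ioctl.c change in this release presumably uses it to reject commands whose type field does not belong to DRM. A small user-space illustration of extracting those fields follows; DRM_IOCTL_BASE is assumed here to be 'd', as in drm.h, and the command value is arbitrary. ]

#include <stdio.h>
#include <linux/ioctl.h>		/* _IO, _IOC_TYPE, _IOC_NR */

/* Assumed values for illustration; the real ones live in drm.h/drm_ioctl.h. */
#define DRM_IOCTL_BASE 'd'
#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n)
#define DRM_IOCTL_NR(n)   _IOC_NR(n)

static int looks_like_drm_ioctl(unsigned int cmd)
{
	/* A core DRM ioctl must carry the DRM type in its type field. */
	return DRM_IOCTL_TYPE(cmd) == DRM_IOCTL_BASE;
}

int main(void)
{
	unsigned int fake = _IO('d', 0x0c);	/* arbitrary command in the DRM type */

	printf("type=0x%x nr=0x%x drm=%d\n",
	       DRM_IOCTL_TYPE(fake), DRM_IOCTL_NR(fake), looks_like_drm_ioctl(fake));
	return 0;
}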
/** diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 832b49b50c7b..fc80a40f5053 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -329,6 +329,7 @@ static inline void memcpy_to_page(struct page *page, size_t offset,
VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to + offset, from, len); + flush_dcache_page(page); kunmap_local(to); }
@@ -336,6 +337,7 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len) { char *addr = kmap_atomic(page); memset(addr + offset, 0, len); + flush_dcache_page(page); kunmap_atomic(addr); }
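[ Note: both memcpy_to_page() and memzero_page() now flush the destination page after writing through the temporary kernel mapping, so architectures with aliasing data caches see the new bytes through other mappings of the same page. The same pattern, as it might appear in a driver helper, is sketched below; the helper itself is illustrative, only kmap_local_page(), flush_dcache_page() and kunmap_local() are real interfaces. ]

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative helper (not in the tree): copy a buffer into a page through
 * a temporary mapping and make the update visible to other mappings.
 */
static void copy_buf_to_page(struct page *page, size_t offset,
			     const void *from, size_t len)
{
	char *to = kmap_local_page(page);

	memcpy(to + offset, from, len);
	/*
	 * On architectures with aliasing D-caches the write above only
	 * landed in the kernel alias; flush so user mappings observe it.
	 */
	flush_dcache_page(page);
	kunmap_local(to);
}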
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index acee44b9db26..0f06c2287b52 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -22,14 +22,10 @@ #define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 +#define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 #define MARVELL_PHY_ID_88X2222 0x01410f10
-/* PHY IDs and mask for Alaska 10G PHYs */ -#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8 -#define MARVELL_PHY_ID_88X3310 0x002b09a0 -#define MARVELL_PHY_ID_88X3340 0x002b09a8 - /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 5984fff3f175..0df96a399d67 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -207,7 +207,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range(i, p_start, p_end) \ __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
/** * for_each_mem_range_rev - reverse iterate through memblock areas from @@ -218,7 +218,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range_rev(i, p_start, p_end) \ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
/** * for_each_reserved_mem_range - iterate over all reserved memblock areas diff --git a/include/net/bonding.h b/include/net/bonding.h index 019e998d944a..a02b19843819 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -201,6 +201,11 @@ struct bond_up_slave { */ #define BOND_LINK_NOCHANGE -1
+struct bond_ipsec { + struct list_head list; + struct xfrm_state *xs; +}; + /* * Here are the locking policies for the two bonding locks: * Get rcu_read_lock when reading or RTNL when writing slave list. @@ -249,7 +254,9 @@ struct bonding { #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; #ifdef CONFIG_XFRM_OFFLOAD - struct xfrm_state *xs; + struct list_head ipsec_list; + /* protecting ipsec_list */ + spinlock_t ipsec_lock; #endif /* CONFIG_XFRM_OFFLOAD */ };
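[ Note: struct bonding now keeps its offloaded xfrm states on a spinlock-protected list instead of in a single xs pointer, so every state can be found again when the active slave changes. A minimal sketch of that list-plus-lock pattern follows, with invented demo_* names rather than the bonding code itself; GFP_ATOMIC is used only because the real driver may add entries from non-sleeping context. ]

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

/* Illustrative only: a lock-protected list of per-state entries. */
struct demo_entry {
	struct list_head list;
	void *state;			/* stands in for struct xfrm_state * */
};

struct demo_owner {
	struct list_head entries;
	spinlock_t lock;		/* protects 'entries' */
};

static void demo_init(struct demo_owner *o)
{
	INIT_LIST_HEAD(&o->entries);
	spin_lock_init(&o->lock);
}

static int demo_add(struct demo_owner *o, void *state)
{
	struct demo_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return -ENOMEM;
	e->state = state;
	spin_lock_bh(&o->lock);
	list_add(&e->list, &o->entries);
	spin_unlock_bh(&o->lock);
	return 0;
}

static void demo_del(struct demo_owner *o, void *state)
{
	struct demo_entry *e, *tmp;

	spin_lock_bh(&o->lock);
	list_for_each_entry_safe(e, tmp, &o->entries, list) {
		if (e->state == state) {
			list_del(&e->list);
			kfree(e);
			break;
		}
	}
	spin_unlock_bh(&o->lock);
}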
diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 83f23774b908..f1d798ff29e9 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -101,7 +101,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, unsigned int *size, unsigned int remaining, struct mptcp_out_options *opts); -void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb); +bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, struct mptcp_out_options *opts); @@ -223,9 +223,10 @@ static inline bool mptcp_established_options(struct sock *sk, return false; }
-static inline void mptcp_incoming_options(struct sock *sk, +static inline bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) { + return true; }
static inline void mptcp_skb_ext_move(struct sk_buff *to, diff --git a/include/sound/soc.h b/include/sound/soc.h index e746da996351..723eeb1c3f78 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -712,6 +712,12 @@ struct snd_soc_dai_link { /* Do not create a PCM for this DAI link (Backend link) */ unsigned int ignore:1;
+ /* This flag will reorder stop sequence. By enabling this flag + * DMA controller stop sequence will be invoked first followed by + * CPU DAI driver stop sequence + */ + unsigned int stop_dma_first:1; + #ifdef CONFIG_SND_SOC_TOPOLOGY struct snd_soc_dobj dobj; /* For topology */ #endif diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 3ccf591b2374..9f73ed2cf061 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -174,6 +174,34 @@ enum afs_vl_operation { afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */ };
+enum afs_cm_operation { + afs_CB_CallBack = 204, /* AFS break callback promises */ + afs_CB_InitCallBackState = 205, /* AFS initialise callback state */ + afs_CB_Probe = 206, /* AFS probe client */ + afs_CB_GetLock = 207, /* AFS get contents of CM lock table */ + afs_CB_GetCE = 208, /* AFS get cache file description */ + afs_CB_GetXStatsVersion = 209, /* AFS get version of extended statistics */ + afs_CB_GetXStats = 210, /* AFS get contents of extended statistics data */ + afs_CB_InitCallBackState3 = 213, /* AFS initialise callback state, version 3 */ + afs_CB_ProbeUuid = 214, /* AFS check the client hasn't rebooted */ +}; + +enum yfs_cm_operation { + yfs_CB_Probe = 206, /* YFS probe client */ + yfs_CB_GetLock = 207, /* YFS get contents of CM lock table */ + yfs_CB_XStatsVersion = 209, /* YFS get version of extended statistics */ + yfs_CB_GetXStats = 210, /* YFS get contents of extended statistics data */ + yfs_CB_InitCallBackState3 = 213, /* YFS initialise callback state, version 3 */ + yfs_CB_ProbeUuid = 214, /* YFS check the client hasn't rebooted */ + yfs_CB_GetServerPrefs = 215, + yfs_CB_GetCellServDV = 216, + yfs_CB_GetLocalCell = 217, + yfs_CB_GetCacheConfig = 218, + yfs_CB_GetCellByNum = 65537, + yfs_CB_TellMeAboutYourself = 65538, /* get client capabilities */ + yfs_CB_CallBack = 64204, +}; + enum afs_edit_dir_op { afs_edit_dir_create, afs_edit_dir_create_error, @@ -436,6 +464,32 @@ enum afs_cb_break_reason { EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \ E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
+#define afs_cm_operations \ + EM(afs_CB_CallBack, "CB.CallBack") \ + EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \ + EM(afs_CB_Probe, "CB.Probe") \ + EM(afs_CB_GetLock, "CB.GetLock") \ + EM(afs_CB_GetCE, "CB.GetCE") \ + EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \ + EM(afs_CB_GetXStats, "CB.GetXStats") \ + EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \ + E_(afs_CB_ProbeUuid, "CB.ProbeUuid") + +#define yfs_cm_operations \ + EM(yfs_CB_Probe, "YFSCB.Probe") \ + EM(yfs_CB_GetLock, "YFSCB.GetLock") \ + EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \ + EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \ + EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \ + EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \ + EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \ + EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \ + EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \ + EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \ + EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \ + EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \ + E_(yfs_CB_CallBack, "YFSCB.CallBack") + #define afs_edit_dir_ops \ EM(afs_edit_dir_create, "create") \ EM(afs_edit_dir_create_error, "c_fail") \ @@ -569,6 +623,8 @@ afs_server_traces; afs_cell_traces; afs_fs_operations; afs_vl_operations; +afs_cm_operations; +yfs_cm_operations; afs_edit_dir_ops; afs_edit_dir_reasons; afs_eproto_causes; @@ -649,20 +705,21 @@ TRACE_EVENT(afs_cb_call,
TP_STRUCT__entry( __field(unsigned int, call ) - __field(const char *, name ) __field(u32, op ) + __field(u16, service_id ) ),
TP_fast_assign( __entry->call = call->debug_id; - __entry->name = call->type->name; __entry->op = call->operation_ID; + __entry->service_id = call->service_id; ),
- TP_printk("c=%08x %s o=%u", + TP_printk("c=%08x %s", __entry->call, - __entry->name, - __entry->op) + __entry->service_id == 2501 ? + __print_symbolic(__entry->op, yfs_cm_operations) : + __print_symbolic(__entry->op, afs_cm_operations)) );
TRACE_EVENT(afs_call, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d8a6fcd28e39..e6db39a00de2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3675,6 +3675,8 @@ static int check_max_stack_depth(struct bpf_verifier_env *env) if (tail_call_reachable) for (j = 0; j < frame; j++) subprog[ret_prog[j]].tail_call_reachable = true; + if (subprog[0].tail_call_reachable) + env->prog->aux->tail_call_reachable = true;
/* end of for() loop means the last insn of the 'subprog' * was reached. Doesn't matter whether it was JA or EXIT diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c index 910ae69cae77..af4a6ef48ce0 100644 --- a/kernel/dma/ops_helpers.c +++ b/kernel/dma/ops_helpers.c @@ -5,6 +5,13 @@ */ #include <linux/dma-map-ops.h>
+static struct page *dma_common_vaddr_to_page(void *cpu_addr) +{ + if (is_vmalloc_addr(cpu_addr)) + return vmalloc_to_page(cpu_addr); + return virt_to_page(cpu_addr); +} + /* * Create scatter-list for the already allocated DMA buffer. */ @@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct page *page = virt_to_page(cpu_addr); + struct page *page = dma_common_vaddr_to_page(cpu_addr); int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL); @@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; + struct page *page = dma_common_vaddr_to_page(cpu_addr); int ret = -ENXIO;
vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); @@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, return -ENXIO;
return remap_pfn_range(vma, vma->vm_start, - page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff, + page_to_pfn(page) + vma->vm_pgoff, user_count << PAGE_SHIFT, vma->vm_page_prot); #else return -ENXIO; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 3bb96a8b49c9..aa52fc85dbcb 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -991,6 +991,11 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer) if (!p) goto out;
+ /* Protect timer list r/w in arm_timer() */ + sighand = lock_task_sighand(p, &flags); + if (unlikely(sighand == NULL)) + goto out; + /* * Fetch the current sample and update the timer's expiry time. */ @@ -1001,11 +1006,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
bump_cpu_timer(timer, now);
- /* Protect timer list r/w in arm_timer() */ - sighand = lock_task_sighand(p, &flags); - if (unlikely(sighand == NULL)) - goto out; - /* * Now re-arm for the new expiry time. */ diff --git a/kernel/time/timer.c b/kernel/time/timer.c index d111adf4a0cb..99b97ccefdbd 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -207,6 +207,7 @@ struct timer_base { unsigned int cpu; bool next_expiry_recalc; bool is_idle; + bool timers_pending; DECLARE_BITMAP(pending_map, WHEEL_SIZE); struct hlist_head vectors[WHEEL_SIZE]; } ____cacheline_aligned; @@ -595,6 +596,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer, * can reevaluate the wheel: */ base->next_expiry = bucket_expiry; + base->timers_pending = true; base->next_expiry_recalc = false; trigger_dyntick_cpu(base, timer); } @@ -1596,6 +1598,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base) }
base->next_expiry_recalc = false; + base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);
return next; } @@ -1647,7 +1650,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); u64 expires = KTIME_MAX; unsigned long nextevt; - bool is_max_delta;
/* * Pretend that there is no timer pending if the cpu is offline. @@ -1660,7 +1662,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) if (base->next_expiry_recalc) base->next_expiry = __next_timer_interrupt(base); nextevt = base->next_expiry; - is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
/* * We have a fresh next event. Check whether we can forward the @@ -1678,7 +1679,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) expires = basem; base->is_idle = false; } else { - if (!is_max_delta) + if (base->timers_pending) expires = basem + (u64)(nextevt - basej) * TICK_NSEC; /* * If we expect to sleep more than a tick, mark the base idle. @@ -1961,6 +1962,7 @@ int timers_prepare_cpu(unsigned int cpu) base = per_cpu_ptr(&timer_bases[b], cpu); base->clk = jiffies; base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; + base->timers_pending = false; base->is_idle = false; } return 0; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 2c0ee6484990..16a8d8da29f7 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3880,10 +3880,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) if (unlikely(!head)) return true;
- return reader->read == rb_page_commit(reader) && - (commit == reader || - (commit == head && - head->read == rb_page_commit(commit))); + /* Reader should exhaust content in reader page */ + if (reader->read != rb_page_commit(reader)) + return false; + + /* + * If writers are committing on the reader page, knowing all + * committed content has been read, the ring buffer is empty. + */ + if (commit == reader) + return true; + + /* + * If writers are committing on a page other than reader page + * and head page, there should always be content to read. + */ + if (commit != head) + return false; + + /* + * Writers are committing on the head page, we just need + * to care about there're committed data, and the reader will + * swap reader page with head page when it is to read data. + */ + return rb_page_commit(commit) == 0; }
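[ Note: the rewritten rb_per_cpu_empty() spells out each case in turn: unread data still on the reader page, the writer committing on the reader page, the writer committing on some other page, or the writer parked on the head page. Purely for illustration, the same decision tree reduced to a stand-alone user-space boolean helper: ]

#include <stdio.h>
#include <stdbool.h>

/*
 * Toy model of the emptiness check: 'reader_done' means the reader has
 * consumed everything on the reader page; the other flags say where the
 * writer is currently committing. Mirrors the control flow above.
 */
static bool buffer_empty(bool reader_done, bool commit_on_reader,
			 bool commit_on_head, bool head_has_data)
{
	if (!reader_done)
		return false;		/* unread data on the reader page */
	if (commit_on_reader)
		return true;		/* everything committed was read */
	if (!commit_on_head)
		return false;		/* data sits on some other page */
	return !head_has_data;		/* empty head page => buffer empty */
}

int main(void)
{
	/* writer parked on the head page with nothing committed there */
	printf("%d\n", buffer_empty(true, false, true, false));
	return 0;
}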
/** diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1afd3b57ae51..20196380fc54 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5565,6 +5565,10 @@ static const char readme_msg[] = "\t [:name=histname1]\n" "\t [:<handler>.<action>]\n" "\t [if <filter>]\n\n" + "\t Note, special fields can be used as well:\n" + "\t common_timestamp - to record current timestamp\n" + "\t common_cpu - to record the CPU the event happened on\n" + "\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 53b4ab9ea305..023777a4a04d 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1111,7 +1111,7 @@ static const char *hist_field_name(struct hist_field *field, field->flags & HIST_FIELD_FL_ALIAS) field_name = hist_field_name(field->operands[0], ++level); else if (field->flags & HIST_FIELD_FL_CPU) - field_name = "cpu"; + field_name = "common_cpu"; else if (field->flags & HIST_FIELD_FL_EXPR || field->flags & HIST_FIELD_FL_VAR_REF) { if (field->system) { @@ -1991,14 +1991,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, hist_data->enable_timestamps = true; if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) hist_data->attrs->ts_in_usecs = true; - } else if (strcmp(field_name, "cpu") == 0) + } else if (strcmp(field_name, "common_cpu") == 0) *flags |= HIST_FIELD_FL_CPU; else { field = trace_find_event_field(file->event_call, field_name); if (!field || !field->size) { - hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); - field = ERR_PTR(-EINVAL); - goto out; + /* + * For backward compatibility, if field_name + * was "cpu", then we treat this the same as + * common_cpu. + */ + if (strcmp(field_name, "cpu") == 0) { + *flags |= HIST_FIELD_FL_CPU; + } else { + hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, + errpos(field_name)); + field = ERR_PTR(-EINVAL); + goto out; + } } } out: @@ -5085,7 +5095,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) seq_printf(m, "%s=", hist_field->var.name);
if (hist_field->flags & HIST_FIELD_FL_CPU) - seq_puts(m, "cpu"); + seq_puts(m, "common_cpu"); else if (field_name) { if (hist_field->flags & HIST_FIELD_FL_VAR_REF || hist_field->flags & HIST_FIELD_FL_ALIAS) diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h index 6e146b959dcd..4007fe95cf42 100644 --- a/kernel/trace/trace_synth.h +++ b/kernel/trace/trace_synth.h @@ -14,10 +14,10 @@ struct synth_field { char *name; size_t size; unsigned int offset; + unsigned int field_pos; bool is_signed; bool is_string; bool is_dynamic; - bool field_pos; };
struct synth_event { diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 976bf8ce8039..fc32821f8240 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -299,8 +299,8 @@ static int tracepoint_add_func(struct tracepoint *tp, * a pointer to it. This array is referenced by __DO_TRACE from * include/linux/tracepoint.h using rcu_dereference_sched(). */ - rcu_assign_pointer(tp->funcs, tp_funcs); tracepoint_update_call(tp, tp_funcs, false); + rcu_assign_pointer(tp->funcs, tp_funcs); static_key_enable(&tp->key);
release_probes(old); diff --git a/mm/kfence/core.c b/mm/kfence/core.c index d7666ace9d2e..575c685aa642 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -733,6 +733,22 @@ void kfence_shutdown_cache(struct kmem_cache *s)
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { + /* + * Perform size check before switching kfence_allocation_gate, so that + * we don't disable KFENCE without making an allocation. + */ + if (size > PAGE_SIZE) + return NULL; + + /* + * Skip allocations from non-default zones, including DMA. We cannot + * guarantee that pages in the KFENCE pool will have the requested + * properties (e.g. reside in DMAable memory). + */ + if ((flags & GFP_ZONEMASK) || + (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) + return NULL; + /* * allocation_gate only needs to become non-zero, so it doesn't make * sense to continue writing to it and pay the associated contention @@ -757,9 +773,6 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) if (!READ_ONCE(kfence_enabled)) return NULL;
- if (size > PAGE_SIZE) - return NULL; - return kfence_guarded_alloc(s, size, flags); }
diff --git a/mm/memblock.c b/mm/memblock.c index afaefa8fc6ab..d47b7afc9dc4 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -940,7 +940,8 @@ static bool should_skip_region(struct memblock_type *type, return true;
/* skip hotpluggable memory regions if needed */ - if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) + if (movable_node_is_enabled() && memblock_is_hotpluggable(m) && + !(flags & MEMBLOCK_HOTPLUG)) return true;
/* if we want mirror memory skip non-mirror memory regions */ diff --git a/mm/memory.c b/mm/memory.c index 3fdba3ec69c8..f2293c0df33f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3891,8 +3891,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf) return ret; }
- if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) + if (vmf->prealloc_pte) { + vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); + if (likely(pmd_none(*vmf->pmd))) { + mm_inc_nr_ptes(vma->vm_mm); + pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); + vmf->prealloc_pte = NULL; + } + spin_unlock(vmf->ptl); + } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { return VM_FAULT_OOM; + } }
/* See comment in handle_pte_fault() */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fc5beebf6988..de392908bf64 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -801,21 +801,24 @@ void init_mem_debugging_and_hardening(void) } #endif
- if (_init_on_alloc_enabled_early) { - if (page_poisoning_requested) - pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " - "will take precedence over init_on_alloc\n"); - else - static_branch_enable(&init_on_alloc); - } - if (_init_on_free_enabled_early) { - if (page_poisoning_requested) - pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " - "will take precedence over init_on_free\n"); - else - static_branch_enable(&init_on_free); + if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && + page_poisoning_requested) { + pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " + "will take precedence over init_on_alloc and init_on_free\n"); + _init_on_alloc_enabled_early = false; + _init_on_free_enabled_early = false; }
+ if (_init_on_alloc_enabled_early) + static_branch_enable(&init_on_alloc); + else + static_branch_disable(&init_on_alloc); + + if (_init_on_free_enabled_early) + static_branch_enable(&init_on_free); + else + static_branch_disable(&init_on_free); + #ifdef CONFIG_DEBUG_PAGEALLOC if (!debug_pagealloc_enabled()) return; diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index a5d72c48fb66..28ac3c96fa88 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -701,6 +701,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, void *data; int ret;
+ if (prog->expected_attach_type == BPF_XDP_DEVMAP || + prog->expected_attach_type == BPF_XDP_CPUMAP) + return -EINVAL; if (kattr->test.ctx_in || kattr->test.ctx_out) return -EINVAL;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 3ad0a1df6712..9d26c5e9da05 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg, goto err;
ret = -EINVAL; - if (unlikely(msg->msg_iter.iov->iov_base == NULL)) + if (unlikely(msg->msg_iter.nr_segs == 0) || + unlikely(msg->msg_iter.iov->iov_base == NULL)) goto err; noblock = msg->msg_flags & MSG_DONTWAIT;
diff --git a/net/core/dev.c b/net/core/dev.c index 4f29dde4ed0a..04c4e236952f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5981,6 +5981,19 @@ static void gro_list_prepare(const struct list_head *head, diffs = memcmp(skb_mac_header(p), skb_mac_header(skb), maclen); + + diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); +#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + if (!diffs) { + struct tc_skb_ext *skb_ext = skb_ext_find(skb, TC_SKB_EXT); + struct tc_skb_ext *p_ext = skb_ext_find(p, TC_SKB_EXT); + + diffs |= (!!p_ext) ^ (!!skb_ext); + if (!diffs && unlikely(skb_ext)) + diffs |= p_ext->chain ^ skb_ext->chain; + } +#endif + NAPI_GRO_CB(p)->same_flow = !diffs; } } @@ -6245,6 +6258,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb_shinfo(skb)->gso_type = 0; skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); skb_ext_reset(skb); + nf_reset_ct(skb);
napi->skb = skb; } @@ -9659,14 +9673,17 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) struct net_device *dev; int err, fd;
+ rtnl_lock(); dev = dev_get_by_index(net, attr->link_create.target_ifindex); - if (!dev) + if (!dev) { + rtnl_unlock(); return -EINVAL; + }
link = kzalloc(sizeof(*link), GFP_USER); if (!link) { err = -ENOMEM; - goto out_put_dev; + goto unlock; }
bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); @@ -9676,14 +9693,14 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) err = bpf_link_prime(&link->link, &link_primer); if (err) { kfree(link); - goto out_put_dev; + goto unlock; }
- rtnl_lock(); err = dev_xdp_attach_link(dev, NULL, link); rtnl_unlock();
if (err) { + link->dev = NULL; bpf_link_cleanup(&link_primer); goto out_put_dev; } @@ -9693,6 +9710,9 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) dev_put(dev); return fd;
+unlock: + rtnl_unlock(); + out_put_dev: dev_put(dev); return err; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index bbc3b4b62032..30ca61d91b69 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -939,6 +939,7 @@ void __kfree_skb_defer(struct sk_buff *skb)
void napi_skb_free_stolen_head(struct sk_buff *skb) { + nf_reset_ct(skb); skb_dst_drop(skb); skb_ext_put(skb); napi_skb_cache_put(skb); diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 539c83a45665..b2410a1bfa23 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -531,10 +531,8 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, if (skb_linearize(skb)) return -EAGAIN; num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len); - if (unlikely(num_sge < 0)) { - kfree(msg); + if (unlikely(num_sge < 0)) return num_sge; - }
copied = skb->len; msg->sg.start = 0; @@ -553,6 +551,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) { struct sock *sk = psock->sk; struct sk_msg *msg; + int err;
/* If we are receiving on the same sock skb->sk is already assigned, * skip memory accounting and owner transition seeing it already set @@ -571,7 +570,10 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb) * into user buffers. */ skb_set_owner_r(skb, sk); - return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + if (err < 0) + kfree(msg); + return err; }
/* Puts an skb on the ingress queue of the socket already assigned to the @@ -582,12 +584,16 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb { struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC); struct sock *sk = psock->sk; + int err;
if (unlikely(!msg)) return -EAGAIN; sk_msg_init(msg); skb_set_owner_r(skb, sk); - return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg); + if (err < 0) + kfree(msg); + return err; }
static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 5dbd45dc35ad..dc92a67baea3 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -816,7 +816,7 @@ static int dn_auto_bind(struct socket *sock) static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) { struct dn_scp *scp = DN_SK(sk); - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int err;
if (scp->state != DN_CR) @@ -826,11 +826,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk)); dn_send_conn_conf(sk, allocation);
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); for(;;) { release_sock(sk); if (scp->state == DN_CC) - *timeo = schedule_timeout(*timeo); + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); lock_sock(sk); err = 0; if (scp->state == DN_RUN) @@ -844,9 +844,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) err = -EAGAIN; if (!*timeo) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); if (err == 0) { sk->sk_socket->state = SS_CONNECTED; } else if (scp->state != DN_CC) { @@ -858,7 +857,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) static int dn_wait_run(struct sock *sk, long *timeo) { struct dn_scp *scp = DN_SK(sk); - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int err = 0;
if (scp->state == DN_RUN) @@ -867,11 +866,11 @@ static int dn_wait_run(struct sock *sk, long *timeo) if (!*timeo) return -EALREADY;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); for(;;) { release_sock(sk); if (scp->state == DN_CI || scp->state == DN_CC) - *timeo = schedule_timeout(*timeo); + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); lock_sock(sk); err = 0; if (scp->state == DN_RUN) @@ -885,9 +884,8 @@ static int dn_wait_run(struct sock *sk, long *timeo) err = -ETIMEDOUT; if (!*timeo) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); out: if (err == 0) { sk->sk_socket->state = SS_CONNECTED; @@ -1032,16 +1030,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) { - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sk_buff *skb = NULL; int err = 0;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); for(;;) { release_sock(sk); skb = skb_dequeue(&sk->sk_receive_queue); if (skb == NULL) { - *timeo = schedule_timeout(*timeo); + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); skb = skb_dequeue(&sk->sk_receive_queue); } lock_sock(sk); @@ -1056,9 +1054,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) err = -EAGAIN; if (!*timeo) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait);
return skb == NULL ? ERR_PTR(err) : skb; } diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index ad9d17923fc5..b65201ba4d93 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -486,7 +486,7 @@ static int __init tcp_bpf_v4_build_proto(void) tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot); return 0; } -core_initcall(tcp_bpf_v4_build_proto); +late_initcall(tcp_bpf_v4_build_proto);
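[ Note: the decnet wait loops above move from prepare_to_wait()/schedule_timeout() to DEFINE_WAIT_FUNC(..., woken_wake_function) with wait_woken(), which records wakeups delivered while the socket lock is dropped instead of losing them. A generic skeleton of that pattern is sketched below; wait_for_condition() and its arguments are made up for the sketch, the wait primitives are the real ones. ]

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

/* Illustrative wait loop using the wait_woken() pattern. */
static long wait_for_condition(wait_queue_head_t *wq, bool (*condition)(void *),
			       void *arg, long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(wq, &wait);
	while (!condition(arg) && timeo) {
		/*
		 * wait_woken() only sleeps if no wakeup has been recorded on
		 * 'wait' since the last call, so a wakeup issued just before
		 * this point is not lost.
		 */
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		if (signal_pending(current))
			break;
	}
	remove_wait_queue(wq, &wait);
	return timeo;
}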
static int tcp_bpf_assert_proto_ops(struct proto *ops) { diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index af2814c9342a..d49709ba8e16 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -507,8 +507,18 @@ void tcp_fastopen_active_disable(struct sock *sk) { struct net *net = sock_net(sk);
+ if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout) + return; + + /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */ + WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies); + + /* Paired with smp_rmb() in tcp_fastopen_active_should_disable(). + * We want net->ipv4.tfo_active_disable_stamp to be updated first. + */ + smp_mb__before_atomic(); atomic_inc(&net->ipv4.tfo_active_disable_times); - net->ipv4.tfo_active_disable_stamp = jiffies; + NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE); }
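[ Note: tcp_fastopen_active_disable() now stores the timestamp with WRITE_ONCE() and orders it before the atomic_inc() with smp_mb__before_atomic(); the reader side in the next hunk pairs that with smp_rmb() and READ_ONCE(), so any CPU that sees the bumped counter also sees a fresh stamp. A stripped-down sketch of the pairing follows; struct demo_state and its fields are invented, the barrier and atomic primitives are the real ones. ]

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

struct demo_state {
	unsigned long stamp;
	atomic_t times;
};

static void demo_disable(struct demo_state *s)
{
	WRITE_ONCE(s->stamp, jiffies);
	/* order the stamp store before the counter increment */
	smp_mb__before_atomic();
	atomic_inc(&s->times);
}

static bool demo_still_disabled(struct demo_state *s, unsigned long timeout)
{
	if (!atomic_read(&s->times))
		return false;
	/* pairs with smp_mb__before_atomic() in demo_disable() */
	smp_rmb();
	return time_before(jiffies, READ_ONCE(s->stamp) + timeout);
}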
@@ -519,17 +529,27 @@ void tcp_fastopen_active_disable(struct sock *sk) bool tcp_fastopen_active_should_disable(struct sock *sk) { unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout; - int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times); unsigned long timeout; + int tfo_da_times; int multiplier;
+ if (!tfo_bh_timeout) + return false; + + tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times); if (!tfo_da_times) return false;
+ /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */ + smp_rmb(); + /* Limit timout to max: 2^6 * initial timeout */ multiplier = 1 << min(tfo_da_times - 1, 6); - timeout = multiplier * tfo_bh_timeout * HZ; - if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout)) + + /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */ + timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) + + multiplier * tfo_bh_timeout * HZ; + if (time_before(jiffies, timeout)) return true;
/* Mark check bit so we can check for successful active TFO diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6bd628f08ded..0f1b4bfddfd4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4247,6 +4247,9 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb) { trace_tcp_receive_reset(sk);
+ /* mptcp can't tell us to ignore reset pkts, + * so just ignore the return value of mptcp_incoming_options(). + */ if (sk_is_mptcp(sk)) mptcp_incoming_options(sk, skb);
@@ -4941,8 +4944,13 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) bool fragstolen; int eaten;
- if (sk_is_mptcp(sk)) - mptcp_incoming_options(sk, skb); + /* If a subflow has been reset, the packet should not continue + * to be processed, drop the packet. + */ + if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) { + __kfree_skb(skb); + return; + }
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { __kfree_skb(skb); @@ -6522,8 +6530,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) case TCP_CLOSING: case TCP_LAST_ACK: if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { - if (sk_is_mptcp(sk)) - mptcp_incoming_options(sk, skb); + /* If a subflow has been reset, the packet should not + * continue to be processed, drop the packet. + */ + if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) + goto discard; break; } fallthrough; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e409f2de5dc4..8bb5f7f51dae 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2954,7 +2954,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_comp_sack_nr = 44; net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE; spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock); - net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60; + net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0; atomic_set(&net->ipv4.tfo_active_disable_times, 0);
/* Reno is always built in */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ca9cf1051b1e..568dc31a0467 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -645,10 +645,12 @@ static struct sock *__udp4_lib_err_encap(struct net *net, const struct iphdr *iph, struct udphdr *uh, struct udp_table *udptable, + struct sock *sk, struct sk_buff *skb, u32 info) { + int (*lookup)(struct sock *sk, struct sk_buff *skb); int network_offset, transport_offset; - struct sock *sk; + struct udp_sock *up;
network_offset = skb_network_offset(skb); transport_offset = skb_transport_offset(skb); @@ -659,18 +661,28 @@ static struct sock *__udp4_lib_err_encap(struct net *net, /* Transport header needs to point to the UDP header */ skb_set_transport_header(skb, iph->ihl << 2);
+ if (sk) { + up = udp_sk(sk); + + lookup = READ_ONCE(up->encap_err_lookup); + if (lookup && lookup(sk, skb)) + sk = NULL; + + goto out; + } + sk = __udp4_lib_lookup(net, iph->daddr, uh->source, iph->saddr, uh->dest, skb->dev->ifindex, 0, udptable, NULL); if (sk) { - int (*lookup)(struct sock *sk, struct sk_buff *skb); - struct udp_sock *up = udp_sk(sk); + up = udp_sk(sk);
lookup = READ_ONCE(up->encap_err_lookup); if (!lookup || lookup(sk, skb)) sk = NULL; }
+out: if (!sk) sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
@@ -707,15 +719,16 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex, inet_sdif(skb), udptable, NULL); + if (!sk || udp_sk(sk)->encap_type) { /* No socket for error: try tunnels before discarding */ - sk = ERR_PTR(-ENOENT); if (static_branch_unlikely(&udp_encap_needed_key)) { - sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb, + sk = __udp4_lib_err_encap(net, iph, uh, udptable, sk, skb, info); if (!sk) return 0; - } + } else + sk = ERR_PTR(-ENOENT);
if (IS_ERR(sk)) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c index 954c4591a6fd..725b6df4b2a2 100644 --- a/net/ipv4/udp_bpf.c +++ b/net/ipv4/udp_bpf.c @@ -101,7 +101,7 @@ static int __init udp_bpf_v4_build_proto(void) udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot); return 0; } -core_initcall(udp_bpf_v4_build_proto); +late_initcall(udp_bpf_v4_build_proto);
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 497974b4372a..b7ffb4f227a4 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -479,7 +479,9 @@ int ip6_forward(struct sk_buff *skb) if (skb_warn_if_lro(skb)) goto drop;
- if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { + if (!net->ipv6.devconf_all->disable_policy && + !idev->cnf.disable_policy && + !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); goto drop; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index d417e514bd52..09e84161b731 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3642,7 +3642,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, err = PTR_ERR(rt->fib6_metrics); /* Do not leave garbage there. */ rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics; - goto out; + goto out_free; }
if (cfg->fc_flags & RTF_ADDRCONF) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 6774e776228c..2d3bd4a9b0d0 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -502,12 +502,14 @@ static struct sock *__udp6_lib_err_encap(struct net *net, const struct ipv6hdr *hdr, int offset, struct udphdr *uh, struct udp_table *udptable, + struct sock *sk, struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, __be32 info) { + int (*lookup)(struct sock *sk, struct sk_buff *skb); int network_offset, transport_offset; - struct sock *sk; + struct udp_sock *up;
network_offset = skb_network_offset(skb); transport_offset = skb_transport_offset(skb); @@ -518,18 +520,28 @@ static struct sock *__udp6_lib_err_encap(struct net *net, /* Transport header needs to point to the UDP header */ skb_set_transport_header(skb, offset);
+ if (sk) { + up = udp_sk(sk); + + lookup = READ_ONCE(up->encap_err_lookup); + if (lookup && lookup(sk, skb)) + sk = NULL; + + goto out; + } + sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source, &hdr->saddr, uh->dest, inet6_iif(skb), 0, udptable, skb); if (sk) { - int (*lookup)(struct sock *sk, struct sk_buff *skb); - struct udp_sock *up = udp_sk(sk); + up = udp_sk(sk);
lookup = READ_ONCE(up->encap_err_lookup); if (!lookup || lookup(sk, skb)) sk = NULL; }
+out: if (!sk) { sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code, offset, info)); @@ -558,16 +570,17 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, inet6_iif(skb), inet6_sdif(skb), udptable, NULL); + if (!sk || udp_sk(sk)->encap_type) { /* No socket for error: try tunnels before discarding */ - sk = ERR_PTR(-ENOENT); if (static_branch_unlikely(&udpv6_encap_needed_key)) { sk = __udp6_lib_err_encap(net, hdr, offset, uh, - udptable, skb, + udptable, sk, skb, opt, type, code, info); if (!sk) return 0; - } + } else + sk = ERR_PTR(-ENOENT);
if (IS_ERR(sk)) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c index eb2dc6dbe212..c8f4823cd79f 100644 --- a/net/mptcp/mib.c +++ b/net/mptcp/mib.c @@ -42,6 +42,7 @@ static const struct snmp_mib mptcp_snmp_list[] = { SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW), SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX), SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX), + SNMP_MIB_ITEM("RcvPruned", MPTCP_MIB_RCVPRUNED), SNMP_MIB_SENTINEL };
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h index f0da4f060fe1..93fa7c95e206 100644 --- a/net/mptcp/mib.h +++ b/net/mptcp/mib.h @@ -35,6 +35,7 @@ enum linux_mptcp_mib_field { MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */ MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */ MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */ + MPTCP_MIB_RCVPRUNED, /* Incoming packet dropped due to memory limit */ __MPTCP_MIB_MAX };
diff --git a/net/mptcp/options.c b/net/mptcp/options.c index b87e46f515fb..4f08e04e1ab7 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -323,7 +323,8 @@ static void mptcp_parse_option(const struct sk_buff *skb, } }
-void mptcp_get_options(const struct sk_buff *skb, +void mptcp_get_options(const struct sock *sk, + const struct sk_buff *skb, struct mptcp_options_received *mp_opt) { const struct tcphdr *th = tcp_hdr(skb); @@ -989,7 +990,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk, return hmac == mp_opt->ahmac; }
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) +/* Return false if a subflow has been reset, else return true */ +bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); struct mptcp_sock *msk = mptcp_sk(subflow->conn); @@ -1007,12 +1009,16 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) __mptcp_check_push(subflow->conn, sk); __mptcp_data_acked(subflow->conn); mptcp_data_unlock(subflow->conn); - return; + return true; }
- mptcp_get_options(skb, &mp_opt); + mptcp_get_options(sk, skb, &mp_opt); + + /* The subflow can be in close state only if check_fully_established() + * just sent a reset. If so, tell the caller to ignore the current packet. + */ if (!check_fully_established(msk, sk, subflow, skb, &mp_opt)) - return; + return sk->sk_state != TCP_CLOSE;
if (mp_opt.fastclose && msk->local_key == mp_opt.rcvr_key) { @@ -1054,7 +1060,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) }
if (!mp_opt.dss) - return; + return true;
/* we can't wait for recvmsg() to update the ack_seq, otherwise * monodirectional flows will stuck @@ -1073,12 +1079,12 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) schedule_work(&msk->work)) sock_hold(subflow->conn);
- return; + return true; }
mpext = skb_ext_add(skb, SKB_EXT_MPTCP); if (!mpext) - return; + return true;
memset(mpext, 0, sizeof(*mpext));
@@ -1103,6 +1109,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) mpext->data_len = mp_opt.data_len; mpext->use_map = 1; } + + return true; }
static void mptcp_set_rwin(const struct tcp_sock *tp) diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 3f5d90a20235..fce1d057d19e 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -540,6 +540,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node); if (subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + bool slow;
spin_unlock_bh(&msk->pm.lock); pr_debug("send ack for %s%s%s", @@ -547,9 +548,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "", mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
- lock_sock(ssk); + slow = lock_sock_fast(ssk); tcp_send_ack(ssk); - release_sock(ssk); + unlock_sock_fast(ssk, slow); spin_lock_bh(&msk->pm.lock); } } @@ -566,6 +567,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, struct sock *ssk = mptcp_subflow_tcp_sock(subflow); struct sock *sk = (struct sock *)msk; struct mptcp_addr_info local; + bool slow;
local_address((struct sock_common *)ssk, &local); if (!addresses_equal(&local, addr, addr->port)) @@ -578,9 +580,9 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
spin_unlock_bh(&msk->pm.lock); pr_debug("send ack for mp_prio"); - lock_sock(ssk); + slow = lock_sock_fast(ssk); tcp_send_ack(ssk); - release_sock(ssk); + unlock_sock_fast(ssk, slow); spin_lock_bh(&msk->pm.lock);
return 0; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 8ead550df8b1..dde68da0c1f9 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -424,56 +424,55 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + bool slow;
- lock_sock(ssk); + slow = lock_sock_fast(ssk); if (tcp_can_send_ack(ssk)) tcp_send_ack(ssk); - release_sock(ssk); + unlock_sock_fast(ssk, slow); } }
-static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk) +static void mptcp_subflow_cleanup_rbuf(struct sock *ssk) { - int ret; + bool slow;
- lock_sock(ssk); - ret = tcp_can_send_ack(ssk); - if (ret) + slow = lock_sock_fast(ssk); + if (tcp_can_send_ack(ssk)) tcp_cleanup_rbuf(ssk, 1); - release_sock(ssk); - return ret; + unlock_sock_fast(ssk, slow); +} + +static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) +{ + const struct inet_connection_sock *icsk = inet_csk(ssk); + u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending); + const struct tcp_sock *tp = tcp_sk(ssk); + + return (ack_pending & ICSK_ACK_SCHED) && + ((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) > + READ_ONCE(icsk->icsk_ack.rcv_mss)) || + (rx_empty && ack_pending & + (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED))); }
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk) { - struct sock *ack_hint = READ_ONCE(msk->ack_hint); int old_space = READ_ONCE(msk->old_wspace); struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; - bool cleanup; + int space = __mptcp_space(sk); + bool cleanup, rx_empty;
- /* this is a simple superset of what tcp_cleanup_rbuf() implements - * so that we don't have to acquire the ssk socket lock most of the time - * to do actually nothing - */ - cleanup = __mptcp_space(sk) - old_space >= max(0, old_space); - if (!cleanup) - return; + cleanup = (space > 0) && (space >= (old_space << 1)); + rx_empty = !__mptcp_rmem(sk);
- /* if the hinted ssk is still active, try to use it */ - if (likely(ack_hint)) { - mptcp_for_each_subflow(msk, subflow) { - struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- if (ack_hint == ssk && mptcp_subflow_cleanup_rbuf(ssk)) - return; - } + if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) + mptcp_subflow_cleanup_rbuf(ssk); } - - /* otherwise pick the first active subflow */ - mptcp_for_each_subflow(msk, subflow) - if (mptcp_subflow_cleanup_rbuf(mptcp_subflow_tcp_sock(subflow))) - return; }
static bool mptcp_check_data_fin(struct sock *sk) @@ -618,7 +617,6 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, break; } } while (more_data_avail); - WRITE_ONCE(msk->ack_hint, ssk);
*bytes += moved; return done; @@ -716,8 +714,10 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk) sk_rbuf = ssk_rbuf;
/* over limit? can't append more skbs to msk, Also, no need to wake-up*/ - if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) + if (__mptcp_rmem(sk) > sk_rbuf) { + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); return; + }
/* Wake-up the reader only for in-sequence data */ mptcp_data_lock(sk); @@ -1801,7 +1801,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, if (!(flags & MSG_PEEK)) { /* we will bulk release the skb memory later */ skb->destructor = NULL; - msk->rmem_released += skb->truesize; + WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize); __skb_unlink(skb, &msk->receive_queue); __kfree_skb(skb); } @@ -1920,7 +1920,7 @@ static void __mptcp_update_rmem(struct sock *sk)
atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); sk_mem_uncharge(sk, msk->rmem_released); - msk->rmem_released = 0; + WRITE_ONCE(msk->rmem_released, 0); }
static void __mptcp_splice_receive_queue(struct sock *sk) @@ -1953,7 +1953,6 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk) __mptcp_update_rmem(sk); done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); mptcp_data_unlock(sk); - tcp_cleanup_rbuf(ssk, moved);
if (unlikely(ssk->sk_err)) __mptcp_error_report(sk); @@ -1969,7 +1968,6 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk) ret |= __mptcp_ofo_queue(msk); __mptcp_splice_receive_queue(sk); mptcp_data_unlock(sk); - mptcp_cleanup_rbuf(msk); } if (ret) mptcp_check_data_fin((struct sock *)msk); @@ -2214,9 +2212,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, if (ssk == msk->last_snd) msk->last_snd = NULL;
- if (ssk == msk->ack_hint) - msk->ack_hint = NULL; - if (ssk == msk->first) msk->first = NULL;
@@ -2288,13 +2283,14 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); + bool slow;
- lock_sock(tcp_sk); + slow = lock_sock_fast(tcp_sk); if (tcp_sk->sk_state != TCP_CLOSE) { tcp_send_active_reset(tcp_sk, GFP_ATOMIC); tcp_set_state(tcp_sk, TCP_CLOSE); } - release_sock(tcp_sk); + unlock_sock_fast(tcp_sk, slow); }
inet_sk_state_store(sk, TCP_CLOSE); @@ -2426,11 +2422,10 @@ static int __mptcp_init_sock(struct sock *sk) msk->out_of_order_queue = RB_ROOT; msk->first_pending = NULL; msk->wmem_reserved = 0; - msk->rmem_released = 0; + WRITE_ONCE(msk->rmem_released, 0); msk->tx_pending_data = 0; msk->size_goal_cache = TCP_BASE_MSS;
- msk->ack_hint = NULL; msk->first = NULL; inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 7b634568f49c..dc5b71de0a9a 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -236,7 +236,6 @@ struct mptcp_sock { bool rcv_fastclose; bool use_64bit_ack; /* Set when we received a 64-bit DSN */ spinlock_t join_list_lock; - struct sock *ack_hint; struct work_struct work; struct sk_buff *ooo_last_skb; struct rb_root out_of_order_queue; @@ -291,9 +290,17 @@ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk) return (struct mptcp_sock *)sk; }
+/* the msk socket don't use the backlog, also account for the bulk + * free memory + */ +static inline int __mptcp_rmem(const struct sock *sk) +{ + return atomic_read(&sk->sk_rmem_alloc) - READ_ONCE(mptcp_sk(sk)->rmem_released); +} + static inline int __mptcp_space(const struct sock *sk) { - return tcp_space(sk) + READ_ONCE(mptcp_sk(sk)->rmem_released); + return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk)); }
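[ Note: __mptcp_rmem() reports the receive memory actually charged to the msk after excluding skbs queued for bulk release, and __mptcp_space() turns whatever is left of sk_rcvbuf into window space. The arithmetic, as a toy user-space calculation with made-up numbers (the real code additionally scales the result through tcp_win_from_space()): ]

#include <stdio.h>

/* memory actually held by queued data, excluding bulk-freed skbs */
static int demo_rmem(int rmem_alloc, int rmem_released)
{
	return rmem_alloc - rmem_released;
}

/* room left in the receive buffer before any window scaling */
static int demo_space(int rcvbuf, int rmem_alloc, int rmem_released)
{
	return rcvbuf - demo_rmem(rmem_alloc, rmem_released);
}

int main(void)
{
	printf("space=%d\n", demo_space(262144, 100000, 40000));
	return 0;
}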
static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk) @@ -576,7 +583,8 @@ int __init mptcp_proto_v6_init(void); struct sock *mptcp_sk_clone(const struct sock *sk, const struct mptcp_options_received *mp_opt, struct request_sock *req); -void mptcp_get_options(const struct sk_buff *skb, +void mptcp_get_options(const struct sock *sk, + const struct sk_buff *skb, struct mptcp_options_received *mp_opt);
void mptcp_finish_connect(struct sock *sk); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index cbc452d0901e..78e787ef8fff 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -150,7 +150,7 @@ static int subflow_check_req(struct request_sock *req, return -EINVAL; #endif
- mptcp_get_options(skb, &mp_opt); + mptcp_get_options(sk_listener, skb, &mp_opt);
if (mp_opt.mp_capable) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE); @@ -212,11 +212,6 @@ static int subflow_check_req(struct request_sock *req, ntohs(inet_sk(sk_listener)->inet_sport), ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport)); if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) { - sock_put((struct sock *)subflow_req->msk); - mptcp_token_destroy_request(req); - tcp_request_sock_ops.destructor(req); - subflow_req->msk = NULL; - subflow_req->mp_join = 0; SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX); return -EPERM; } @@ -228,6 +223,8 @@ static int subflow_check_req(struct request_sock *req, if (unlikely(req->syncookie)) { if (mptcp_can_accept_new_subflow(subflow_req->msk)) subflow_init_req_cookie_join_save(subflow_req, skb); + else + return -EPERM; }
pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token, @@ -247,7 +244,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req, int err;
subflow_init_req(req, sk_listener); - mptcp_get_options(skb, &mp_opt); + mptcp_get_options(sk_listener, skb, &mp_opt);
if (mp_opt.mp_capable && mp_opt.mp_join) return -EINVAL; @@ -267,9 +264,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req, if (!mptcp_token_join_cookie_init_state(subflow_req, skb)) return -EINVAL;
- if (mptcp_can_accept_new_subflow(subflow_req->msk)) - subflow_req->mp_join = 1; - + subflow_req->mp_join = 1; subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; }
@@ -408,7 +403,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) subflow->ssn_offset = TCP_SKB_CB(skb)->seq; pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
- mptcp_get_options(skb, &mp_opt); + mptcp_get_options(sk, skb, &mp_opt); if (subflow->request_mptcp) { if (!mp_opt.mp_capable) { MPTCP_INC_STATS(sock_net(sk), @@ -655,7 +650,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, * reordered MPC will cause fallback, but we don't have other * options. */ - mptcp_get_options(skb, &mp_opt); + mptcp_get_options(sk, skb, &mp_opt); if (!mp_opt.mp_capable) { fallback = true; goto create_child; @@ -665,7 +660,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, if (!new_msk) fallback = true; } else if (subflow_req->mp_join) { - mptcp_get_options(skb, &mp_opt); + mptcp_get_options(sk, skb, &mp_opt); if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) || !mptcp_can_accept_new_subflow(subflow_req->msk)) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c index abe0fd099746..37127781aee9 100644 --- a/net/mptcp/syncookies.c +++ b/net/mptcp/syncookies.c @@ -37,7 +37,21 @@ static spinlock_t join_entry_locks[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp
 static u32 mptcp_join_entry_hash(struct sk_buff *skb, struct net *net)
 {
-	u32 i = skb_get_hash(skb) ^ net_hash_mix(net);
+	static u32 mptcp_join_hash_secret __read_mostly;
+	struct tcphdr *th = tcp_hdr(skb);
+	u32 seq, i;
+
+	net_get_random_once(&mptcp_join_hash_secret,
+			    sizeof(mptcp_join_hash_secret));
+
+	if (th->syn)
+		seq = TCP_SKB_CB(skb)->seq;
+	else
+		seq = TCP_SKB_CB(skb)->seq - 1;
+
+	i = jhash_3words(seq, net_hash_mix(net),
+			 (__force __u32)th->source << 16 | (__force __u32)th->dest,
+			 mptcp_join_hash_secret);
return i % ARRAY_SIZE(join_entries); } diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index 9115f8a7dd45..a8da88db7893 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -121,11 +121,9 @@ static void nr_heartbeat_expiry(struct timer_list *t) is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { - sock_hold(sk); bh_unlock_sock(sk); nr_destroy_socket(sk); - sock_put(sk); - return; + goto out; } break;
@@ -146,6 +144,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
nr_start_heartbeat(sk); bh_unlock_sock(sk); +out: + sock_put(sk); }
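The netrom timer hunks above and below all follow one rule: the timers are armed through sk_reset_timer(), which holds a sock reference, so every expiry handler must drop exactly one reference on every exit path, which is what the added sock_put() calls and the new "out:" labels do. A generic, hedged sketch of that discipline in plain C; the type and function names here are made up for illustration and are not the netrom code.

#include <stdio.h>
#include <stdlib.h>

struct conn {
	int refcnt;
	int state;
};

static struct conn *conn_hold(struct conn *c) { c->refcnt++; return c; }

static void conn_put(struct conn *c)
{
	if (--c->refcnt == 0) {
		printf("freeing conn\n");
		free(c);
	}
}

/* "Arming the timer" takes a reference so the object cannot vanish
 * while the callback is still pending. */
static struct conn *arm_timer(struct conn *c)
{
	return conn_hold(c);
}

/* The expiry handler owns that reference and must drop it on every
 * exit path; the bug fixed above was returning early without the put. */
static void timer_expired(struct conn *c)
{
	if (c->state < 0)
		goto out;		/* early exit still reaches the put */

	printf("normal expiry work, state=%d\n", c->state);
out:
	conn_put(c);
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->refcnt = 1;			/* caller's reference */
	timer_expired(arm_timer(c));	/* simulate arm + expiry */
	conn_put(c);			/* drop the caller's reference */
	return 0;
}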
static void nr_t2timer_expiry(struct timer_list *t) @@ -159,6 +159,7 @@ static void nr_t2timer_expiry(struct timer_list *t) nr_enquiry_response(sk); } bh_unlock_sock(sk); + sock_put(sk); }
static void nr_t4timer_expiry(struct timer_list *t) @@ -169,6 +170,7 @@ static void nr_t4timer_expiry(struct timer_list *t) bh_lock_sock(sk); nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY; bh_unlock_sock(sk); + sock_put(sk); }
static void nr_idletimer_expiry(struct timer_list *t) @@ -197,6 +199,7 @@ static void nr_idletimer_expiry(struct timer_list *t) sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); + sock_put(sk); }
static void nr_t1timer_expiry(struct timer_list *t) @@ -209,8 +212,7 @@ static void nr_t1timer_expiry(struct timer_list *t) case NR_STATE_1: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - bh_unlock_sock(sk); - return; + goto out; } else { nr->n2count++; nr_write_internal(sk, NR_CONNREQ); @@ -220,8 +222,7 @@ static void nr_t1timer_expiry(struct timer_list *t) case NR_STATE_2: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - bh_unlock_sock(sk); - return; + goto out; } else { nr->n2count++; nr_write_internal(sk, NR_DISCREQ); @@ -231,8 +232,7 @@ static void nr_t1timer_expiry(struct timer_list *t) case NR_STATE_3: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - bh_unlock_sock(sk); - return; + goto out; } else { nr->n2count++; nr_requeue_frames(sk); @@ -241,5 +241,7 @@ static void nr_t1timer_expiry(struct timer_list *t) }
nr_start_t1timer(sk); +out: bh_unlock_sock(sk); + sock_put(sk); } diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 81a1c67335be..8d17a543cc9f 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -6,6 +6,7 @@ */
#include <linux/module.h> +#include <linux/if_arp.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/skbuff.h> @@ -33,6 +34,13 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&d->tcf_tm); bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+ action = READ_ONCE(d->tcf_action); + if (unlikely(action == TC_ACT_SHOT)) + goto drop; + + if (!skb->dev || skb->dev->type != ARPHRD_ETHER) + return action; + /* XXX: if you are going to edit more fields beyond ethernet header * (example when you add IP header replacement or vlan swap) * then MAX_EDIT_LEN needs to change appropriately @@ -41,10 +49,6 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, if (unlikely(err)) /* best policy is to drop on the floor */ goto drop;
- action = READ_ONCE(d->tcf_action); - if (unlikely(action == TC_ACT_SHOT)) - goto drop; - p = rcu_dereference_bh(d->skbmod_p); flags = p->flags; if (flags & SKBMOD_F_DMAC) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index d73b5c5514a9..e3e79e9bd706 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -2904,7 +2904,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, break; case RTM_GETCHAIN: err = tc_chain_notify(chain, skb, n->nlmsg_seq, - n->nlmsg_seq, n->nlmsg_type, true); + n->nlmsg_flags, n->nlmsg_type, true); if (err < 0) NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); break; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 5b274534264c..e9a8a2c86bbd 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -278,6 +278,8 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r, TCA_TCINDEX_POLICE); }
+static void tcindex_free_perfect_hash(struct tcindex_data *cp); + static void tcindex_partial_destroy_work(struct work_struct *work) { struct tcindex_data *p = container_of(to_rcu_work(work), @@ -285,7 +287,8 @@ static void tcindex_partial_destroy_work(struct work_struct *work) rwork);
rtnl_lock(); - kfree(p->perfect); + if (p->perfect) + tcindex_free_perfect_hash(p); kfree(p); rtnl_unlock(); } diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 6f8319b828b0..fe74c5f95630 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -860,6 +860,8 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, if (replace) { list_del_init(&shkey->key_list); sctp_auth_shkey_release(shkey); + if (asoc && asoc->active_key_id == auth_key->sca_keynumber) + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); } list_add(&cur_key->key_list, sh_keys);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a79d193ff872..dbd074f4d450 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4521,6 +4521,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, }
if (optlen > 0) { + /* Trim it to the biggest size sctp sockopt may need if necessary */ + optlen = min_t(unsigned int, optlen, + PAGE_ALIGN(USHRT_MAX + + sizeof(__u16) * sizeof(struct sctp_reset_streams))); kopt = memdup_sockptr(optval, optlen); if (IS_ERR(kopt)) return PTR_ERR(kopt); diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 53e300f860bb..33d0bdebbed8 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -96,6 +96,7 @@ static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; static int opt_timeout = 1000; static bool opt_need_wakeup = true; static u32 opt_num_xsks = 1; +static u32 prog_id; static bool opt_busy_poll; static bool opt_reduced_cap;
@@ -461,6 +462,23 @@ static void *poller(void *arg) return NULL; }
+static void remove_xdp_program(void)
+{
+	u32 curr_prog_id = 0;
+
+	if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) {
+		printf("bpf_get_link_xdp_id failed\n");
+		exit(EXIT_FAILURE);
+	}
+
+	if (prog_id == curr_prog_id)
+		bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
+	else if (!curr_prog_id)
+		printf("couldn't find a prog id on a given interface\n");
+	else
+		printf("program on interface changed, not removing\n");
+}
+
 static void int_exit(int sig)
 {
 	benchmark_done = true;
@@ -471,6 +489,9 @@ static void __exit_with_error(int error, const char *file, const char *func,
 {
 	fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
 		line, error, strerror(error));
+
+	if (opt_num_xsks > 1)
+		remove_xdp_program();
 	exit(EXIT_FAILURE);
 }
@@ -490,6 +511,9 @@ static void xdpsock_cleanup(void) if (write(sock, &cmd, sizeof(int)) < 0) exit_with_error(errno); } + + if (opt_num_xsks > 1) + remove_xdp_program(); }
static void swap_mac_addresses(void *data) @@ -857,6 +881,10 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem, if (ret) exit_with_error(-ret);
+ ret = bpf_get_link_xdp_id(opt_ifindex, &prog_id, opt_xdp_flags); + if (ret) + exit_with_error(-ret); + xsk->app_stats.rx_empty_polls = 0; xsk->app_stats.fill_fail_polls = 0; xsk->app_stats.copy_tx_sendtos = 0; diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 34d257653fb4..c6bd62f518ff 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -388,7 +388,7 @@ ifeq ($(CONFIG_LTO_CLANG) $(CONFIG_MODVERSIONS),y y) cmd_update_lto_symversions = \ rm -f $@.symversions \ $(foreach n, $(filter-out FORCE,$^), \ - $(if $(wildcard $(n).symversions), \ + $(if $(shell test -s $(n).symversions && echo y), \ ; cat $(n).symversions >> $@.symversions)) else cmd_update_lto_symversions = echo >/dev/null diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 8dbe86cf2e4f..a9fd486089a7 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -246,12 +246,18 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream) if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP)) return false;
- if (substream->ops->mmap || - (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV && - substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC)) + if (substream->ops->mmap) return true;
- return dma_can_mmap(substream->dma_buffer.dev.dev); + switch (substream->dma_buffer.dev.type) { + case SNDRV_DMA_TYPE_UNKNOWN: + return false; + case SNDRV_DMA_TYPE_CONTINUOUS: + case SNDRV_DMA_TYPE_VMALLOC: + return true; + default: + return dma_can_mmap(substream->dma_buffer.dev.dev); + } }
static int constrain_mask_params(struct snd_pcm_substream *substream, @@ -3057,9 +3063,14 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream, boundary = 0x7fffffff; snd_pcm_stream_lock_irq(substream); /* FIXME: we should consider the boundary for the sync from app */ - if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) - control->appl_ptr = scontrol.appl_ptr; - else + if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL)) { + err = pcm_lib_apply_appl_ptr(substream, + scontrol.appl_ptr); + if (err < 0) { + snd_pcm_stream_unlock_irq(substream); + return err; + } + } else scontrol.appl_ptr = control->appl_ptr % boundary; if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN)) control->avail_min = scontrol.avail_min; diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index d8be146793ee..c9d0ba353463 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -319,6 +319,10 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC, .device = 0x4b55, }, + { + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC, + .device = 0x4b58, + }, #endif
/* Alder Lake */ diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c index c98ccd421a2e..2cc30a004eb7 100644 --- a/sound/isa/sb/sb16_csp.c +++ b/sound/isa/sb/sb16_csp.c @@ -814,6 +814,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); + spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock); set_mode_register(p->chip, 0xc0); /* c0 = STOP */ @@ -853,6 +854,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */ + spin_lock_irqsave(&p->chip->mixer_lock, flags); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); spin_unlock_irqrestore(&p->chip->mixer_lock, flags); @@ -878,6 +880,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); + spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock); if (p->running & SNDRV_SB_CSP_ST_QSOUND) { @@ -892,6 +895,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */ + spin_lock_irqsave(&p->chip->mixer_lock, flags); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); spin_unlock_irqrestore(&p->chip->mixer_lock, flags); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 4b2cc8cb55c4..84c088912b3c 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1940,6 +1940,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) static const struct snd_pci_quirk force_connect_list[] = { SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), + SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1), {} };
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 1ca320fef670..19a3ae79c001 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -8566,6 +8566,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c index 3000bc128b5b..38356ea2bd6e 100644 --- a/sound/soc/codecs/rt5631.c +++ b/sound/soc/codecs/rt5631.c @@ -1695,6 +1695,8 @@ static const struct regmap_config rt5631_regmap_config = { .reg_defaults = rt5631_reg, .num_reg_defaults = ARRAY_SIZE(rt5631_reg), .cache_type = REGCACHE_RBTREE, + .use_single_read = true, + .use_single_write = true, };
static int rt5631_i2c_probe(struct i2c_client *i2c, diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 3dc119daf2f6..cef05d81c39b 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1213,7 +1213,7 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
mutex_lock(&ctl->dsp->pwr_lock);
- ret = wm_coeff_read_ctrl_raw(ctl, ctl->cache, size); + ret = wm_coeff_read_ctrl(ctl, ctl->cache, size);
if (!ret && copy_to_user(bytes, ctl->cache, size)) ret = -EFAULT; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 46513bb97904..d1c570ca21ea 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1015,6 +1015,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = -EINVAL, _ret = 0; int rollback = 0;
@@ -1055,14 +1056,23 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: - ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback); - if (ret < 0) - break; + if (rtd->dai_link->stop_dma_first) { + ret = snd_soc_pcm_component_trigger(substream, cmd, rollback); + if (ret < 0) + break;
- ret = snd_soc_pcm_component_trigger(substream, cmd, rollback); - if (ret < 0) - break; + ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback); + if (ret < 0) + break; + } else { + ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback); + if (ret < 0) + break;
+			ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
+			if (ret < 0)
+				break;
+		}
 		ret = snd_soc_link_trigger(substream, cmd, rollback);
 		break;
 	}
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index 88c3bf404dd7..d1fd0a330554 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -89,6 +89,7 @@ static const struct sof_dev_desc adls_desc = {
 static const struct sof_dev_desc adl_desc = {
 	.machines = snd_soc_acpi_intel_adl_machines,
 	.alt_machines = snd_soc_acpi_intel_adl_sdw_machines,
+	.use_acpi_target_states = true,
 	.resindex_lpe_base = 0,
 	.resindex_pcicfg_base = -1,
 	.resindex_imr_base = -1,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 30b3e128e28d..f4cdaf1ba44a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -3295,7 +3295,15 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
 {
 	struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
 	static const char * const val_types[] = {
-		"BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
+		[USB_MIXER_BOOLEAN] = "BOOLEAN",
+		[USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN",
+		[USB_MIXER_S8] = "S8",
+		[USB_MIXER_U8] = "U8",
+		[USB_MIXER_S16] = "S16",
+		[USB_MIXER_U16] = "U16",
+		[USB_MIXER_S32] = "S32",
+		[USB_MIXER_U32] = "U32",
+		[USB_MIXER_BESPOKEN] = "BESPOKEN",
 	};
 	snd_iprintf(buffer, "  Info: id=%i, control=%i, cmask=0x%x, "
 			    "channels=%i, type=\"%s\"\n", cval->head.id,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 8b8bee3c3dd6..e7accd87e063 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1897,6 +1897,9 @@ static const struct registration_quirk registration_quirks[] = {
 	REG_QUIRK_ENTRY(0x0951, 0x16d8, 2),	/* Kingston HyperX AMP */
 	REG_QUIRK_ENTRY(0x0951, 0x16ed, 2),	/* Kingston HyperX Cloud Alpha S */
 	REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),	/* Kingston HyperX Cloud Flight S */
+	REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2),	/* JBL Quantum 600 */
+	REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2),	/* JBL Quantum 400 */
+	REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2),	/* JBL Quantum 800 */
 	{ 0 }					/* terminator */
 };
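One detail worth calling out in the sound/usb/mixer.c hunk: the value-type name table switches from positional to designated initializers indexed by the USB_MIXER_* constants, so the strings can no longer drift out of sync with the enum (and the new BESPOKEN entry slots in safely). A tiny standalone illustration of the same C idiom; the enum and strings here are invented for the example.

#include <stdio.h>

enum fruit { FRUIT_APPLE, FRUIT_PEAR, FRUIT_PLUM, FRUIT_MAX };

/* Designated initializers: each slot is tied to its enum value, so
 * reordering or extending the enum cannot silently shift the names. */
static const char * const fruit_names[FRUIT_MAX] = {
	[FRUIT_APPLE] = "apple",
	[FRUIT_PEAR]  = "pear",
	[FRUIT_PLUM]  = "plum",
};

int main(void)
{
	for (int i = 0; i < FRUIT_MAX; i++)
		printf("%d -> %s\n", i, fruit_names[i]);
	return 0;
}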
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 1828bba19020..dc6daa193557 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -222,6 +222,11 @@ int mount_bpffs_for_pin(const char *name) int err = 0;
file = malloc(strlen(name) + 1); + if (!file) { + p_err("mem alloc failed"); + return -1; + } + strcpy(file, name); dir = dirname(file);
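The bpftool fix above guards the malloc() used to build a writable copy of the pin path before dirname() modifies it. As a hedged aside, the same "duplicate, check, then mutate" pattern is often written with strdup(); the sketch below is illustrative only and is not the bpftool code.

#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* dirname() may modify its argument, so work on a checked copy. */
static int print_parent_dir(const char *path)
{
	char *copy = strdup(path);

	if (!copy) {
		fprintf(stderr, "mem alloc failed\n");
		return -1;
	}
	printf("parent of %s is %s\n", path, dirname(copy));
	free(copy);
	return 0;
}

int main(void)
{
	return print_parent_dir("/sys/fs/bpf/myprog") ? 1 : 0;
}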
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index ddccc0eb7390..6b64cb280b2e 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -358,9 +358,10 @@ static struct dso *findnew_dso(int pid, int tid, const char *filename, dso = machine__findnew_dso_id(machine, filename, id); }
- if (dso) + if (dso) { + nsinfo__put(dso->nsinfo); dso->nsinfo = nsi; - else + } else nsinfo__put(nsi);
thread__put(thread); @@ -907,8 +908,10 @@ int cmd_inject(int argc, const char **argv)
data.path = inject.input_name; inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool); - if (IS_ERR(inject.session)) - return PTR_ERR(inject.session); + if (IS_ERR(inject.session)) { + ret = PTR_ERR(inject.session); + goto out_close_output; + }
if (zstd_init(&(inject.session->zstd_data), 0) < 0) pr_warning("Decompression initialization failed.\n"); @@ -950,5 +953,7 @@ int cmd_inject(int argc, const char **argv) out_delete: zstd_fini(&(inject.session->zstd_data)); perf_session__delete(inject.session); +out_close_output: + perf_data__close(&inject.output); return ret; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 36f9ccfeb38a..ce420f910ff8 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1167,6 +1167,8 @@ int cmd_report(int argc, const char **argv) .annotation_opts = annotation__default_options, .skip_empty = true, }; + char *sort_order_help = sort_help("sort by key(s):"); + char *field_order_help = sort_help("output field(s): overhead period sample "); const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), @@ -1201,9 +1203,9 @@ int cmd_report(int argc, const char **argv) OPT_BOOLEAN(0, "header-only", &report.header_only, "Show only data header."), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - sort_help("sort by key(s):")), + sort_order_help), OPT_STRING('F', "fields", &field_order, "key[,keys...]", - sort_help("output field(s): overhead period sample ")), + field_order_help), OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization, "Show sample percentage for different cpu modes"), OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, @@ -1336,11 +1338,11 @@ int cmd_report(int argc, const char **argv) char sort_tmp[128];
if (ret < 0) - return ret; + goto exit;
ret = perf_config(report__config, &report); if (ret) - return ret; + goto exit;
argc = parse_options(argc, argv, options, report_usage, 0); if (argc) { @@ -1354,8 +1356,10 @@ int cmd_report(int argc, const char **argv) report.symbol_filter_str = argv[0]; }
- if (annotate_check_args(&report.annotation_opts) < 0) - return -EINVAL; + if (annotate_check_args(&report.annotation_opts) < 0) { + ret = -EINVAL; + goto exit; + }
if (report.mmaps_mode) report.tasks_mode = true; @@ -1369,12 +1373,14 @@ int cmd_report(int argc, const char **argv) if (symbol_conf.vmlinux_name && access(symbol_conf.vmlinux_name, R_OK)) { pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name); - return -EINVAL; + ret = -EINVAL; + goto exit; } if (symbol_conf.kallsyms_name && access(symbol_conf.kallsyms_name, R_OK)) { pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name); - return -EINVAL; + ret = -EINVAL; + goto exit; }
if (report.inverted_callchain) @@ -1398,12 +1404,14 @@ int cmd_report(int argc, const char **argv)
repeat: session = perf_session__new(&data, false, &report.tool); - if (IS_ERR(session)) - return PTR_ERR(session); + if (IS_ERR(session)) { + ret = PTR_ERR(session); + goto exit; + }
ret = evswitch__init(&report.evswitch, session->evlist, stderr); if (ret) - return ret; + goto exit;
if (zstd_init(&(session->zstd_data), 0) < 0) pr_warning("Decompression initialization failed. Reported data may be incomplete.\n"); @@ -1638,5 +1646,8 @@ int cmd_report(int argc, const char **argv)
zstd_fini(&(session->zstd_data)); perf_session__delete(session); +exit: + free(sort_order_help); + free(field_order_help); return ret; } diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 954ce2f594e9..3e5b7faf0c16 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -3335,6 +3335,16 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options sort_dimension__add("pid", &sched->cmp_pid); }
+static bool schedstat_events_exposed(void) +{ + /* + * Select "sched:sched_stat_wait" event to check + * whether schedstat tracepoints are exposed. + */ + return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ? + false : true; +} + static int __cmd_record(int argc, const char **argv) { unsigned int rec_argc, i, j; @@ -3346,21 +3356,33 @@ static int __cmd_record(int argc, const char **argv) "-m", "1024", "-c", "1", "-e", "sched:sched_switch", - "-e", "sched:sched_stat_wait", - "-e", "sched:sched_stat_sleep", - "-e", "sched:sched_stat_iowait", "-e", "sched:sched_stat_runtime", "-e", "sched:sched_process_fork", "-e", "sched:sched_wakeup_new", "-e", "sched:sched_migrate_task", }; + + /* + * The tracepoints trace_sched_stat_{wait, sleep, iowait} + * are not exposed to user if CONFIG_SCHEDSTATS is not set, + * to prevent "perf sched record" execution failure, determine + * whether to record schedstat events according to actual situation. + */ + const char * const schedstat_args[] = { + "-e", "sched:sched_stat_wait", + "-e", "sched:sched_stat_sleep", + "-e", "sched:sched_stat_iowait", + }; + unsigned int schedstat_argc = schedstat_events_exposed() ? + ARRAY_SIZE(schedstat_args) : 0; + struct tep_event *waking_event;
/* * +2 for either "-e", "sched:sched_wakeup" or * "-e", "sched:sched_waking" */ - rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1; + rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1; rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL) @@ -3376,6 +3398,9 @@ static int __cmd_record(int argc, const char **argv) else rec_argv[i++] = strdup("sched:sched_wakeup");
+ for (j = 0; j < schedstat_argc; j++) + rec_argv[i++] = strdup(schedstat_args[j]); + for (j = 1; j < (unsigned int)argc; j++, i++) rec_argv[i] = argv[j];
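For the perf sched change just above: the record argv is now sized with a runtime-determined schedstat_argc, and the optional "-e sched:sched_stat_*" pairs are appended only when the tracepoints are actually exposed. Below is a small sketch of the same "probe, then conditionally extend argv" idea; it uses a tracefs path check instead of perf's trace_event__tp_format(), and both the path and the availability test are assumptions for illustration, not what perf does.

#include <stdio.h>
#include <unistd.h>

#define MAX_ARGS 32

/* Crude availability probe: does the tracepoint directory exist?
 * (Assumes tracefs is mounted at /sys/kernel/tracing.) */
static int schedstat_events_exposed(void)
{
	return access("/sys/kernel/tracing/events/sched/sched_stat_wait",
		      F_OK) == 0;
}

int main(void)
{
	const char *argv_out[MAX_ARGS];
	const char *base[] = { "record", "-m", "1024", "-c", "1",
			       "-e", "sched:sched_switch" };
	const char *schedstat[] = { "-e", "sched:sched_stat_wait",
				    "-e", "sched:sched_stat_sleep" };
	unsigned int i, argc = 0;

	for (i = 0; i < sizeof(base) / sizeof(base[0]); i++)
		argv_out[argc++] = base[i];

	/* Only ask for schedstat events when the kernel exposes them. */
	if (schedstat_events_exposed())
		for (i = 0; i < sizeof(schedstat) / sizeof(schedstat[0]); i++)
			argv_out[argc++] = schedstat[i];

	for (i = 0; i < argc; i++)
		printf("%s%s", argv_out[i], i + 1 < argc ? " " : "\n");
	return 0;
}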
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 1280cbfad4db..c43c2963117d 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2534,6 +2534,12 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script) } }
+static void perf_script__exit(struct perf_script *script) +{ + perf_thread_map__put(script->threads); + perf_cpu_map__put(script->cpus); +} + static int __cmd_script(struct perf_script *script) { int ret; @@ -3991,8 +3997,10 @@ int cmd_script(int argc, const char **argv) zfree(&script.ptime_range); }
+ zstd_fini(&(session->zstd_data)); evlist__free_stats(session->evlist); perf_session__delete(session); + perf_script__exit(&script);
if (script_started) cleanup_scripting(); diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c index 656218179222..44a50527f9d9 100644 --- a/tools/perf/tests/event_update.c +++ b/tools/perf/tests/event_update.c @@ -88,6 +88,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu struct evsel *evsel; struct event_name tmp; struct evlist *evlist = evlist__new_default(); + char *unit = strdup("KRAVA");
TEST_ASSERT_VAL("failed to get evlist", evlist);
@@ -98,7 +99,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);
- evsel->unit = strdup("KRAVA"); + evsel->unit = unit;
TEST_ASSERT_VAL("failed to synthesize attr update unit", !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit)); @@ -118,6 +119,7 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu TEST_ASSERT_VAL("failed to synthesize attr update cpus", !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
- perf_cpu_map__put(evsel->core.own_cpus); + free(unit); + evlist__delete(evlist); return 0; } diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c index edcbc70ff9d6..1ac72919fa35 100644 --- a/tools/perf/tests/maps.c +++ b/tools/perf/tests/maps.c @@ -116,5 +116,7 @@ int test__maps__merge_in(struct test *t __maybe_unused, int subtest __maybe_unus
ret = check_maps(merged3, ARRAY_SIZE(merged3), &maps); TEST_ASSERT_VAL("merge check failed", !ret); + + maps__exit(&maps); return TEST_OK; } diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index ec4e3b21b831..b5efe675b321 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -61,6 +61,7 @@ static int session_write_header(char *path) TEST_ASSERT_VAL("failed to write header", !perf_session__write_header(session, session->evlist, data.file.fd, true));
+ evlist__delete(session->evlist); perf_session__delete(session);
return 0; diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index 8fca4779ae6a..70b91ce35178 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -20,7 +20,7 @@
static void close_dir(struct perf_data_file *files, int nr) { - while (--nr >= 1) { + while (--nr >= 0) { close(files[nr].fd); zfree(&files[nr].path); } diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index d786cf6b0cfa..ee15db2be2f4 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -1154,8 +1154,10 @@ struct map *dso__new_map(const char *name) struct map *map = NULL; struct dso *dso = dso__new(name);
- if (dso) + if (dso) { map = map__new2(0, dso); + dso__put(dso); + }
return map; } diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index bc5e4f294e9e..16a111b62cc3 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -186,10 +186,12 @@ void perf_env__exit(struct perf_env *env) zfree(&env->cpuid); zfree(&env->cmdline); zfree(&env->cmdline_argv); + zfree(&env->sibling_dies); zfree(&env->sibling_cores); zfree(&env->sibling_threads); zfree(&env->pmu_mappings); zfree(&env->cpu); + zfree(&env->cpu_pmu_caps); zfree(&env->numa_map);
for (i = 0; i < env->nr_numa_nodes; i++) diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c index 39062df02629..51424cdc3b68 100644 --- a/tools/perf/util/lzma.c +++ b/tools/perf/util/lzma.c @@ -69,7 +69,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (ferror(infile)) { pr_err("lzma: read error: %s\n", strerror(errno)); - goto err_fclose; + goto err_lzma_end; }
if (feof(infile)) @@ -83,7 +83,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (writen(output_fd, buf_out, write_size) != write_size) { pr_err("lzma: write error: %s\n", strerror(errno)); - goto err_fclose; + goto err_lzma_end; }
strm.next_out = buf_out; @@ -95,11 +95,13 @@ int lzma_decompress_to_file(const char *input, int output_fd) break;
pr_err("lzma: failed %s\n", lzma_strerror(ret)); - goto err_fclose; + goto err_lzma_end; } }
err = 0; +err_lzma_end: + lzma_end(&strm); err_fclose: fclose(infile); return err; diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 8af693d9678c..72e7f3616157 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -192,6 +192,8 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, if (!(prot & PROT_EXEC)) dso__set_loaded(dso); } + + nsinfo__put(dso->nsinfo); dso->nsinfo = nsi;
if (build_id__is_defined(bid)) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index a78c8d59a555..9cc89a047b15 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -180,8 +180,10 @@ struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user) struct map *map;
map = dso__new_map(target); - if (map && map->dso) + if (map && map->dso) { + nsinfo__put(map->dso->nsinfo); map->dso->nsinfo = nsinfo__get(nsi); + } return map; } else { return kernel_get_module_map(target); diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index 52273542e6ef..3f6de459ac2b 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c @@ -342,11 +342,11 @@ int probe_file__del_events(int fd, struct strfilter *filter)
ret = probe_file__get_events(fd, filter, namelist); if (ret < 0) - return ret; + goto out;
ret = probe_file__del_strlist(fd, namelist); +out: strlist__delete(namelist); - return ret; }
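Both the lzma.c and probe-file.c hunks above are the same shape of fix: once a resource has been acquired (the lzma stream, the allocated strlist), every error path has to fall through a cleanup label instead of returning directly. For reference, here is the canonical C error-unwind idiom as a standalone sketch; the file name and buffer size are arbitrary.

#include <stdio.h>
#include <stdlib.h>

static int process(const char *path)
{
	int err = -1;
	char *buf = NULL;
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;		/* nothing acquired yet */

	buf = malloc(4096);
	if (!buf)
		goto out_fclose;	/* only the file needs cleanup */

	if (fread(buf, 1, 4096, f) == 0)
		goto out_free;		/* both resources need cleanup */

	err = 0;
out_free:
	free(buf);
out_fclose:
	fclose(f);
	return err;
}

int main(int argc, char **argv)
{
	return process(argc > 1 ? argv[1] : "/etc/hostname") ? 1 : 0;
}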
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 88ce47f2547e..568a88c001c6 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -3370,7 +3370,7 @@ static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int add_key(sb, s[i].name, llen); }
-const char *sort_help(const char *prefix) +char *sort_help(const char *prefix) { struct strbuf sb; char *s; diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 87a092645aa7..b67c469aba79 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -302,7 +302,7 @@ void reset_output_field(void); void sort__setup_elide(FILE *fp); void perf_hpp__set_elide(int idx, bool elide);
-const char *sort_help(const char *prefix); +char *sort_help(const char *prefix);
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh index bf361f30d6ef..104a7a5f13b1 100755 --- a/tools/testing/selftests/net/icmp_redirect.sh +++ b/tools/testing/selftests/net/icmp_redirect.sh @@ -309,9 +309,10 @@ check_exception() fi log_test $? 0 "IPv4: ${desc}"
- if [ "$with_redirect" = "yes" ]; then + # No PMTU info for test "redirect" and "mtu exception plus redirect" + if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \ - grep -q "${H2_N2_IP6} from :: via ${R2_LLADDR} dev br0.*${mtu}" + grep -v "mtu" | grep -q "${H2_N2_IP6} .*via ${R2_LLADDR} dev br0" elif [ -n "${mtu}" ]; then ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \ grep -q "${mtu}" diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index fd99485cf2a4..e8ac852c6ff6 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -1341,7 +1341,7 @@ syncookies_tests() ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow run_tests $ns1 $ns2 10.0.1.1 - chk_join_nr "subflows limited by server w cookies" 2 2 1 + chk_join_nr "subflows limited by server w cookies" 2 1 1
# test signal address with cookies reset_with_cookies diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index f5ab5e0312e7..3e1fef3d8104 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -197,8 +197,10 @@ static int anon_release_pages(char *rel_area)
static void anon_allocate_area(void **alloc_area) { - if (posix_memalign(alloc_area, page_size, nr_pages * page_size)) { - fprintf(stderr, "out of memory\n"); + *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (*alloc_area == MAP_FAILED) + fprintf(stderr, "mmap of anonymous memory failed"); *alloc_area = NULL; } }
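The userfaultfd selftest change above replaces posix_memalign() with an anonymous mmap(), which both guarantees page alignment and keeps the test area out of the malloc heap. A minimal standalone use of the same call, with arbitrary sizes, for readers who want to try it outside the selftest:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 16 * (size_t)sysconf(_SC_PAGESIZE);
	char *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (area == MAP_FAILED) {
		perror("mmap of anonymous memory failed");
		return 1;
	}

	memset(area, 0xaa, len);	/* pages fault in on first touch */
	printf("mapped %zu bytes at %p\n", len, (void *)area);
	munmap(area, len);
	return 0;
}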