I'm announcing the release of the 6.6.87 kernel.
All users of the 6.6 kernel series must upgrade.
The updated 6.6.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.6.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git%3Ba=summa...
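For example, assuming you already have a clone of the stable tree, one way to fetch and check out this release is:

	git fetch git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.6.y
	git checkout FETCH_HEAD    # or check out the v6.6.87 tag once tags are fetched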
thanks,
greg k-h
------------
 Documentation/devicetree/bindings/vendor-prefixes.yaml | 2
 Makefile | 2
 arch/arm64/kernel/compat_alignment.c | 2
 arch/loongarch/Kconfig | 4
 arch/loongarch/include/asm/cache.h | 2
 arch/loongarch/kernel/kgdb.c | 5
 arch/loongarch/net/bpf_jit.c | 12
 arch/loongarch/net/bpf_jit.h | 5
 arch/powerpc/configs/mpc885_ads_defconfig | 2
 arch/powerpc/platforms/cell/spufs/gang.c | 1
 arch/powerpc/platforms/cell/spufs/inode.c | 63 ++-
 arch/powerpc/platforms/cell/spufs/spufs.h | 2
 arch/riscv/errata/Makefile | 6
 arch/riscv/include/asm/ftrace.h | 4
 arch/riscv/kvm/vcpu_pmu.c | 1
 arch/riscv/mm/hugetlbpage.c | 76 ++-
 arch/um/include/shared/os.h | 1
 arch/um/kernel/Makefile | 2
 arch/um/kernel/maccess.c | 19
 arch/um/os-Linux/process.c | 51 --
 arch/x86/Kconfig | 2
 arch/x86/entry/calling.h | 2
 arch/x86/events/intel/core.c | 43 +-
 arch/x86/events/intel/ds.c | 13
 arch/x86/events/perf_event.h | 3
 arch/x86/hyperv/hv_vtl.c | 1
 arch/x86/hyperv/ivm.c | 5
 arch/x86/include/asm/tlbflush.h | 2
 arch/x86/kernel/cpu/microcode/amd.c | 2
 arch/x86/kernel/cpu/sgx/driver.c | 10
 arch/x86/kernel/dumpstack.c | 5
 arch/x86/kernel/fpu/core.c | 6
 arch/x86/kernel/process.c | 7
 arch/x86/kernel/traps.c | 18
 arch/x86/kernel/tsc.c | 4
 arch/x86/lib/copy_user_64.S | 18
 arch/x86/mm/mem_encrypt_identity.c | 4
 arch/x86/mm/pat/cpa-test.c | 2
 arch/x86/mm/pat/memtype.c | 52 +-
 drivers/acpi/nfit/core.c | 2
 drivers/acpi/processor_idle.c | 4
 drivers/acpi/resource.c | 7
 drivers/acpi/x86/utils.c | 3
 drivers/base/power/main.c | 21 -
 drivers/base/power/runtime.c | 2
 drivers/clk/imx/clk-imx8mp-audiomix.c | 6
 drivers/clk/meson/g12a.c | 38 +
 drivers/clk/meson/gxbb.c | 14
 drivers/clk/qcom/gcc-msm8953.c | 2
 drivers/clk/qcom/mmcc-sdm660.c | 2
 drivers/clk/rockchip/clk-rk3328.c | 2
 drivers/clk/samsung/clk.c | 2
 drivers/cpufreq/cpufreq_governor.c | 45 +-
 drivers/cpufreq/scpi-cpufreq.c | 5
 drivers/crypto/hisilicon/sec2/sec.h | 1
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 125 ++----
 drivers/crypto/nx/nx-common-pseries.c | 37 -
 drivers/dma/fsl-edma-main.c | 2
 drivers/edac/i10nm_base.c | 2
 drivers/edac/ie31200_edac.c | 19
 drivers/edac/skx_common.c | 33 +
 drivers/edac/skx_common.h | 11
 drivers/firmware/cirrus/cs_dsp.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 2
 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 15
 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 16
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5
 drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 3
 drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 4
 drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 12
 drivers/gpu/drm/bridge/ite-it6505.c | 7
 drivers/gpu/drm/bridge/ti-sn65dsi86.c | 2
 drivers/gpu/drm/display/drm_dp_mst_topology.c | 8
 drivers/gpu/drm/mediatek/mtk_dp.c | 6
 drivers/gpu/drm/mediatek/mtk_dsi.c | 6
 drivers/gpu/drm/mediatek/mtk_hdmi.c | 33 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 4
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 3
 drivers/gpu/drm/msm/dsi/dsi_host.c | 8
 drivers/gpu/drm/msm/dsi/dsi_manager.c | 32 +
 drivers/gpu/drm/msm/msm_dsc_helper.h | 11
 drivers/gpu/drm/vkms/vkms_drv.c | 15
 drivers/gpu/drm/xlnx/zynqmp_dpsub.c | 2
 drivers/hid/Makefile | 1
 drivers/hid/i2c-hid/i2c-hid-core.c | 2
 drivers/hwmon/nct6775-core.c | 4
 drivers/hwtracing/coresight/coresight-catu.c | 2
 drivers/hwtracing/coresight/coresight-core.c | 20
 drivers/hwtracing/coresight/coresight-etm4x-core.c | 48 ++
 drivers/i3c/master/svc-i3c-master.c | 2
 drivers/iio/accel/mma8452.c | 10
 drivers/iio/accel/msa311.c | 26 -
 drivers/iio/adc/ad4130.c | 41 +-
 drivers/iio/adc/ad7124.c | 35 +
 drivers/infiniband/core/device.c | 9
 drivers/infiniband/core/mad.c | 38 -
 drivers/infiniband/core/sysfs.c | 1
 drivers/infiniband/hw/erdma/erdma_cm.c | 1
 drivers/infiniband/hw/mana/main.c | 2
 drivers/infiniband/hw/mlx5/cq.c | 2
 drivers/infiniband/hw/mlx5/odp.c | 10
 drivers/leds/led-core.c | 22 -
 drivers/media/dvb-frontends/dib8000.c | 5
 drivers/media/platform/allegro-dvt/allegro-core.c | 1
 drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c | 1
 drivers/media/rc/streamzap.c | 2
 drivers/memory/omap-gpmc.c | 20
 drivers/mfd/sm501.c | 6
 drivers/mmc/host/omap.c | 19
 drivers/mmc/host/sdhci-omap.c | 4
 drivers/mmc/host/sdhci-pxav3.c | 1
 drivers/net/arcnet/com20020-pci.c | 17
 drivers/net/dsa/mv88e6xxx/chip.c | 11
 drivers/net/dsa/mv88e6xxx/phy.c | 3
 drivers/net/ethernet/ibm/ibmveth.c | 39 +
 drivers/net/ethernet/intel/e1000e/defines.h | 3
 drivers/net/ethernet/intel/e1000e/ich8lan.c | 80 +++
 drivers/net/ethernet/intel/e1000e/ich8lan.h | 4
 drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3
 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 3
 drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 201 ++++++----
 drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 2
 drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 8
 drivers/net/usb/rndis_host.c | 16
 drivers/net/usb/usbnet.c | 6
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 20
 drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 86 ++--
 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 8
 drivers/ntb/hw/intel/ntb_hw_gen3.c | 3
 drivers/ntb/hw/mscc/ntb_hw_switchtec.c | 2
 drivers/ntb/test/ntb_perf.c | 4
 drivers/nvme/host/pci.c | 34 +
 drivers/nvme/host/tcp.c | 5
 drivers/pci/controller/cadence/pcie-cadence-ep.c | 3
 drivers/pci/controller/cadence/pcie-cadence.h | 2
 drivers/pci/controller/dwc/pcie-histb.c | 12
 drivers/pci/controller/pcie-brcmstb.c | 9
 drivers/pci/controller/pcie-xilinx-cpm.c | 10
 drivers/pci/hotplug/pciehp_hpc.c | 4
 drivers/pci/pci.c | 4
 drivers/pci/pcie/aspm.c | 17
 drivers/pci/pcie/portdrv.c | 8
 drivers/pci/probe.c | 5
 drivers/pci/setup-bus.c | 3
 drivers/pinctrl/intel/pinctrl-intel.c | 1
 drivers/pinctrl/renesas/pinctrl-rza2.c | 2
 drivers/pinctrl/renesas/pinctrl-rzg2l.c | 2
 drivers/pinctrl/renesas/pinctrl-rzv2m.c | 2
 drivers/pinctrl/tegra/pinctrl-tegra.c | 3
 drivers/platform/x86/dell/dell-wmi-ddv.c | 6
 drivers/platform/x86/intel/hid.c | 7
 drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 2
 drivers/platform/x86/intel/vsec.c | 7
 drivers/power/supply/max77693_charger.c | 2
 drivers/remoteproc/qcom_q6v5_mss.c | 21 -
 drivers/remoteproc/qcom_q6v5_pas.c | 12
 drivers/remoteproc/remoteproc_core.c | 1
 drivers/scsi/qla2xxx/qla_os.c | 2
 drivers/soundwire/slave.c | 1
 drivers/staging/rtl8723bs/Kconfig | 1
 drivers/thermal/intel/int340x_thermal/int3402_thermal.c | 3
 drivers/tty/n_tty.c | 13
 drivers/usb/host/xhci-mem.c | 6
 drivers/usb/typec/ucsi/ucsi_ccg.c | 5
 drivers/vhost/scsi.c | 25 -
 drivers/video/console/Kconfig | 2
 drivers/video/fbdev/au1100fb.c | 4
 drivers/video/fbdev/sm501fb.c | 7
 fs/affs/file.c | 9
 fs/btrfs/extent-tree.c | 5
 fs/exec.c | 15
 fs/exfat/fatent.c | 2
 fs/ext4/dir.c | 3
 fs/ext4/super.c | 27 -
 fs/fuse/dax.c | 1
 fs/fuse/dir.c | 2
 fs/fuse/file.c | 4
 fs/hostfs/hostfs.h | 2
 fs/hostfs/hostfs_kern.c | 7
 fs/hostfs/hostfs_user.c | 59 +-
 fs/isofs/dir.c | 3
 fs/jfs/jfs_dtree.c | 3
 fs/jfs/xattr.c | 15
 fs/nfs/delegation.c | 33 -
 fs/nfs/sysfs.c | 22 +
 fs/nfsd/nfs4state.c | 31 +
 fs/ntfs3/index.c | 4
 fs/ntfs3/ntfs.h | 2
 fs/ocfs2/alloc.c | 8
 fs/proc/base.c | 2
 fs/smb/client/cifsacl.c | 8
 fs/smb/client/connect.c | 16
 fs/smb/server/auth.c | 6
 fs/smb/server/mgmt/user_session.c | 33 +
 fs/smb/server/mgmt/user_session.h | 2
 fs/smb/server/oplock.c | 12
 fs/smb/server/smb2pdu.c | 40 +
 fs/smb/server/smbacl.c | 5
 include/drm/display/drm_dp_mst_helper.h | 7
 include/linux/context_tracking_irq.h | 8
 include/linux/coresight.h | 4
 include/linux/fwnode.h | 2
 include/linux/interrupt.h | 8
 include/linux/pgtable.h | 28 +
 include/linux/pm_runtime.h | 2
 include/linux/rcupdate.h | 2
 include/linux/sched/smt.h | 2
 include/linux/trace.h | 4
 include/linux/trace_events.h | 14
 include/rdma/ib_verbs.h | 1
 kernel/events/core.c | 46 +-
 kernel/events/ring_buffer.c | 2
 kernel/events/uprobes.c | 13
 kernel/fork.c | 4
 kernel/kexec_elf.c | 2
 kernel/locking/semaphore.c | 13
 kernel/sched/deadline.c | 2
 kernel/trace/bpf_trace.c | 2
 kernel/trace/ring_buffer.c | 4
 kernel/trace/trace.c | 23 -
 kernel/trace/trace.h | 1
 kernel/trace/trace_boot.c | 2
 kernel/trace/trace_events.c | 62 ++-
 kernel/trace/trace_events_hist.c | 133 +++++-
 kernel/trace/trace_events_synth.c | 36 +
 kernel/trace/trace_functions_graph.c | 1
 kernel/trace/trace_irqsoff.c | 2
 kernel/trace/trace_osnoise.c | 1
 kernel/trace/trace_sched_wakeup.c | 2
 kernel/watch_queue.c | 9
 lib/842/842_compress.c | 2
 lib/overflow_kunit.c | 3
 mm/memory.c | 13
 net/can/af_can.c | 12
 net/can/af_can.h | 12
 net/can/proc.c | 46 +-
 net/core/dst.c | 8
 net/core/rtnetlink.c | 3
 net/ipv4/ip_tunnel_core.c | 4
 net/ipv4/udp.c | 16
 net/ipv6/addrconf.c | 37 +
 net/ipv6/calipso.c | 21 -
 net/ipv6/route.c | 42 +-
 net/mac80211/sta_info.c | 20
 net/netfilter/nf_tables_api.c | 4
 net/netfilter/nft_set_hash.c | 3
 net/netfilter/nft_tunnel.c | 6
 net/openvswitch/actions.c | 6
 net/sched/act_tunnel_key.c | 2
 net/sched/cls_flower.c | 2
 net/sched/sch_skbprio.c | 3
 net/vmw_vsock/af_vsock.c | 6
 samples/ftrace/sample-trace-array.c | 2
 scripts/selinux/install_policy.sh | 15
 security/smack/smack.h | 6
 security/smack/smack_lsm.c | 10
 sound/pci/hda/patch_realtek.c | 33 +
 sound/soc/codecs/cs35l41-spi.c | 4
 sound/soc/codecs/rt5665.c | 24 -
 sound/soc/fsl/imx-card.c | 4
 sound/soc/ti/j721e-evm.c | 2
 tools/lib/bpf/linker.c | 2
 tools/objtool/check.c | 35 -
 tools/perf/bench/syscall.c | 22 -
 tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S | 2
 tools/perf/util/arm-spe.c | 8
 tools/perf/util/evlist.c | 13
 tools/perf/util/pmu.c | 7
 tools/perf/util/pmu.h | 5
 tools/perf/util/pmus.c | 20
 tools/perf/util/python.c | 17
 tools/perf/util/stat-shadow.c | 3
 tools/perf/util/units.c | 2
 tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c | 5
 tools/testing/selftests/bpf/progs/strncmp_bench.c | 5
 tools/testing/selftests/mm/cow.c | 2
 278 files changed, 2329 insertions(+), 1175 deletions(-)
Acs, Jakub (1): ext4: fix OOB read when checking dotdot dir
Al Viro (3): spufs: fix a leak on spufs_new_file() failure spufs: fix gang directory lifetimes spufs: fix a leak in spufs_create_context()
Alex Deucher (1): drm/amdgpu/gfx11: fix num_mec
Alex Hung (1): drm/amd/display: Check link_index before accessing dc->links[]
Alexandre Ghiti (1): riscv: Fix hugetlb retrieval of number of ptes in case of !present pte
Alistair Popple (1): fuse: fix dax truncate/punch_hole fault path
Andrii Nakryiko (1): libbpf: Fix hypothetical STT_SECTION extern NULL deref case
Andy Shevchenko (1): pinctrl: intel: Fix wrong bypass assignment in intel_pinctrl_probe_pwm()
AngeloGioacchino Del Regno (2): drm/mediatek: mtk_hdmi: Unregister audio platform device on failure drm/mediatek: mtk_hdmi: Fix typo for aud_sampe_size member
Angelos Oikonomopoulos (1): arm64: Don't call NULL in do_compat_alignment_fixup()
Anshuman Khandual (1): arch/powerpc: drop GENERIC_PTDUMP from mpc885_ads_defconfig
Antheas Kapenekakis (1): ALSA: hda/realtek: Fix Asus Z13 2025 audio
Antoine Tenart (1): net: decrease cached dst counters in dst_release
Armin Wolf (1): platform/x86: dell-ddv: Fix temperature calculation
Arnaldo Carvalho de Melo (5): perf units: Fix insufficient array space perf python: Fixup description of sample.id event member perf python: Decrement the refcount of just created event on failure perf python: Don't keep a raw_data pointer to consumed ring buffer space perf python: Check if there is space to copy all the event
Arnd Bergmann (2): x86/platform: Only allow CONFIG_EISA for 32-bit mdacon: rework dependency list
Artur Weber (1): power: supply: max77693: Fix wrong conversion of charge input threshold value
Atish Patra (1): RISC-V: KVM: Disable the kernel perf counter during configure
Barnabás Czémán (1): clk: qcom: mmcc-sdm660: fix stuck video_subcore0 clock
Bart Van Assche (1): fs/procfs: fix the comment above proc_pid_wchan()
Benjamin Berg (3): x86/fpu: Avoid copying dynamic FP state from init_task in arch_dup_task_struct() um: remove copy_from_kernel_nofault_allowed um: hostfs: avoid issues on inode number reuse by host
Benjamin Gaignard (1): media: verisilicon: HEVC: Initialize start_bit field
Boris Ostrovsky (1): x86/microcode/AMD: Fix __apply_microcode_amd()'s return value
Chao Gao (1): x86/fpu/xstate: Fix inconsistencies in guest FPU xfeatures
Cheng Xu (1): RDMA/erdma: Prevent use-after-free in erdma_accept_newconn()
Chenyuan Yang (1): thermal: int340x: Add NULL check for adev
Chiara Meiohas (1): RDMA/mlx5: Fix calculation of total invalidated pages
Christophe JAILLET (2): PCI: histb: Fix an error handling path in histb_pcie_probe() ASoC: codecs: rt5665: Fix some error handling paths in rt5665_probe()
Chuck Lever (1): NFSD: Skip sending CB_RECALL_ANY when the backchannel isn't up
Cong Wang (1): net_sched: skbprio: Remove overly strict queue assertions
Cyan Yang (1): selftests/mm/cow: fix the incorrect error handling
Dan Carpenter (4): PCI: Remove stray put_device() in pci_register_host_bridge() drm/mediatek: dsi: fix error codes in mtk_dsi_host_transfer() fs/ntfs3: Fix a couple integer overflows on 32bit systems fs/ntfs3: Prevent integer overflow in hdr_first_de()
Daniel Bárta (1): ALSA: hda: Fix speakers on ASUS EXPERTBOOK P5405CSA 1.0
Daniel Stodden (1): PCI/ASPM: Fix link state exit during switch upstream function removal
Danila Chernetsov (1): fbdev: sm501fb: Add some geometry checks.
Dave Marquardt (1): net: ibmveth: make veth_pool_store stop hanging
David E. Box (1): platform/x86/intel/vsec: Add Diamond Rapids support
David Hildenbrand (2): x86/mm/pat: Fix VM_PAT handling when fork() fails in copy_page_range() kernel/events/uprobes: handle device-exclusive entries correctly in __replace_page()
David Laight (1): objtool: Fix verbose disassembly if CROSS_COMPILE isn't set
David Oberhollenzer (1): net: dsa: mv88e6xxx: propperly shutdown PPU re-enable timer on destroy
Debin Zhu (1): netlabel: Fix NULL pointer exception caused by CALIPSO on IPv4 sockets
Dmitry Baryshkov (1): drm/msm/dpu: don't use active in atomic_check()
Dmitry Panchenko (1): platform/x86: intel-hid: fix volume buttons on Microsoft Surface Go 4 tablet
Douglas Anderson (1): drm/mediatek: dp: drm_err => dev_err in HPD path to avoid NULL ptr
Douglas Raillard (2): tracing: Ensure module defining synth event cannot be unloaded while tracing tracing: Fix synth event printk format for str fields
Emmanuel Grumbach (2): wifi: iwlwifi: mvm: use the right version of the rate API wifi: mac80211: flush the station before moving it to UN-AUTHORIZED state
Eric Sandeen (1): watch_queue: fix pipe accounting mismatch
Fabrizio Castro (3): pinctrl: renesas: rza2: Fix missing of_node_put() call pinctrl: renesas: rzg2l: Fix missing of_node_put() call pinctrl: renesas: rzv2m: Fix missing of_node_put() call
Feng Tang (1): PCI/portdrv: Only disable pciehp interrupts early when needed
Feng Yang (1): ring-buffer: Fix bytes_dropped calculation issue
Fernando Fernandez Mancera (1): ipv6: fix omitted netlink attributes when using RTEXT_FILTER_SKIP_STATS
Florian Westphal (1): netfilter: nf_tables: don't unregister hook when table is dormant
Geert Uytterhoeven (1): drm/bridge: ti-sn65dsi86: Fix multiple instances
Geetha sowjanya (2): octeontx2-af: Fix mbox INTR handler when num VFs > 64 octeontx2-af: Free NIX_AF_INT_VEC_GEN irq
Giovanni Gherdovich (1): ACPI: processor: idle: Return an error if both P_LVL{2,3} idle states are invalid
Greg Kroah-Hartman (1): Linux 6.6.87
Guilherme G. Piccoli (1): x86/tsc: Always save/restore TSC sched_clock() on suspend/resume
Guillaume Nault (1): tunnels: Accept PACKET_HOST in skb_tunnel_check_pmtu().
Hans Zhang (1): PCI: cadence-ep: Fix the driver to send MSG TLP for INTx without data payload
Hans de Goede (1): ACPI: x86: Extend Lenovo Yoga Tab 3 quirk with skip GPIO event-handlers
Hengqi Chen (3): LoongArch: BPF: Fix off-by-one error in build_prologue() LoongArch: BPF: Don't override subprog's return value LoongArch: BPF: Use move_addr() for BPF_PSEUDO_FUNC
Henry Martin (2): ASoC: imx-card: Add NULL check in imx_card_probe() arcnet: Add NULL check in com20020pci_probe()
Herbert Xu (1): crypto: nx - Fix uninitialised hv_nxc on error
Hermes Wu (1): drm/bridge: it6505: fix HDCP V match check is not performed correctly
Herton R. Krzesinski (1): x86/uaccess: Improve performance by aligning writes to 8 bytes in copy_user_generic(), on non-FSRM/ERMS CPUs
Hou Tao (1): bpf: Use preempt_count() directly in bpf_send_signal_common()
Huacai Chen (1): LoongArch: Increase ARCH_DMA_MINALIGN up to 16
Ian Rogers (2): perf stat: Fix find_stat for mixed legacy/non-legacy events perf evlist: Add success path to evlist__create_syswide_maps
Icenowy Zheng (2): nvme-pci: clean up CMBMSC when registering CMB fails nvme-pci: skip CMB blocks incompatible with PCI P2P DMA
Ido Schimmel (2): ipv6: Start path selection from the first nexthop ipv6: Do not consider link down nexthops in path selection
Ilkka Koskinen (1): coresight: catu: Fix number of pages while using 64k pages
Ilpo Järvinen (1): PCI: pciehp: Don't enable HPIE when resuming in poll mode
Ivan Orlov (1): kunit/overflow: Fix UB in overflow_allocation_test
James Clark (1): perf pmu: Don't double count common sysfs and json events
Jann Horn (3): x86/entry: Fix ORC unwinder for PUSH_REGS with save_ret=1 x86/dumpstack: Fix inaccurate unwinding from exception stacks due to misplaced assignment x86/mm: Fix flush_tlb_range() when used for zapping normal PMDs
Jayesh Choudhary (1): ASoC: ti: j721e-evm: Fix clock configuration for ti,j7200-cpb-audio compatible
Jerome Brunet (4): clk: amlogic: gxbb: drop incorrect flag on 32k clock clk: amlogic: g12b: fix cluster A parent data clk: amlogic: gxbb: drop non existing 32k clock parent clk: amlogic: g12a: fix mmc A peripheral clock
Jie Zhan (1): cpufreq: governor: Fix negative 'idle_time' handling in dbs_update()
Jim Quinlan (3): PCI: brcmstb: Use internal register to change link capability PCI: brcmstb: Fix error path after a call to regulator_bulk_get() PCI: brcmstb: Fix potential premature regulator disabling
Jiri Kosina (1): HID: remove superfluous (and wrong) Makefile entry for CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER
Jiri Slaby (SUSE) (1): tty: n_tty: use uint for space returned by tty_write_room()
Joe Hattori (2): media: platform: allgro-dvt: unregister v4l2_device on the error path soundwire: slave: fix an OF node reference leak in soundwire slave device
Johannes Berg (1): wifi: iwlwifi: fw: allocate chained SG tables for dump
Jonathan Cameron (2): iio: accel: mma8452: Ensure error return on failure to matching oversampling ratio iio: accel: msa311: Fix failure to release runtime pm if direct mode claim fails.
Josef Bacik (1): btrfs: handle errors from btrfs_dec_ref() properly
Josh Poimboeuf (6): x86/traps: Make exc_double_fault() consistently noreturn objtool, media: dib8000: Prevent divide-by-zero in dib8000_set_dds() objtool: Fix segfault in ignore_unreachable_insn() sched/smt: Always inline sched_smt_active() context_tracking: Always inline ct_{nmi,irq}_{enter,exit}() rcu-tasks: Always inline rcu_irq_work_resched()
José Expósito (1): drm/vkms: Fix use after free and double free on init error
Juhan Jin (1): riscv: ftrace: Add parentheses in macro definitions of make_call_t0 and make_call_ra
Kai-Heng Feng (1): PCI: Use downstream bridges for distributing resources
Kan Liang (1): perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read
Karel Balej (1): mmc: sdhci-pxav3: set NEED_RSP_BUSY capability
Kees Bakker (1): RDMA/mana_ib: Ensure variable err is initialized
Keith Busch (1): nvme-pci: fix stuck reset on concurrent DPC and HP
Kevin Loughlin (1): x86/sev: Add missing RIP_REL_REF() invocations during sme_enable()
Konstantin Andreev (1): smack: dont compile ipv6 code unless ipv6 is configured
Kuniyuki Iwashima (1): udp: Fix memory accounting leak.
Lama Kayal (1): net/mlx5e: SHAMPO, Make reserved size independent of page size
Laurentiu Mihalcea (1): clk: clk-imx8mp-audiomix: fix dsp/ocram_a clock parents
Leo Yan (1): perf arm-spe: Fix load-store operation checking
Li Lingfeng (1): nfsd: put dl_stid if fail to queue dl_recall
Lin Ma (2): netfilter: nft_tunnel: fix geneve_opt type confusion addition net: fix geneve_opt length integer overflow
Lubomir Rintel (1): rndis_host: Flag RNDIS modems as WWAN devices
Luca Weiss (3): remoteproc: qcom_q6v5_pas: Make single-PD handling more robust remoteproc: qcom_q6v5_pas: Use resource with CX PD for MSM8226 remoteproc: qcom_q6v5_mss: Handle platforms with one power domain
Maher Sanalla (1): IB/mad: Check available slots before posting receive WRs
Marcus Meissner (1): perf tools: annotate asm_pure_loop.S
Marijn Suijten (2): drm/msm/dsi: Use existing per-interface slice count in DSC timing drm/msm/dsi: Set PHY usescase (and mode) before registering DSI host
Mario Limonciello (2): ucsi_ccg: Don't show failed to get FW build information error drm/amd: Keep display off while going into S4
Mark Zhang (1): rtnetlink: Allocate vfinfo size for VF GUIDs when supported
Markus Elfring (2): fbdev: au1100fb: Move a variable assignment behind a null pointer check ntb_perf: Delete duplicate dmaengine_unmap_put() call in perf_copy_chunk()
Masami Hiramatsu (Google) (2): tracing/hist: Add poll(POLLIN) support on hist file tracing/hist: Support POLLPRI event for poll on histogram
Matthias Proske (1): wifi: brcmfmac: keep power during suspend if board requires it
Maud Spierings (1): dt-bindings: vendor-prefixes: add GOcontroll
Miaoqian Lin (2): ksmbd: use aead_request_free to match aead_request_alloc mmc: omap: Fix memory leak in mmc_omap_new_slot
Michael Kelley (1): x86/hyperv: Fix output argument to hypercall that changes page visibility
Mike Christie (1): vhost-scsi: Fix handling of multiple calls to vhost_scsi_set_endpoint
Mike Rapoport (Microsoft) (1): x86/mm/pat: cpa-test: fix length for CPA_ARRAY test
Murad Masimov (2): acpi: nfit: fix narrowing conversion in acpi_nfit_ctl media: streamzap: fix race between device disconnection and urb callback
Naman Jain (1): x86/hyperv/vtl: Stop kernel from probing VTL0 low memory
Namjae Jeon (6): ksmbd: fix multichannel connection failure ksmbd: fix r_count dec/increment mismatch cifs: fix incorrect validation for num_aces field of smb_acl ksmbd: add bounds check for durable handle context ksmbd: fix use-after-free in ksmbd_sessions_deregister() ksmbd: fix session use-after-free in multichannel connection
Navon John Lukose (1): ALSA: hda/realtek: Add mute LED quirk for HP Pavilion x360 14-dy1xxx
Nikita Shubin (1): ntb: intel: Fix using link status DB's
Nikita Zhandarovich (1): mfd: sm501: Switch to BIT() to mitigate integer overflows
Niklas Neronin (1): usb: xhci: correct debug message page size calculation
Nishanth Aravamudan (1): PCI: Avoid reset when disabled via sysfs
Norbert Szetei (2): ksmbd: add bounds check for create lease context ksmbd: validate zero num_subauth before sub_auth is accessed
Oleg Nesterov (1): exec: fix the racy usage of fs_struct->in_exec
Oliver Hartkopp (1): can: statistics: use atomic access in hot path
Pablo Neira Ayuso (1): netfilter: nft_set_hash: GC reaps elements with conncount for dynamic sets only
Palmer Dabbelt (1): RISC-V: errata: Use medany for relocatable builds
Patrisious Haddad (1): RDMA/mlx5: Fix mlx5_poll_one() cur_qp update flow
Paul Menzel (1): ACPI: resource: Skip IRQ override on ASUS Vivobook 14 X1404VAP
Peng Fan (2): remoteproc: core: Clear table_sz when rproc_shutdown dmaengine: fsl-edma: cleanup chan after dma_async_device_unregister
Peter Geis (1): clk: rockchip: rk3328: fix wrong clk_ref_usb3otg parent
Peter Zijlstra (2): lockdep/mm: Fix might_fault() lockdep check of current->mm->mmap_lock perf/core: Fix perf_pmu_register() vs. perf_init_event()
Peter Zijlstra (Intel) (1): perf/x86/intel: Apply static call for drain_pebs
Prathamesh Shete (1): pinctrl: tegra: Set SFIO mode to Mux Register
Qasim Ijaz (2): isofs: fix KMSAN uninit-value bug in do_isofs_readdir() jfs: fix slab-out-of-bounds read in ea_get()
Qiuxu Zhuo (4): EDAC/{skx_common,i10nm}: Fix some missing error reports on Emerald Rapids EDAC/ie31200: Fix the size of EDAC_MC_LAYER_CHIP_SELECT layer EDAC/ie31200: Fix the DIMM size mask for several SoCs EDAC/ie31200: Fix the error path order of ie31200_init()
Rafael J. Wysocki (2): PM: sleep: Adjust check before setting power.must_resume PM: sleep: Fix handling devices with direct_complete set on errors
Ran Xiaokai (1): tracing/osnoise: Fix possible recursive locking for cpus_read_lock()
Remi Pommarel (1): leds: Fix LED_OFF brightness race
Richard Fitzgerald (1): firmware: cs_dsp: Ensure cs_dsp_load[_coeff]() returns 0 on success
Roger Quadros (1): memory: omap-gpmc: drop no compatible check
Roman Gushchin (1): RDMA/core: Don't expose hw_counters outside of init net namespace
Roman Smirnov (1): jfs: add index corruption check to DT_GETPAGE()
Sagi Grimberg (1): nvme-tcp: fix possible UAF in nvme_tcp_poll
Saket Kumar Bhaskar (1): selftests/bpf: Select NUMA_NO_NODE to create map
Sebastian Andrzej Siewior (1): lockdep: Don't disable interrupts on RT in disable_irq_nosync_lockdep.*()
Shrikanth Hegde (1): sched/deadline: Use online cpus for validating runtime
Simon Tatham (2): affs: generate OFS sequence numbers starting at 1 affs: don't write overlarge OFS data block size fields
Sourabh Jain (1): kexec: initialize ELF lowest address to ULONG_MAX
Srinivas Pandruvada (1): platform/x86: ISST: Correct command storage data length
Srinivasan Shanmugam (1): drm/amdkfd: Fix Circular Locking Dependency in 'svm_range_cpu_invalidate_pagetables'
Stanislav Spassov (1): x86/fpu: Fix guest FPU state buffer allocation size
Stanley Chu (1): i3c: master: svc: Fix missing the IBI rules
Stefan Binding (2): ALSA: hda/realtek: Add support for ASUS ROG Strix G614 Laptops using CS35L41 HDA ALSA: hda/realtek: Add support for ASUS Zenbook UM3406KA Laptops using CS35L41 HDA
Stefano Garzarella (1): vsock: avoid timeout during connect() if the socket is closing
Steven Rostedt (2): tracing: Switch trace_events_hist.c code over to use guard() tracing: Do not use PERF enums when perf is not defined
Steven Rostedt (Google) (1): tracing: Allow creating instances with specified system events
Takashi Iwai (2): ALSA: hda/realtek: Always honor no_shutup_pins ALSA: hda/realtek: Fix built-in mic breakage on ASUS VivoBook X515JA
Tanya Agarwal (1): lib: 842: Improve error handling in sw842_compress()
Tao Chen (1): perf/ring_buffer: Allow the EPOLLRDNORM flag for poll
Tasos Sahanidis (1): hwmon: (nct6775-core) Fix out of bounds access for NCT679{8,9}
Tengda Wu (2): tracing: Correct the refcount if the hist/hist_debug file fails to open tracing: Fix use-after-free in print_graph_function_flags during tracer switching
Thadeu Lima de Souza Cascardo (1): drm/amd/display: avoid NPD when ASIC does not support DMUB
Theodore Ts'o (1): ext4: don't over-report free space or inodes in statvfs
Thippeswamy Havalige (1): PCI: xilinx-cpm: Fix IRQ domain leak in error path of probe
Thomas Richter (1): perf bench: Fix perf bench syscall loop count
Tianyu Lan (1): x86/hyperv: Fix check of return value from snp_set_vmsa()
Tim Schumacher (1): selinux: Chain up tool resolving errors in install_policy.sh
Tobias Waldekranz (1): net: mvpp2: Prevent parser TCAM memory corruption
Tomi Valkeinen (1): drm: xlnx: zynqmp: Fix max dma segment size
Trond Myklebust (2): NFSv4: Don't trigger uneccessary scans for return-on-close delegations NFS: Shut down the nfs_client only after all the superblocks
Ulf Hansson (1): mmc: sdhci-omap: Disable MMC_CAP_AGGRESSIVE_PM for eMMC/SD
Uwe Kleine-König (2): iio: adc: ad4130: Fix comparison of channel setups iio: adc: ad7124: Fix comparison of channel configs
Vasiliy Kovalev (1): ocfs2: validate l_tree_depth to avoid out-of-bounds access
Viktor Malik (1): selftests/bpf: Fix string read in strncmp benchmark
Vitaliy Shevtsov (2): ASoC: cs35l41: check the return value from spi_setup() drm/amd/display: fix type mismatch in CalculateDynamicMetadataParameters()
Vitaly Lifshits (1): e1000e: change k1 configuration on MTP and later platforms
Vladimir Lypak (1): clk: qcom: gcc-msm8953: fix stuck venus0_core0 clock
Vladis Dronov (1): x86/sgx: Warn explicitly if X86_FEATURE_SGX_LC is not enabled
Waiman Long (1): locking/semaphore: Use wake_q to wake up processes outside lock critical section
Wang Zhaolong (1): smb: client: Fix netns refcount imbalance causing leaks and use-after-free
Wayne Lin (1): drm/dp_mst: Fix drm RAD print
Wenkai Lin (3): crypto: hisilicon/sec2 - fix for aead authsize alignment crypto: hisilicon/sec2 - fix for sec spec check crypto: hisilicon/sec2 - fix for aead auth key length
Wentao Guan (1): HID: i2c-hid: improve i2c_hid_get_report error message
Will McVicker (1): clk: samsung: Fix UBSAN panic in samsung_clk_init()
Yajun Deng (1): ntb_hw_switchtec: Fix shift-out-of-bounds in switchtec_ntb_mw_set_trans
Yeoreum Yun (1): perf/core: Fix child_total_time_enabled accounting bug at task exit
Ying Lu (1): usbnet:fix NPE during rx_complete
Yuanfang Zhang (1): coresight-etm4x: add isb() before reading the TRCSTATR
Yuezhang Mo (1): exfat: fix the infinite loop in exfat_find_last_cluster()
Yuli Wang (1): LoongArch: Rework the arch_kgdb_breakpoint() implementation
Zijun Hu (1): of: property: Increase NR_FWNODE_REFERENCE_ARGS
zuoqian (1): cpufreq: scpi: compare kHz instead of Hz
谢致邦 (XIE Zhibang) (2): staging: rtl8723bs: select CONFIG_CRYPTO_LIB_AES LoongArch: Fix help text of CMDLINE_EXTEND in Kconfig
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 12a16031d7b6..dc275ab60e53 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -524,6 +524,8 @@ patternProperties: description: GlobalTop Technology, Inc. "^gmt,.*": description: Global Mixed-mode Technology, Inc. + "^gocontroll,.*": + description: GOcontroll Modular Embedded Electronics B.V. "^goldelico,.*": description: Golden Delicious Computers GmbH & Co. KG "^goodix,.*": diff --git a/Makefile b/Makefile index 2b22872d3cea..45f6b7d3d51e 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 6 -SUBLEVEL = 86 +SUBLEVEL = 87 EXTRAVERSION = NAME = Pinguïn Aangedreven
diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c index deff21bfa680..b68e1d328d4c 100644 --- a/arch/arm64/kernel/compat_alignment.c +++ b/arch/arm64/kernel/compat_alignment.c @@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs) return 1; }
+ if (!handler) + return 1; type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 9fd8644a9a4c..623cf80639de 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -356,8 +356,8 @@ config CMDLINE_BOOTLOADER config CMDLINE_EXTEND bool "Use built-in to extend bootloader kernel arguments" help - The command-line arguments provided during boot will be - appended to the built-in command line. This is useful in + The built-in command line will be appended to the command- + line arguments provided during boot. This is useful in cases where the provided arguments are insufficient and you don't want to or cannot modify them.
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h index 1b6d09617199..aa622c754414 100644 --- a/arch/loongarch/include/asm/cache.h +++ b/arch/loongarch/include/asm/cache.h @@ -8,6 +8,8 @@ #define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN (16) + #define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */ diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c index 445c452d72a7..7be5b4c0c900 100644 --- a/arch/loongarch/kernel/kgdb.c +++ b/arch/loongarch/kernel/kgdb.c @@ -8,6 +8,7 @@ #include <linux/hw_breakpoint.h> #include <linux/kdebug.h> #include <linux/kgdb.h> +#include <linux/objtool.h> #include <linux/processor.h> #include <linux/ptrace.h> #include <linux/sched.h> @@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) regs->csr_era = pc; }
-void arch_kgdb_breakpoint(void) +noinline void arch_kgdb_breakpoint(void) { __asm__ __volatile__ ( \ ".globl kgdb_breakinst\n\t" \ - "nop\n" \ "kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */ } +STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/* * Calls linux_debug_hook before the kernel dies. If KGDB is enabled, diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index 6595e992fda8..dcb1428b458c 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx) */ if (seen_tail_call(ctx) && seen_call(ctx)) move_reg(ctx, TCC_SAVED, REG_TCC); + else + emit_insn(ctx, nop);
ctx->stack_size = stack_adjust; } @@ -842,7 +844,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
move_addr(ctx, t1, func_addr); emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0); - move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + + if (insn->src_reg != BPF_PSEUDO_CALL) + move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + break;
/* tail call */ @@ -867,7 +872,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext { const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
- move_imm(ctx, dst, imm64, is32); + if (bpf_pseudo_func(insn)) + move_addr(ctx, dst, imm64); + else + move_imm(ctx, dst, imm64, is32); return 1; }
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index 68586338ecf8..f9c569f53949 100644 --- a/arch/loongarch/net/bpf_jit.h +++ b/arch/loongarch/net/bpf_jit.h @@ -27,6 +27,11 @@ struct jit_data { struct jit_ctx ctx; };
+static inline void emit_nop(union loongarch_instruction *insn) +{ + insn->word = INSN_NOP; +} + #define emit_insn(ctx, func, ...) \ do { \ if (ctx->image != NULL) { \ diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 56b876e418e9..6b998cb57255 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y CONFIG_DETECT_HUNG_TASK=y CONFIG_BDI_SWITCH=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_GENERIC_PTDUMP=y +CONFIG_PTDUMP_DEBUGFS=y diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c index 827d338deaf4..2c2999de6bfa 100644 --- a/arch/powerpc/platforms/cell/spufs/gang.c +++ b/arch/powerpc/platforms/cell/spufs/gang.c @@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void) mutex_init(&gang->aff_mutex); INIT_LIST_HEAD(&gang->list); INIT_LIST_HEAD(&gang->aff_list_head); + gang->alive = 1;
out: return gang; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 38c5be34c895..3216245a648a 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -191,13 +191,32 @@ static int spufs_fill_dir(struct dentry *dir, return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); - if (ret) + if (ret) { + dput(dentry); return ret; + } files++; } return 0; }
+static void unuse_gang(struct dentry *dir) +{ + struct inode *inode = dir->d_inode; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; + + if (gang) { + bool dead; + + inode_lock(inode); // exclusion with spufs_create_context() + dead = !--gang->alive; + inode_unlock(inode); + + if (dead) + simple_recursive_removal(dir, NULL); + } +} + static int spufs_dir_close(struct inode *inode, struct file *file) { struct inode *parent; @@ -212,6 +231,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) inode_unlock(parent); WARN_ON(ret);
+ unuse_gang(dir->d_parent); return dcache_dir_close(inode, file); }
@@ -404,7 +424,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, { int ret; int affinity; - struct spu_gang *gang; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry};
@@ -419,11 +439,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) return -ENODEV;
- gang = NULL; + if (gang) { + if (!gang->alive) + return -ENOENT; + gang->alive++; + } + neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { - gang = SPUFS_I(inode)->i_gang; if (!gang) return -EINVAL; mutex_lock(&gang->aff_mutex); @@ -435,8 +459,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, }
ret = spufs_mkdir(inode, dentry, flags, mode & 0777); - if (ret) + if (ret) { + if (neighbor) + put_spu_context(neighbor); goto out_aff_unlock; + }
if (affinity) { spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, @@ -452,6 +479,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); + if (ret && gang) + gang->alive--; // can't reach 0 return ret; }
@@ -481,6 +510,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode); + dget(dentry); inc_nlink(dir); inc_nlink(d_inode(dentry)); return ret; @@ -491,6 +521,21 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) return ret; }
+static int spufs_gang_close(struct inode *inode, struct file *file) +{ + unuse_gang(file->f_path.dentry); + return dcache_dir_close(inode, file); +} + +static const struct file_operations spufs_gang_fops = { + .open = dcache_dir_open, + .release = spufs_gang_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .iterate_shared = dcache_readdir, + .fsync = noop_fsync, +}; + static int spufs_gang_open(const struct path *path) { int ret; @@ -510,7 +555,7 @@ static int spufs_gang_open(const struct path *path) return PTR_ERR(filp); }
- filp->f_op = &simple_dir_operations; + filp->f_op = &spufs_gang_fops; fd_install(ret, filp); return ret; } @@ -525,10 +570,8 @@ static int spufs_create_gang(struct inode *inode, ret = spufs_mkgang(inode, dentry, mode & 0777); if (!ret) { ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); - } + if (ret < 0) + unuse_gang(dentry); } return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 84958487f696..d33787c57c39 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -151,6 +151,8 @@ struct spu_gang { int aff_flags; struct spu *aff_ref_spu; atomic_t aff_sched_count; + + int alive; };
/* Flag bits for spu_gang aff_flags */ diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile index 8a2739485123..f96ace8ea1df 100644 --- a/arch/riscv/errata/Makefile +++ b/arch/riscv/errata/Makefile @@ -1,5 +1,9 @@ ifdef CONFIG_RELOCATABLE -KBUILD_CFLAGS += -fno-pie +# We can't use PIC/PIE when handling early-boot errata parsing, as the kernel +# doesn't have a GOT setup at that point. So instead just use medany: it's +# usually position-independent, so it should be good enough for the errata +# handling. +KBUILD_CFLAGS += -fno-pie -mcmodel=medany endif
obj-$(CONFIG_ERRATA_ANDES) += andes/ diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 42777f91a9c5..9004dfec8c85 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -103,7 +103,7 @@ struct dyn_arch_ftrace { #define make_call_t0(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_t0(offset); \ call[1] = to_jalr_t0(offset); \ } while (0) @@ -119,7 +119,7 @@ do { \ #define make_call_ra(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_ra(offset); \ call[1] = to_jalr_ra(offset); \ } while (0) diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index cee1b9ca4ec4..e2e2a115afb5 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -468,6 +468,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba .type = etype, .size = sizeof(struct perf_event_attr), .pinned = true, + .disabled = true, /* * It should never reach here if the platform doesn't support the sscofpmf * extension as mode filtering won't work without it. diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index c9d70dc310d5..57afbc3270a3 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -148,22 +148,25 @@ unsigned long hugetlb_mask_last_page(struct hstate *h) static pte_t get_clear_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, - unsigned long pte_num) + unsigned long ncontig) { - pte_t orig_pte = ptep_get(ptep); - unsigned long i; - - for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) { - pte_t pte = ptep_get_and_clear(mm, addr, ptep); - - if (pte_dirty(pte)) - orig_pte = pte_mkdirty(orig_pte); - - if (pte_young(pte)) - orig_pte = pte_mkyoung(orig_pte); + pte_t pte, tmp_pte; + bool present; + + pte = ptep_get_and_clear(mm, addr, ptep); + present = pte_present(pte); + while (--ncontig) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = ptep_get_and_clear(mm, addr, ptep); + if (present) { + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } } - - return orig_pte; + return pte; }
static pte_t get_clear_contig_flush(struct mm_struct *mm, @@ -212,6 +215,26 @@ static void clear_flush(struct mm_struct *mm, flush_tlb_range(&vma, saddr, addr); }
+static int num_contig_ptes_from_size(unsigned long sz, size_t *pgsize) +{ + unsigned long hugepage_shift; + + if (sz >= PGDIR_SIZE) + hugepage_shift = PGDIR_SHIFT; + else if (sz >= P4D_SIZE) + hugepage_shift = P4D_SHIFT; + else if (sz >= PUD_SIZE) + hugepage_shift = PUD_SHIFT; + else if (sz >= PMD_SIZE) + hugepage_shift = PMD_SHIFT; + else + hugepage_shift = PAGE_SHIFT; + + *pgsize = 1 << hugepage_shift; + + return sz >> hugepage_shift; +} + /* * When dealing with NAPOT mappings, the privileged specification indicates that * "if an update needs to be made, the OS generally should first mark all of the @@ -226,22 +249,10 @@ void set_huge_pte_at(struct mm_struct *mm, pte_t pte, unsigned long sz) { - unsigned long hugepage_shift, pgsize; + size_t pgsize; int i, pte_num;
- if (sz >= PGDIR_SIZE) - hugepage_shift = PGDIR_SHIFT; - else if (sz >= P4D_SIZE) - hugepage_shift = P4D_SHIFT; - else if (sz >= PUD_SIZE) - hugepage_shift = PUD_SHIFT; - else if (sz >= PMD_SIZE) - hugepage_shift = PMD_SHIFT; - else - hugepage_shift = PAGE_SHIFT; - - pte_num = sz >> hugepage_shift; - pgsize = 1 << hugepage_shift; + pte_num = num_contig_ptes_from_size(sz, &pgsize);
if (!pte_present(pte)) { for (i = 0; i < pte_num; i++, ptep++, addr += pgsize) @@ -295,13 +306,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) { + size_t pgsize; pte_t orig_pte = ptep_get(ptep); int pte_num;
if (!pte_napot(orig_pte)) return ptep_get_and_clear(mm, addr, ptep);
- pte_num = napot_pte_num(napot_cont_order(orig_pte)); + pte_num = num_contig_ptes_from_size(sz, &pgsize);
return get_clear_contig(mm, addr, ptep, pte_num); } @@ -351,6 +363,7 @@ void huge_pte_clear(struct mm_struct *mm, pte_t *ptep, unsigned long sz) { + size_t pgsize; pte_t pte = ptep_get(ptep); int i, pte_num;
@@ -359,8 +372,9 @@ void huge_pte_clear(struct mm_struct *mm, return; }
- pte_num = napot_pte_num(napot_cont_order(pte)); - for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) + pte_num = num_contig_ptes_from_size(sz, &pgsize); + + for (i = 0; i < pte_num; i++, addr += pgsize, ptep++) pte_clear(mm, addr, ptep); }
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 0df646c6651e..3b382da2996f 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -211,7 +211,6 @@ extern int os_protect_memory(void *addr, unsigned long len, extern int os_unmap_memory(void *addr, int len); extern int os_drop_memory(void *addr, int length); extern int can_drop_memory(void); -extern int os_mincore(void *addr, unsigned long len);
/* execvp.c */ extern int execvp_noalloc(char *buf, const char *file, char *const argv[]); diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 811188be954c..321250f3f570 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -17,7 +17,7 @@ extra-y := vmlinux.lds obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \ physmem.o process.o ptrace.o reboot.o sigio.o \ signal.o sysrq.o time.o tlb.o trap.o \ - um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/ + um_arch.o umid.o kmsg_dump.o capflags.o skas/ obj-y += load_file.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c deleted file mode 100644 index 8ccd56813f68..000000000000 --- a/arch/um/kernel/maccess.c +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013 Richard Weinberger richrd@nod.at - */ - -#include <linux/uaccess.h> -#include <linux/kernel.h> -#include <os.h> - -bool copy_from_kernel_nofault_allowed(const void *src, size_t size) -{ - void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE); - - if ((unsigned long)src < PAGE_SIZE || size <= 0) - return false; - if (os_mincore(psrc, size + src - psrc) <= 0) - return false; - return true; -} diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c index e52dd37ddadc..2686120ab232 100644 --- a/arch/um/os-Linux/process.c +++ b/arch/um/os-Linux/process.c @@ -223,57 +223,6 @@ int __init can_drop_memory(void) return ok; }
-static int os_page_mincore(void *addr) -{ - char vec[2]; - int ret; - - ret = mincore(addr, UM_KERN_PAGE_SIZE, vec); - if (ret < 0) { - if (errno == ENOMEM || errno == EINVAL) - return 0; - else - return -errno; - } - - return vec[0] & 1; -} - -int os_mincore(void *addr, unsigned long len) -{ - char *vec; - int ret, i; - - if (len <= UM_KERN_PAGE_SIZE) - return os_page_mincore(addr); - - vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); - if (!vec) - return -ENOMEM; - - ret = mincore(addr, UM_KERN_PAGE_SIZE, vec); - if (ret < 0) { - if (errno == ENOMEM || errno == EINVAL) - ret = 0; - else - ret = -errno; - - goto out; - } - - for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) { - if (!(vec[i] & 1)) { - ret = 0; - goto out; - } - } - - ret = 1; -out: - free(vec); - return ret; -} - void init_new_thread_signals(void) { set_handler(SIGSEGV); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a06fab5016fd..a0af6e8d584b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -215,7 +215,7 @@ config X86 select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64 select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS - select HAVE_EISA + select HAVE_EISA if X86_32 select HAVE_EXIT_THREAD select HAVE_FAST_GUP select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index f6907627172b..01e9593e2bd9 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -70,6 +70,8 @@ For 32-bit we have the following conventions - kernel is built with pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ + /* We just clobbered the return address - use the IRET frame for unwinding: */ + UNWIND_HINT_IRET_REGS offset=3*8 .else pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index b163817ad6da..66d5782df18f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2720,28 +2720,33 @@ static u64 adl_update_topdown_event(struct perf_event *event)
DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
-static void intel_pmu_read_topdown_event(struct perf_event *event) +static void intel_pmu_read_event(struct perf_event *event) { - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + bool pmu_enabled = cpuc->enabled;
- /* Only need to call update_topdown_event() once for group read. */ - if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && - !is_slots_event(event)) - return; + /* Only need to call update_topdown_event() once for group read. */ + if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ)) + return;
- perf_pmu_disable(event->pmu); - static_call(intel_pmu_update_topdown_event)(event); - perf_pmu_enable(event->pmu); -} + cpuc->enabled = 0; + if (pmu_enabled) + intel_pmu_disable_all();
-static void intel_pmu_read_event(struct perf_event *event) -{ - if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) - intel_pmu_auto_reload_read(event); - else if (is_topdown_count(event)) - intel_pmu_read_topdown_event(event); - else - x86_perf_event_update(event); + if (is_topdown_event(event)) + static_call(intel_pmu_update_topdown_event)(event); + else + intel_pmu_drain_pebs_buffer(); + + cpuc->enabled = pmu_enabled; + if (pmu_enabled) + intel_pmu_enable_all(0); + + return; + } + + x86_perf_event_update(event); }
static void intel_pmu_enable_fixed(struct perf_event *event) @@ -3006,7 +3011,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
handled++; x86_pmu_handle_guest_pebs(regs, &data); - x86_pmu.drain_pebs(regs, &data); + static_call(x86_pmu_drain_pebs)(regs, &data); status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/* diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index d9a51b638931..dcb1e9b8d866 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -843,11 +843,11 @@ int intel_pmu_drain_bts_buffer(void) return 1; }
-static inline void intel_pmu_drain_pebs_buffer(void) +void intel_pmu_drain_pebs_buffer(void) { struct perf_sample_data data;
- x86_pmu.drain_pebs(NULL, &data); + static_call(x86_pmu_drain_pebs)(NULL, &data); }
/* @@ -1965,15 +1965,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) return NULL; }
-void intel_pmu_auto_reload_read(struct perf_event *event) -{ - WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)); - - perf_pmu_disable(event->pmu); - intel_pmu_drain_pebs_buffer(); - perf_pmu_enable(event->pmu); -} - /* * Special variant of intel_pmu_save_and_restart() for auto-reload. */ diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index c8ba2be7585d..4564521296ac 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1052,6 +1052,7 @@ extern struct x86_pmu x86_pmu __read_mostly;
DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period); DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update); +DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) { @@ -1539,7 +1540,7 @@ void intel_pmu_pebs_disable_all(void);
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
-void intel_pmu_auto_reload_read(struct perf_event *event); +void intel_pmu_drain_pebs_buffer(void);
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c index c2f78fabc865..b12bef0ff7bb 100644 --- a/arch/x86/hyperv/hv_vtl.c +++ b/arch/x86/hyperv/hv_vtl.c @@ -30,6 +30,7 @@ void __init hv_vtl_init_platform(void) x86_platform.realmode_init = x86_init_noop; x86_init.irqs.pre_vector_init = x86_init_noop; x86_init.timers.timer_init = x86_init_noop; + x86_init.resources.probe_roms = x86_init_noop;
/* Avoid searching for BIOS MP tables */ x86_init.mpparse.find_smp_config = x86_init_noop; diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index 8c6bf07f7d2b..87eabfcc1d1c 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -338,7 +338,7 @@ int hv_snp_boot_ap(int cpu, unsigned long start_ip) vmsa->sev_features = sev_status >> 2;
ret = snp_set_vmsa(vmsa, true); - if (!ret) { + if (ret) { pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret); free_page((u64)vmsa); return ret; @@ -464,7 +464,6 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], enum hv_mem_host_visibility visibility) { struct hv_gpa_range_for_visibility *input; - u16 pages_processed; u64 hv_status; unsigned long flags;
@@ -493,7 +492,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn)); hv_status = hv_do_rep_hypercall( HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count, - 0, input, &pages_processed); + 0, input, NULL); local_irq_restore(flags);
if (hv_result_success(hv_status)) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 5d61adc6e892..a496d9dc75d9 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -242,7 +242,7 @@ void flush_tlb_multi(const struct cpumask *cpumask, flush_tlb_mm_range((vma)->vm_mm, start, end, \ ((vma)->vm_flags & VM_HUGETLB) \ ? huge_page_shift(hstate_vma(vma)) \ - : PAGE_SHIFT, false) + : PAGE_SHIFT, true)
extern void flush_tlb_all(void); extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index c683e8dedfee..0ee172ce2d21 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -603,7 +603,7 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev, unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize)) - return -1; + return false;
native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c index 262f5fb18d74..c453953d5a33 100644 --- a/arch/x86/kernel/cpu/sgx/driver.c +++ b/arch/x86/kernel/cpu/sgx/driver.c @@ -150,13 +150,15 @@ int __init sgx_drv_init(void) u64 xfrm_mask; int ret;
- if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) + if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) { + pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n"); return -ENODEV; + }
cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
if (!(eax & 1)) { - pr_err("SGX disabled: SGX1 instruction support not available.\n"); + pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n"); return -ENODEV; }
@@ -173,8 +175,10 @@ int __init sgx_drv_init(void) }
ret = misc_register(&sgx_dev_enclave); - if (ret) + if (ret) { + pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret); return ret; + }
return 0; } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index f18ca44c904b..52dc5839d1e8 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -195,6 +195,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack); + stack = stack ?: get_stack_pointer(task, regs); regs = unwind_get_entry_regs(&state, &partial);
/* @@ -213,9 +214,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - hardirq stack * - entry stack */ - for (stack = stack ?: get_stack_pointer(task, regs); - stack; - stack = stack_info.next_sp) { + for (; stack; stack = stack_info.next_sp) { const char *stack_name;
stack = PTR_ALIGN(stack, sizeof(long)); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 4b414b0ab069..aaed20f46be4 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -220,7 +220,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) struct fpstate *fpstate; unsigned int size;
- size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); + size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); fpstate = vzalloc(size); if (!fpstate) return false; @@ -232,8 +232,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) fpstate->is_guest = true;
gfpu->fpstate = fpstate; - gfpu->xfeatures = fpu_user_cfg.default_features; - gfpu->perm = fpu_user_cfg.default_features; + gfpu->xfeatures = fpu_kernel_cfg.default_features; + gfpu->perm = fpu_kernel_cfg.default_features;
/* * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 5351f293f770..bbe11363550b 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -92,7 +92,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - memcpy(dst, src, arch_task_struct_size); + /* init_task is not dynamically sized (incomplete FPU state) */ + if (unlikely(src == &init_task)) + memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0); + else + memcpy(dst, src, arch_task_struct_size); + #ifdef CONFIG_VM86 dst->thread.vm86 = NULL; #endif diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 37b8e20c03a9..d8d9bc5a9b32 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -377,6 +377,21 @@ __visible void __noreturn handle_stack_overflow(struct pt_regs *regs, } #endif
+/* + * Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64 + * version of exc_double_fault() as noreturn. Otherwise the noreturn mismatch + * between configs triggers objtool warnings. + * + * This is a temporary hack until we have compiler or plugin support for + * annotating noreturns. + */ +#ifdef CONFIG_X86_ESPFIX64 +#define always_true() true +#else +bool always_true(void); +bool __weak always_true(void) { return true; } +#endif + /* * Runs on an IST stack for x86_64 and on a special task stack for x86_32. * @@ -512,7 +527,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code); die("double fault", regs, error_code); - panic("Machine halted."); + if (always_true()) + panic("Machine halted."); instrumentation_end(); }
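The always_true() helper added to traps.c above exists, per the patch's own comment, only to keep the call to panic() conditional so the !CONFIG_X86_ESPFIX64 build of exc_double_fault() is not treated as noreturn. A rough userspace illustration of the same trick, assuming GCC/Clang on an ELF target, with abort() standing in for panic():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

bool always_true(void);
/* A weak definition can be overridden at link time, so the compiler must not
 * assume its return value and cannot prove that abort() is always reached. */
bool __attribute__((weak)) always_true(void) { return true; }

int main(void)
{
	puts("about to 'panic'");
	if (always_true())
		abort();	/* stands in for panic("Machine halted.") */
	/* never reached at run time, but the path has to stay in the binary */
	return 0;
}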
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 15f97c0abc9d..81e9b436c3b6 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -955,7 +955,7 @@ static unsigned long long cyc2ns_suspend;
void tsc_save_sched_clock_state(void) { - if (!sched_clock_stable()) + if (!static_branch_likely(&__use_tsc) && !sched_clock_stable()) return;
cyc2ns_suspend = sched_clock(); @@ -975,7 +975,7 @@ void tsc_restore_sched_clock_state(void) unsigned long flags; int cpu;
- if (!sched_clock_stable()) + if (!static_branch_likely(&__use_tsc) && !sched_clock_stable()) return;
local_irq_save(flags); diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 0a81aafed7f8..0544a24baedb 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -74,6 +74,24 @@ SYM_FUNC_START(rep_movs_alternative) _ASM_EXTABLE_UA( 0b, 1b)
.Llarge_movsq: + /* Do the first possibly unaligned word */ +0: movq (%rsi),%rax +1: movq %rax,(%rdi) + + _ASM_EXTABLE_UA( 0b, .Lcopy_user_tail) + _ASM_EXTABLE_UA( 1b, .Lcopy_user_tail) + + /* What would be the offset to the aligned destination? */ + leaq 8(%rdi),%rax + andq $-8,%rax + subq %rdi,%rax + + /* .. and update pointers and count to match */ + addq %rax,%rdi + addq %rax,%rsi + subq %rax,%rcx + + /* make %rcx contain the number of words, %rax the remainder */ movq %rcx,%rax shrq $3,%rcx andl $7,%eax diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index cc47a818a640..075899e3fc8a 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -562,7 +562,7 @@ void __head sme_enable(struct boot_params *bp) }
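In the rep_movs_alternative hunk above, the new .Llarge_movsq prologue copies one possibly-unaligned word and then advances both pointers so the destination becomes 8-byte aligned; the leaq/andq/subq sequence computes how far to advance (between 1 and 8 bytes). A small C sketch of that arithmetic, using example addresses only:

#include <stdio.h>

/* bytes consumed by the unaligned head copy: ((dst + 8) & ~7) - dst */
static unsigned long head_bytes(unsigned long dst)
{
	return ((dst + 8) & ~7UL) - dst;
}

int main(void)
{
	unsigned long samples[] = { 0x1000, 0x1001, 0x1005, 0x1007 };

	for (int i = 0; i < 4; i++)
		printf("dst=%#lx -> advance %lu byte(s)\n",
		       samples[i], head_bytes(samples[i]));
	/* an aligned start advances a full 8 (the word already copied);
	 * an unaligned start advances 1..7, up to the next boundary */
	return 0;
}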
RIP_REL_REF(sme_me_mask) = me_mask; - physical_mask &= ~me_mask; - cc_vendor = CC_VENDOR_AMD; + RIP_REL_REF(physical_mask) &= ~me_mask; + RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD; cc_set_mask(me_mask); } diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c index 3d2f7f0a6ed1..ad3c1feec990 100644 --- a/arch/x86/mm/pat/cpa-test.c +++ b/arch/x86/mm/pat/cpa-test.c @@ -183,7 +183,7 @@ static int pageattr_test(void) break;
case 1: - err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1); + err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1); break;
case 2: diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index e7b9ac63bb02..8dc4eedd4947 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -982,29 +982,42 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, return -EINVAL; }
-/* - * track_pfn_copy is called when vma that is covering the pfnmap gets - * copied through copy_page_range(). - * - * If the vma has a linear pfn mapping for the entire range, we get the prot - * from pte and reserve the entire vma range with single reserve_pfn_range call. - */ -int track_pfn_copy(struct vm_area_struct *vma) +int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn) { + const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start; resource_size_t paddr; - unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; + int rc;
- if (vma->vm_flags & VM_PAT) { - if (get_pat_info(vma, &paddr, &pgprot)) - return -EINVAL; - /* reserve the whole chunk covered by vma. */ - return reserve_pfn_range(paddr, vma_size, &pgprot, 1); - } + if (!(src_vma->vm_flags & VM_PAT)) + return 0; + + /* + * Duplicate the PAT information for the dst VMA based on the src + * VMA. + */ + if (get_pat_info(src_vma, &paddr, &pgprot)) + return -EINVAL; + rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1); + if (rc) + return rc;
+ /* Reservation for the destination VMA succeeded. */ + vm_flags_set(dst_vma, VM_PAT); + *pfn = PHYS_PFN(paddr); return 0; }
+void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn) +{ + untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true); + /* + * Reservation was freed, any copied page tables will get cleaned + * up later, but without getting PAT involved again. + */ +} + /* * prot is passed in as a parameter for the new mapping. If the vma has * a linear pfn mapping for the entire range, or no vma is provided, @@ -1093,15 +1106,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, } }
-/* - * untrack_pfn_clear is called if the following situation fits: - * - * 1) while mremapping a pfnmap for a new region, with the old vma after - * its pfnmap page table has been removed. The new vma has a new pfnmap - * to the same pfn & cache type with VM_PAT set. - * 2) while duplicating vm area, the new vma fails to copy the pgtable from - * old vma. - */ void untrack_pfn_clear(struct vm_area_struct *vma) { vm_flags_clear(vma, VM_PAT); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 7918923e3b74..a466ad6e5d93 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_mask = nd_desc->cmd_mask; if (cmd == ND_CMD_CALL && call_pkg->nd_family) { family = call_pkg->nd_family; - if (family > NVDIMM_BUS_FAMILY_MAX || + if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX || !test_bit(family, &nd_desc->bus_family_mask)) return -EINVAL; family = array_index_nospec(family, diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 831fa4a12159..0888e4d618d5 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", pr->power.states[ACPI_STATE_C3].address);
+ if (!pr->power.states[ACPI_STATE_C2].address && + !pr->power.states[ACPI_STATE_C3].address) + return -ENODEV; + return 0; }
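The processor_idle.c hunk above makes acpi_processor_get_power_info_fadt() return -ENODEV when neither the C2 nor the C3 I/O address is populated, instead of reporting success with nothing usable beyond C1. A hedged sketch of that guard-clause shape; the struct below is illustrative, not the ACPI data structures:

#include <errno.h>
#include <stdio.h>

/* illustrative only: a cut-down view of per-C-state data */
struct cstate { unsigned int address; };

static int get_power_info(const struct cstate *c2, const struct cstate *c3)
{
	/* nothing beyond C1 is reachable without an I/O address */
	if (!c2->address && !c3->address)
		return -ENODEV;
	return 0;
}

int main(void)
{
	struct cstate none = { 0 }, some = { 0x414 };

	printf("%d %d\n", get_power_info(&none, &none),   /* -ENODEV */
			  get_power_info(&some, &none));  /* 0 */
	return 0;
}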
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 96a987506e71..531684a69c64 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -439,6 +439,13 @@ static const struct dmi_system_id asus_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), }, }, + { + /* Asus Vivobook X1404VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"), + }, + }, { /* Asus Vivobook X1504VAP */ .matches = { diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index e894fdf6d553..aac052e2820c 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -367,7 +367,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | - ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { /* Medion Lifetab S10346 */ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9c5a5f4dba5a..343d3c966e7a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -894,6 +894,9 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete;
+ if (!dev->power.is_suspended) + goto Complete; + if (dev->power.direct_complete) { /* Match the pm_runtime_disable() in __device_suspend(). */ pm_runtime_enable(dev); @@ -912,9 +915,6 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async) */ dev->power.is_prepared = false;
- if (!dev->power.is_suspended) - goto Unlock; - if (dev->pm_domain) { info = "power domain "; callback = pm_op(&dev->pm_domain->ops, state); @@ -954,7 +954,6 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async) error = dpm_run_callback(callback, dev, state, info); dev->power.is_suspended = false;
- Unlock: device_unlock(dev); dpm_watchdog_clear(&wd);
@@ -1236,14 +1235,13 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a dev->power.is_noirq_suspended = true;
/* - * Skipping the resume of devices that were in use right before the - * system suspend (as indicated by their PM-runtime usage counters) - * would be suboptimal. Also resume them if doing that is not allowed - * to be skipped. + * Devices must be resumed unless they are explicitly allowed to be left + * in suspend, but even in that case skipping the resume of devices that + * were in use right before the system suspend (as indicated by their + * runtime PM usage counters and child counters) would be suboptimal. */ - if (atomic_read(&dev->power.usage_count) > 1 || - !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && - dev->power.may_skip_resume)) + if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && + dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev)) dev->power.must_resume = true;
if (dev->power.must_resume) @@ -1639,6 +1637,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_runtime_disable(dev); if (pm_runtime_status_suspended(dev)) { pm_dev_dbg(dev, state, "direct-complete "); + dev->power.is_suspended = true; goto Complete; }
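After the __device_suspend_noirq() rewrite above, must_resume is set unless the driver opts in (DPM_FLAG_MAY_SKIP_RESUME plus may_skip_resume) and pm_runtime_need_not_resume() agrees that the device looked idle from its usage and child counters. A truth-table style sketch of that condition, with plain booleans standing in for the flags and counters (not the real kernel structures):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for (DPM_FLAG_MAY_SKIP_RESUME && power.may_skip_resume) and
 * pm_runtime_need_not_resume() */
static bool must_resume(bool may_skip_resume, bool need_not_resume)
{
	return !may_skip_resume || !need_not_resume;
}

int main(void)
{
	for (int skip = 0; skip <= 1; skip++)
		for (int idle = 0; idle <= 1; idle++)
			printf("may_skip=%d need_not_resume=%d -> must_resume=%d\n",
			       skip, idle, must_resume(skip, idle));
	/* only may_skip=1 together with need_not_resume=1 leaves must_resume at 0 */
	return 0;
}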
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 4545669cb973..0af26cf8c005 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1841,7 +1841,7 @@ void pm_runtime_drop_link(struct device_link *link) pm_request_idle(link->supplier); }
-static bool pm_runtime_need_not_resume(struct device *dev) +bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && (atomic_read(&dev->power.child_count) == 0 || diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c index ab2a028b3027..f802e54d7b70 100644 --- a/drivers/clk/imx/clk-imx8mp-audiomix.c +++ b/drivers/clk/imx/clk-imx8mp-audiomix.c @@ -170,14 +170,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = { CLK_GATE("asrc", ASRC_IPG), CLK_GATE("pdm", PDM_IPG), CLK_GATE("earc", EARC_IPG), - CLK_GATE("ocrama", OCRAMA_IPG), + CLK_GATE_PARENT("ocrama", OCRAMA_IPG, "axi"), CLK_GATE("aud2htx", AUD2HTX_IPG), CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"), CLK_GATE("sdma2", SDMA2_ROOT), CLK_GATE("sdma3", SDMA3_ROOT), CLK_GATE("spba2", SPBA2_ROOT), - CLK_GATE("dsp", DSP_ROOT), - CLK_GATE("dspdbg", DSPDBG_ROOT), + CLK_GATE_PARENT("dsp", DSP_ROOT, "axi"), + CLK_GATE_PARENT("dspdbg", DSPDBG_ROOT, "axi"), CLK_GATE("edma", EDMA_ROOT), CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"), CLK_GATE("mu2", MU2_ROOT), diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index f373a8d48b1d..233ce4a4c1c2 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -1138,8 +1138,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = { .hw.init = &(struct clk_init_data) { .name = "cpu_clk_div16_en", .ops = &clk_regmap_gate_ro_ops, - .parent_hws = (const struct clk_hw *[]) { - &g12a_cpu_clk.hw + .parent_data = &(const struct clk_parent_data) { + /* + * Note: + * G12A and G12B have different cpu clocks (with + * different struct clk_hw). We fallback to the global + * naming string mechanism so this clock picks + * up the appropriate one. Same goes for the other + * clock using cpu cluster A clock output and present + * on both G12 variant. + */ + .name = "cpu_clk", + .index = -1, }, .num_parents = 1, /* @@ -1204,7 +1214,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_apb_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1238,7 +1251,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_atb_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1272,7 +1288,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_axi_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1307,13 +1326,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = { .name = "cpu_clk_trace_div", .ops = &clk_regmap_divider_ro_ops, .parent_data = &(const struct clk_parent_data) { - /* - * Note: - * G12A and G12B have different cpu_clks (with - * different struct clk_hw). We fallback to the global - * naming string mechanism so cpu_clk_trace_div picks - * up the appropriate one. 
- */ .name = "cpu_clk", .index = -1, }, @@ -4189,7 +4201,7 @@ static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14); static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19); static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20); static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23); -static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4); +static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24); static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25); static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26); static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28); diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 1b1279d94781..a133013356b6 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -1272,14 +1272,13 @@ static struct clk_regmap gxbb_cts_i958 = { }, };
+/* + * This table skips a clock named 'cts_slow_oscin' in the documentation + * This clock does not exist yet in this controller or the AO one + */ +static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 }; static const struct clk_parent_data gxbb_32k_clk_parent_data[] = { { .fw_name = "xtal", }, - /* - * FIXME: This clock is provided by the ao clock controller but the - * clock is not yet part of the binding of this controller, so string - * name must be use to set this parent. - */ - { .name = "cts_slow_oscin", .index = -1 }, { .hw = &gxbb_fclk_div3.hw }, { .hw = &gxbb_fclk_div5.hw }, }; @@ -1289,6 +1288,7 @@ static struct clk_regmap gxbb_32k_clk_sel = { .offset = HHI_32K_CLK_CNTL, .mask = 0x3, .shift = 16, + .table = gxbb_32k_clk_parents_val_table, }, .hw.init = &(struct clk_init_data){ .name = "32k_clk_sel", @@ -1312,7 +1312,7 @@ static struct clk_regmap gxbb_32k_clk_div = { &gxbb_32k_clk_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST, + .flags = CLK_SET_RATE_PARENT, }, };
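The gxbb change above drops the nonexistent "cts_slow_oscin" parent and supplies a mux value table instead, so the remaining three parents map to register values 0, 2 and 3 and value 1 is never selected. A standalone sketch of how such an index-to-register-value table is consumed; the values mirror the diff, but the helper itself is only illustrative:

#include <stdio.h>

/* register values for the three remaining parents; value 1 is skipped */
static const unsigned int parents_val_table[] = { 0, 2, 3 };

static unsigned int mux_val_for_parent(unsigned int parent_index)
{
	return parents_val_table[parent_index];
}

int main(void)
{
	for (unsigned int i = 0; i < 3; i++)
		printf("parent %u -> mux field value %u\n",
		       i, mux_val_for_parent(i));
	return 0;
}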
diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c index 3e5a8cb14d4d..e6e2ab1380f2 100644 --- a/drivers/clk/qcom/gcc-msm8953.c +++ b/drivers/clk/qcom/gcc-msm8953.c @@ -3770,7 +3770,7 @@ static struct clk_branch gcc_venus0_axi_clk = {
static struct clk_branch gcc_venus0_core0_vcodec0_clk = { .halt_reg = 0x4c02c, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x4c02c, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c index bc19a23e13f8..4d187d6aba73 100644 --- a/drivers/clk/qcom/mmcc-sdm660.c +++ b/drivers/clk/qcom/mmcc-sdm660.c @@ -2544,7 +2544,7 @@ static struct clk_branch video_core_clk = {
static struct clk_branch video_subcore0_clk = { .halt_reg = 0x1048, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x1048, .enable_mask = BIT(0), diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 267ab54937d3..a3587c500de2 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -201,7 +201,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri", "gpll_peri", "hdmiphy_peri" }; PNAME(mux_ref_usb3otg_src_p) = { "xin24m", - "clk_usb3otg_ref" }; + "clk_ref_usb3otg_src" }; PNAME(mux_xin24m_32k_p) = { "xin24m", "clk_rtc32k" }; PNAME(mux_mac2io_src_p) = { "clk_mac2io_src", diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index b6701905f254..fa5bdf6b3a92 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -74,12 +74,12 @@ struct samsung_clk_provider * __init samsung_clk_init(struct device *dev, if (!ctx) panic("could not allocate clock provider context.\n");
+ ctx->clk_data.num = nr_clks; for (i = 0; i < nr_clks; ++i) ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->dev = dev; ctx->reg_base = base; - ctx->clk_data.num = nr_clks; spin_lock_init(&ctx->lock);
return ctx; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index af44ee6a6430..1a7fcaf39cc9 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy) time_elapsed = update_time - j_cdbs->prev_update_time; j_cdbs->prev_update_time = update_time;
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; + /* + * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if + * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is + * off, where idle_time is calculated by the difference between + * time elapsed in jiffies and "busy time" obtained from CPU + * statistics. If a CPU is 100% busy, the time elapsed and busy + * time should grow with the same amount in two consecutive + * samples, but in practice there could be a tiny difference, + * making the accumulated idle time decrease sometimes. Hence, + * in this case, idle_time should be regarded as 0 in order to + * make the further process correct. + */ + if (cur_idle_time > j_cdbs->prev_cpu_idle) + idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; + else + idle_time = 0; + j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) { @@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * calls, so the previous load value can be used then. */ load = j_cdbs->prev_load; - } else if (unlikely((int)idle_time > 2 * sampling_rate && + } else if (unlikely(idle_time > 2 * sampling_rate && j_cdbs->prev_load)) { /* * If the CPU had gone completely idle and a task has @@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy) load = j_cdbs->prev_load; j_cdbs->prev_load = 0; } else { - if (time_elapsed >= idle_time) { + if (time_elapsed > idle_time) load = 100 * (time_elapsed - idle_time) / time_elapsed; - } else { - /* - * That can happen if idle_time is returned by - * get_cpu_idle_time_jiffy(). In that case - * idle_time is roughly equal to the difference - * between time_elapsed and "busy time" obtained - * from CPU statistics. Then, the "busy time" - * can end up being greater than time_elapsed - * (for example, if jiffies_64 and the CPU - * statistics are updated by different CPUs), - * so idle_time may in fact be negative. That - * means, though, that the CPU was busy all - * the time (on the rough average) during the - * last sampling interval and 100 can be - * returned as the load. - */ - load = (int)idle_time < 0 ? 100 : 0; - } + else + load = 0; + j_cdbs->prev_load = load; }
- if (unlikely((int)idle_time > 2 * sampling_rate)) { + if (unlikely(idle_time > 2 * sampling_rate)) { unsigned int periods = idle_time / sampling_rate;
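The dbs_update() changes above clamp idle_time to zero when the accumulated idle counter appears to run backwards, then compute the load with a plain comparison instead of the old negative-value special case. A worked example with made-up counter values, using the same formula:

#include <stdio.h>

static unsigned int compute_load(unsigned int time_elapsed,
				 unsigned long long prev_idle,
				 unsigned long long cur_idle)
{
	unsigned int idle_time;

	/* clamp: a fully busy CPU can make the accumulated idle time shrink slightly */
	idle_time = cur_idle > prev_idle ? (unsigned int)(cur_idle - prev_idle) : 0;

	if (time_elapsed > idle_time)
		return 100 * (time_elapsed - idle_time) / time_elapsed;
	return 0;
}

int main(void)
{
	printf("%u%%\n", compute_load(100, 1000, 1025));	/* 25 idle -> 75% load */
	printf("%u%%\n", compute_load(100, 1000, 998));		/* idle went backwards -> 100% load */
	return 0;
}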
if (periods < idle_periods) diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index d33be56983ed..bfc2e65e1e50 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -39,8 +39,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) static int scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { - u64 rate = policy->freq_table[index].frequency * 1000; + unsigned long freq_khz = policy->freq_table[index].frequency; struct scpi_data *priv = policy->driver_data; + unsigned long rate = freq_khz * 1000; int ret;
ret = clk_set_rate(priv->clk, rate); @@ -48,7 +49,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) if (ret) return ret;
- if (clk_get_rate(priv->clk) != rate) + if (clk_get_rate(priv->clk) / 1000 != freq_khz) return -EIO;
return 0; diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 30c2b1a64695..2fc04e210bc4 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -37,7 +37,6 @@ struct sec_aead_req { u8 *a_ivin; dma_addr_t a_ivin_dma; struct aead_request *aead_req; - bool fallback; };
/* SEC request of Crypto */ diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 8a6dd2513370..d6727b8ff582 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -57,7 +57,6 @@ #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 #define SEC_ICV_MASK 0x000E -#define SEC_SQE_LEN_RATE_MASK 0x3
#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 @@ -80,16 +79,16 @@ #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ SEC_PBUF_LEFT_SZ(depth))
-#define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 #define SEC_ICV_ERR 0x2 -#define MIN_MAC_LEN 4 #define MAC_LEN_MASK 0x1U #define MAX_INPUT_DATA_LEN 0xFFFE00 #define BITS_MASK 0xFF +#define WORD_MASK 0x3 #define BYTE_BITS 0x8 +#define BYTES_TO_WORDS(bcount) ((bcount) >> 2) #define SEC_XTS_NAME_SZ 0x3 #define IV_CM_CAL_NUM 2 #define IV_CL_MASK 0x7 @@ -690,14 +689,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
c_ctx->fallback = false;
- /* Currently, only XTS mode need fallback tfm when using 192bit key */ - if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) - return 0; - c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(c_ctx->fbtfm)) { - pr_err("failed to alloc xts mode fallback tfm!\n"); + pr_err("failed to alloc fallback tfm for %s!\n", alg); return PTR_ERR(c_ctx->fbtfm); }
@@ -859,7 +854,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, }
memcpy(c_ctx->c_key, key, keylen); - if (c_ctx->fallback && c_ctx->fbtfm) { + if (c_ctx->fbtfm) { ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); if (ret) { dev_err(dev, "failed to set fallback skcipher key!\n"); @@ -1094,11 +1089,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_shash *hash_tfm = ctx->hash_tfm; int blocksize, digestsize, ret;
- if (!keys->authkeylen) { - pr_err("hisi_sec2: aead auth key error!\n"); - return -EINVAL; - } - blocksize = crypto_shash_blocksize(hash_tfm); digestsize = crypto_shash_digestsize(hash_tfm); if (keys->authkeylen > blocksize) { @@ -1110,7 +1100,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, } ctx->a_key_len = digestsize; } else { - memcpy(ctx->a_key, keys->authkey, keys->authkeylen); + if (keys->authkeylen) + memcpy(ctx->a_key, keys->authkey, keys->authkeylen); ctx->a_key_len = keys->authkeylen; }
@@ -1164,8 +1155,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, }
ret = crypto_authenc_extractkeys(&keys, key, keylen); - if (ret) + if (ret) { + dev_err(dev, "sec extract aead keys err!\n"); goto bad_key; + }
ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { @@ -1179,12 +1172,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; }
- if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) { - ret = -EINVAL; - dev_err(dev, "AUTH key length error!\n"); - goto bad_key; - } - ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); if (ret) { dev_err(dev, "set sec fallback key err!\n"); @@ -1587,11 +1574,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE); + sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
sec_sqe->type2.mac_key_alg |= - cpu_to_le32((u32)((ctx->a_key_len) / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); @@ -1643,12 +1629,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |= - cpu_to_le32((u32)(authsize / - SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); + cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |= - cpu_to_le32((u32)(ctx->a_key_len / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); @@ -2007,8 +1991,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); }
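In the sec_auth_bd_fill_ex*() hunks above, MAC and key sizes are programmed in 32-bit words, so the old divide-by-SEC_SQE_LEN_RATE becomes the clearer BYTES_TO_WORDS() shift. A hedged sketch of that field packing; the offset values below are placeholders for the demo, not the hardware's real SEC_MAC_OFFSET_V3/SEC_AKEY_OFFSET_V3 definitions:

#include <stdint.h>
#include <stdio.h>

#define BYTES_TO_WORDS(bcount)	((bcount) >> 2)

/* placeholder bit positions, chosen only for the demo */
#define DEMO_MAC_OFFSET		0
#define DEMO_AKEY_OFFSET	5

static uint32_t pack_auth_mac_key(uint32_t authsize, uint32_t a_key_len)
{
	uint32_t v = 0;

	v |= BYTES_TO_WORDS(authsize) << DEMO_MAC_OFFSET;
	v |= BYTES_TO_WORDS(a_key_len) << DEMO_AKEY_OFFSET;
	return v;
}

int main(void)
{
	/* 16-byte MAC (4 words), 32-byte auth key (8 words) */
	printf("0x%08x\n", pack_auth_mac_key(16, 32));
	return 0;
}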
-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, - struct sec_req *sreq) +static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq) { u32 cryptlen = sreq->c_req.sk_req->cryptlen; struct device *dev = ctx->dev; @@ -2032,10 +2015,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, case SEC_CMODE_CFB: case SEC_CMODE_OFB: case SEC_CMODE_CTR: - if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { - dev_err(dev, "skcipher HW version error!\n"); - ret = -EINVAL; - } break; default: ret = -EINVAL; @@ -2044,17 +2023,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, return ret; }
-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_skcipher_param_check(struct sec_ctx *ctx, + struct sec_req *sreq, bool *need_fallback) { struct skcipher_request *sk_req = sreq->c_req.sk_req; struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg;
- if (unlikely(!sk_req->src || !sk_req->dst || - sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { + if (unlikely(!sk_req->src || !sk_req->dst)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } + + if (sk_req->cryptlen > MAX_INPUT_DATA_LEN) + *need_fallback = true; + sreq->c_req.c_len = sk_req->cryptlen;
if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) @@ -2112,6 +2095,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); struct sec_req *req = skcipher_request_ctx(sk_req); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + bool need_fallback = false; int ret;
if (!sk_req->cryptlen) { @@ -2125,11 +2109,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) req->c_req.encrypt = encrypt; req->ctx = ctx;
- ret = sec_skcipher_param_check(ctx, req); + ret = sec_skcipher_param_check(ctx, req, &need_fallback); if (unlikely(ret)) return -EINVAL;
- if (unlikely(ctx->c_ctx.fallback)) + if (unlikely(ctx->c_ctx.fallback || need_fallback)) return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
return ctx->req_op->process(ctx, req); @@ -2262,52 +2246,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t sz = crypto_aead_authsize(tfm); u8 c_mode = ctx->c_ctx.c_mode; - struct device *dev = ctx->dev; int ret;
- /* Hardware does not handle cases where authsize is less than 4 bytes */ - if (unlikely(sz < MIN_MAC_LEN)) { - sreq->aead_req.fallback = true; + if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len)) return -EINVAL; - }
if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || - req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(dev, "aead input spec error!\n"); + req->assoclen > SEC_MAX_AAD_LEN)) return -EINVAL; - }
if (c_mode == SEC_CMODE_CCM) { - if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { - dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); + if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) return -EINVAL; - } - ret = aead_iv_demension_check(req); - if (ret) { - dev_err(dev, "aead input iv param error!\n"); - return ret; - } - }
- if (sreq->c_req.encrypt) - sreq->c_req.c_len = req->cryptlen; - else - sreq->c_req.c_len = req->cryptlen - sz; - if (c_mode == SEC_CMODE_CBC) { - if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "aead crypto length error!\n"); + ret = aead_iv_demension_check(req); + if (unlikely(ret)) + return -EINVAL; + } else if (c_mode == SEC_CMODE_CBC) { + if (unlikely(sz & WORD_MASK)) + return -EINVAL; + if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK)) return -EINVAL; - } }
return 0; }
-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback) { struct aead_request *req = sreq->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - size_t authsize = crypto_aead_authsize(tfm); struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg;
@@ -2316,12 +2283,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; }
- if (ctx->sec->qm.ver == QM_HW_V2) { - if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && - req->cryptlen <= authsize))) { - sreq->aead_req.fallback = true; - return -EINVAL; - } + if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC && + sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "aead cbc mode input data length error!\n"); + return -EINVAL; }
/* Support AES or SM4 */ @@ -2330,8 +2295,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; }
- if (unlikely(sec_aead_spec_check(ctx, sreq))) + if (unlikely(sec_aead_spec_check(ctx, sreq))) { + *need_fallback = true; return -EINVAL; + }
if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ) @@ -2375,17 +2342,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); struct sec_req *req = aead_request_ctx(a_req); struct sec_ctx *ctx = crypto_aead_ctx(tfm); + size_t sz = crypto_aead_authsize(tfm); + bool need_fallback = false; int ret;
req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; - req->aead_req.fallback = false; + req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
- ret = sec_aead_param_check(ctx, req); + ret = sec_aead_param_check(ctx, req, &need_fallback); if (unlikely(ret)) { - if (req->aead_req.fallback) + if (need_fallback) return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; } diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 35f2d0d8507e..7e98f174f69b 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -1144,6 +1144,7 @@ static void __init nxcop_get_capabilities(void) { struct hv_vas_all_caps *hv_caps; struct hv_nx_cop_caps *hv_nxc; + u64 feat; int rc;
hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL); @@ -1154,27 +1155,26 @@ static void __init nxcop_get_capabilities(void) */ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0, (u64)virt_to_phys(hv_caps)); + if (!rc) + feat = be64_to_cpu(hv_caps->feat_type); + kfree(hv_caps); if (rc) - goto out; + return; + if (!(feat & VAS_NX_GZIP_FEAT_BIT)) + return;
- caps_feat = be64_to_cpu(hv_caps->feat_type); /* * NX-GZIP feature available */ - if (caps_feat & VAS_NX_GZIP_FEAT_BIT) { - hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); - if (!hv_nxc) - goto out; - /* - * Get capabilities for NX-GZIP feature - */ - rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, - VAS_NX_GZIP_FEAT, - (u64)virt_to_phys(hv_nxc)); - } else { - pr_err("NX-GZIP feature is not available\n"); - rc = -EINVAL; - } + hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); + if (!hv_nxc) + return; + /* + * Get capabilities for NX-GZIP feature + */ + rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, + VAS_NX_GZIP_FEAT, + (u64)virt_to_phys(hv_nxc));
if (!rc) { nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor); @@ -1184,13 +1184,10 @@ static void __init nxcop_get_capabilities(void) be64_to_cpu(hv_nxc->min_compress_len); nx_cop_caps.min_decompress_len = be64_to_cpu(hv_nxc->min_decompress_len); - } else { - caps_feat = 0; + caps_feat = feat; }
kfree(hv_nxc); -out: - kfree(hv_caps); }
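The nxcop_get_capabilities() rework above follows a simple shape: copy the one field still needed (feat_type) out of hv_caps before freeing it, then use early returns instead of the out: label. A minimal sketch of that copy-then-free pattern, with a hypothetical query() standing in for the hypervisor call:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct caps { uint64_t feat_type; };

/* hypothetical stand-in for h_query_vas_capabilities() */
static int query(struct caps *c) { c->feat_type = 0x1; return 0; }

static void get_capabilities(void)
{
	struct caps *caps = malloc(sizeof(*caps));
	uint64_t feat = 0;
	int rc;

	if (!caps)
		return;
	rc = query(caps);
	if (!rc)
		feat = caps->feat_type;	/* copy before the buffer goes away */
	free(caps);
	if (rc)
		return;
	printf("feature word: %#llx\n", (unsigned long long)feat);
}

int main(void) { get_capabilities(); return 0; }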
static const struct vio_device_id nx842_vio_driver_ids[] = { diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index cd394eae47d1..cc9923ab686d 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -675,9 +675,9 @@ static int fsl_edma_remove(struct platform_device *pdev) struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
fsl_edma_irq_exit(pdev, fsl_edma); - fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); + fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return 0; diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 535f058b48ee..67a46abe07da 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -755,6 +755,8 @@ static int i10nm_get_ddr_munits(void) continue; } else { d->imc[lmc].mdev = mdev; + if (res_cfg->type == SPR) + skx_set_mc_mapping(d, i, lmc); lmc++; } } diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 9ef13570f2e5..56be8ef40f37 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -91,8 +91,6 @@ (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \ PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
-#define IE31200_DIMMS 4 -#define IE31200_RANKS 8 #define IE31200_RANKS_PER_CHANNEL 4 #define IE31200_DIMMS_PER_CHANNEL 2 #define IE31200_CHANNELS 2 @@ -164,6 +162,7 @@ #define IE31200_MAD_DIMM_0_OFFSET 0x5004 #define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C #define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0) +#define IE31200_MAD_DIMM_SIZE_SKL GENMASK_ULL(5, 0) #define IE31200_MAD_DIMM_A_RANK BIT(17) #define IE31200_MAD_DIMM_A_RANK_SHIFT 17 #define IE31200_MAD_DIMM_A_RANK_SKL BIT(10) @@ -377,7 +376,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev) static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan) { - dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE; + dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE_SKL; dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0; dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >> (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4))); @@ -426,7 +425,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
nr_channels = how_many_channels(pdev); layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; - layers[0].size = IE31200_DIMMS; + layers[0].size = IE31200_RANKS_PER_CHANNEL; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = nr_channels; @@ -618,7 +617,7 @@ static int __init ie31200_init(void)
pci_rc = pci_register_driver(&ie31200_driver); if (pci_rc < 0) - goto fail0; + return pci_rc;
if (!mci_pdev) { ie31200_registered = 0; @@ -629,11 +628,13 @@ static int __init ie31200_init(void) if (mci_pdev) break; } + if (!mci_pdev) { edac_dbg(0, "ie31200 pci_get_device fail\n"); pci_rc = -ENODEV; - goto fail1; + goto fail0; } + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); if (pci_rc < 0) { edac_dbg(0, "ie31200 init fail\n"); @@ -641,12 +642,12 @@ static int __init ie31200_init(void) goto fail1; } } - return 0;
+ return 0; fail1: - pci_unregister_driver(&ie31200_driver); -fail0: pci_dev_put(mci_pdev); +fail0: + pci_unregister_driver(&ie31200_driver);
return pci_rc; } diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 0b8aaf5f77d9..d47f0055217e 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -120,6 +120,35 @@ void skx_adxl_put(void) } EXPORT_SYMBOL_GPL(skx_adxl_put);
+static void skx_init_mc_mapping(struct skx_dev *d) +{ + /* + * By default, the BIOS presents all memory controllers within each + * socket to the EDAC driver. The physical indices are the same as + * the logical indices of the memory controllers enumerated by the + * EDAC driver. + */ + for (int i = 0; i < NUM_IMC; i++) + d->mc_mapping[i] = i; +} + +void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc) +{ + edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n", + pmc, lmc); + + d->mc_mapping[pmc] = lmc; +} +EXPORT_SYMBOL_GPL(skx_set_mc_mapping); + +static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc) +{ + edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n", + pmc, d->mc_mapping[pmc]); + + return d->mc_mapping[pmc]; +} + static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src) { struct skx_dev *d; @@ -187,6 +216,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src) return false; }
+ res->imc = skx_get_mc_mapping(d, res->imc); + for (i = 0; i < adxl_component_count; i++) { if (adxl_values[i] == ~0x0ull) continue; @@ -307,6 +338,8 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list) d->bus[0], d->bus[1], d->bus[2], d->bus[3]); list_add_tail(&d->list, &dev_edac_list); prev = pdev; + + skx_init_mc_mapping(d); }
if (list) diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h index e7f18ada1668..5acfef8fd3d3 100644 --- a/drivers/edac/skx_common.h +++ b/drivers/edac/skx_common.h @@ -94,6 +94,16 @@ struct skx_dev { struct pci_dev *uracu; /* for i10nm CPU */ struct pci_dev *pcu_cr3; /* for HBM memory detection */ u32 mcroute; + /* + * Some server BIOS may hide certain memory controllers, and the + * EDAC driver skips those hidden memory controllers. However, the + * ADXL still decodes memory error address using physical memory + * controller indices. The mapping table is used to convert the + * physical indices (reported by ADXL) to the logical indices + * (used the EDAC driver) of present memory controllers during the + * error handling process. + */ + u8 mc_mapping[NUM_IMC]; struct skx_imc { struct mem_ctl_info *mci; struct pci_dev *mdev; /* for i10nm CPU */ @@ -243,6 +253,7 @@ void skx_adxl_put(void); void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log); void skx_set_mem_cfg(bool mem_cfg_2lm); void skx_set_res_cfg(struct res_config *cfg); +void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
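The skx_common mc_mapping table above starts out as an identity map and is overridden per socket when the BIOS hides memory controllers, so the physical controller index reported by ADXL can be translated to the logical index the EDAC driver registered. A cut-down sketch of the same translation; the NUM_IMC value here is assumed for the demo, not the driver's build-time constant:

#include <stdio.h>

#define NUM_IMC 4	/* demo value only */

struct dev_map { unsigned char mc_mapping[NUM_IMC]; };

static void init_mc_mapping(struct dev_map *d)
{
	for (int i = 0; i < NUM_IMC; i++)
		d->mc_mapping[i] = i;	/* default: physical == logical */
}

int main(void)
{
	struct dev_map d;

	init_mc_mapping(&d);
	/* BIOS hid physical controller 1: physical 2 becomes logical 1 */
	d.mc_mapping[2] = 1;
	printf("error on physical imc 2 -> logical imc %d\n", d.mc_mapping[2]);
	return 0;
}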
int skx_get_src_id(struct skx_dev *d, int off, u8 *id); int skx_get_node_id(struct skx_dev *d, u8 *id); diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 4ce5681be18f..c015d2b4c5cf 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -1584,6 +1584,7 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
+ ret = 0; out_fw: cs_dsp_buf_free(&buf_list); kfree(text); @@ -2299,6 +2300,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
+ ret = 0; out_fw: cs_dsp_buf_free(&buf_list); kfree(text); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f9bc38d20ce3..a51ceebb8054 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2461,7 +2461,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
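The two cs_dsp hunks above set ret to 0 right before falling into the shared out_fw: cleanup label; without that, a stale nonzero value left in ret from an earlier step could be returned even though the load succeeded. A tiny sketch of that shared-cleanup-label pattern, with a hypothetical parse_chunk() standing in for the firmware parsing loop:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical: returns the number of bytes consumed, or a negative error */
static int parse_chunk(void) { return 42; }

static int load(void)
{
	char *buf = malloc(64);
	int ret;

	if (!buf)
		return -12;		/* -ENOMEM */
	ret = parse_chunk();
	if (ret < 0)
		goto out;
	/* success path: clear the positive byte count before the cleanup label */
	ret = 0;
out:
	free(buf);
	return ret;
}

int main(void)
{
	printf("load() = %d\n", load());	/* 0, not 42 */
	return 0;
}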
adev->in_s4 = true; r = amdgpu_device_suspend(drm_dev, true); - adev->in_s4 = false; if (r) return r;
@@ -2473,8 +2472,13 @@ static int amdgpu_pmops_freeze(struct device *dev) static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r;
- return amdgpu_device_resume(drm_dev, true); + r = amdgpu_device_resume(drm_dev, true); + adev->in_s4 = false; + + return r; }
static int amdgpu_pmops_poweroff(struct device *dev) @@ -2487,6 +2491,9 @@ static int amdgpu_pmops_poweroff(struct device *dev) static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + + adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 54ec9b32562c..480d718d09cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1318,7 +1318,7 @@ static int gfx_v11_0_sw_init(void *handle) adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; - adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_mec = 1; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 4; break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 43fa260ddbce..4d9a406925e1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -197,21 +197,6 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, if (dqm->is_hws_hang) return -EIO;
- if (!pdd->proc_ctx_cpu_ptr) { - r = amdgpu_amdkfd_alloc_gtt_mem(adev, - AMDGPU_MES_PROC_CTX_SIZE, - &pdd->proc_ctx_bo, - &pdd->proc_ctx_gpu_addr, - &pdd->proc_ctx_cpu_ptr, - false); - if (r) { - dev_err(adev->dev, - "failed to allocate process context bo\n"); - return r; - } - memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); - } - memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); queue_input.process_id = qpd->pqm->process->pasid; queue_input.page_table_base_addr = qpd->page_table_base; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index a02777694d99..e057c2bc7be4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -329,10 +329,26 @@ int pqm_create_queue(struct process_queue_manager *pqm, if (retval != 0) return retval;
+ /* Register process if this is the first queue */ if (list_empty(&pdd->qpd.queues_list) && list_empty(&pdd->qpd.priv_queue_list)) dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
+ /* Allocate proc_ctx_bo only if MES is enabled and this is the first queue */ + if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) { + retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, + AMDGPU_MES_PROC_CTX_SIZE, + &pdd->proc_ctx_bo, + &pdd->proc_ctx_gpu_addr, + &pdd->proc_ctx_cpu_ptr, + false); + if (retval) { + dev_err(dev->adev->dev, "failed to allocate process context bo\n"); + return retval; + } + memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE); + } + pqn = kzalloc(sizeof(*pqn), GFP_KERNEL); if (!pqn) { retval = -ENOMEM; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2b7f98a2e36f..3696b9112c74 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2890,6 +2890,11 @@ static int dm_resume(void *handle)
return 0; } + + /* leave display off for S4 sequence */ + if (adev->in_s4) + return 0; + /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_release_state(dm_state->context); dm_state->context = dc_create_state(dm->dc); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index f365773d5714..e9b3c1c7a931 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -37,6 +37,9 @@ #include "dce/dce_i2c.h" struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index) { + if (link_index >= (MAX_PIPES * 2)) + return NULL; + return dc->links[link_index]; }
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 5c7530287730..4dc9856e8730 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -63,6 +63,10 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link) { + /* ASIC doesn't support DMUB */ + if (!link->ctx->dmub_srv) + return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index ad741a723c0e..72ffa1abebaa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -281,10 +281,10 @@ static void CalculateDynamicMetadataParameters( double DISPCLK, double DCFClkDeepSleep, double PixelClock, - long HTotal, - long VBlank, - long DynamicMetadataTransmittedBytes, - long DynamicMetadataLinesBeforeActiveRequired, + unsigned int HTotal, + unsigned int VBlank, + unsigned int DynamicMetadataTransmittedBytes, + int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double *Tsetup, @@ -3277,8 +3277,8 @@ static double CalculateWriteBackDelay(
static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK, - double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes, - long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, + double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes, + int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks) { double TotalRepeaterDelayTime = 0; diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index fe33b988d752..e094165e584a 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -2039,12 +2039,13 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) continue; }
- for (i = 0; i < 5; i++) { + for (i = 0; i < 5; i++) if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] || - av[i][1] != av[i][2] || bv[i][0] != av[i][3]) + bv[i][1] != av[i][2] || bv[i][0] != av[i][3]) break;
- DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i); + if (i == 5) { + DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d", retry); return true; } } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 3309c01fa715..bfbd3fee1256 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -480,6 +480,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, const char *name) { struct device *dev = pdata->dev; + const struct i2c_client *client = to_i2c_client(dev); struct auxiliary_device *aux; int ret;
@@ -488,6 +489,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, return -ENOMEM;
aux->name = name; + aux->id = (client->adapter->nr << 10) | client->addr; aux->dev.parent = dev; aux->dev.release = ti_sn65dsi86_aux_device_release; device_set_of_node_from_dev(&aux->dev, dev); diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 71a30387ca12..21ff7ef7ce92 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -178,13 +178,13 @@ static int drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len) { int i; - u8 unpacked_rad[16]; + u8 unpacked_rad[16] = {};
- for (i = 0; i < lct; i++) { + for (i = 1; i < lct; i++) { if (i % 2) - unpacked_rad[i] = rad[i / 2] >> 4; + unpacked_rad[i] = rad[(i - 1) / 2] >> 4; else - unpacked_rad[i] = rad[i / 2] & BIT_MASK(4); + unpacked_rad[i] = rad[(i - 1) / 2] & 0xF; }
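The drm_dp_mst_rad_to_str() fix above reflects how a relative address is packed: one 4-bit port number per hop after the first, two per byte with the high nibble first, so unpacking starts at hop 1 and indexes rad[(i - 1) / 2]. A standalone sketch of the same unpacking:

#include <stdio.h>

/* unpack lct-1 port numbers, high nibble first; slot 0 stays 0 */
static void unpack_rad(const unsigned char *rad, int lct, unsigned char *out)
{
	for (int i = 0; i < 16; i++)
		out[i] = 0;
	for (int i = 1; i < lct; i++)
		out[i] = (i % 2) ? rad[(i - 1) / 2] >> 4
				 : rad[(i - 1) / 2] & 0xF;
}

int main(void)
{
	unsigned char rad[8] = { 0x23, 0x50 };	/* hops: 2, 3, 5 */
	unsigned char out[16];

	unpack_rad(rad, 4, out);		/* lct = 4 -> 3 stored hops */
	printf("%u %u %u\n", out[1], out[2], out[3]);	/* prints "2 3 5" */
	return 0;
}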
/* TODO: Eventually add something to printk so we can format the rad diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index be4de26c77f9..199527643093 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -1648,7 +1648,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val); if (ret < 1) { - drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n"); + dev_err(mtk_dp->dev, "Read mstm cap failed: %zd\n", ret); return ret == 0 ? -EIO : ret; }
@@ -1658,7 +1658,7 @@ static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp) DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &val); if (ret < 1) { - drm_err(mtk_dp->drm_dev, "Read irq vector failed\n"); + dev_err(mtk_dp->dev, "Read irq vector failed: %zd\n", ret); return ret == 0 ? -EIO : ret; }
@@ -1941,7 +1941,7 @@ static int mtk_dp_wait_hpd_asserted(struct drm_dp_aux *mtk_aux, unsigned long wa
ret = mtk_dp_parse_capabilities(mtk_dp); if (ret) { - drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n"); + dev_err(mtk_dp->dev, "Can't parse capabilities: %d\n", ret); return ret; }
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 0d96264ec5c6..f154b3a7c2c2 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -1016,12 +1016,12 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct mtk_dsi *dsi = host_to_dsi(host); - u32 recv_cnt, i; + ssize_t recv_cnt; u8 read_data[16]; void *src_addr; u8 irq_flag = CMD_DONE_INT_FLAG; u32 dsi_mode; - int ret; + int ret, i;
dsi_mode = readl(dsi->regs + DSI_MODE_CTRL); if (dsi_mode & MODE) { @@ -1070,7 +1070,7 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, if (recv_cnt) memcpy(msg->rx_buf, src_addr, recv_cnt);
- DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n", + DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n", recv_cnt, *((u8 *)(msg->tx_buf)));
restore_dsi_mode: diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 86133bf16326..68d0b65ef783 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -137,7 +137,7 @@ enum hdmi_aud_channel_swap_type {
struct hdmi_audio_param { enum hdmi_audio_coding_type aud_codec; - enum hdmi_audio_sample_size aud_sampe_size; + enum hdmi_audio_sample_size aud_sample_size; enum hdmi_aud_input_type aud_input_type; enum hdmi_aud_i2s_fmt aud_i2s_fmt; enum hdmi_aud_mclk aud_mclk; @@ -173,6 +173,7 @@ struct mtk_hdmi { unsigned int sys_offset; void __iomem *regs; enum hdmi_colorspace csp; + struct platform_device *audio_pdev; struct hdmi_audio_param aud_param; bool audio_enable; bool powered; @@ -1074,7 +1075,7 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
hdmi->csp = HDMI_COLORSPACE_RGB; aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; aud_param->aud_input_type = HDMI_AUD_INPUT_I2S; aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; aud_param->aud_mclk = HDMI_AUD_MCLK_128FS; @@ -1575,14 +1576,14 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data, switch (daifmt->fmt) { case HDMI_I2S: hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S; hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS; break; case HDMI_SPDIF: hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF; break; default: @@ -1666,6 +1667,11 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { .no_capture_mute = 1, };
+static void mtk_hdmi_unregister_audio_driver(void *data) +{ + platform_device_unregister(data); +} + static int mtk_hdmi_register_audio_driver(struct device *dev) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1675,13 +1681,20 @@ static int mtk_hdmi_register_audio_driver(struct device *dev) .i2s = 1, .data = hdmi, }; - struct platform_device *pdev; + int ret;
- pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, - PLATFORM_DEVID_AUTO, &codec_data, - sizeof(codec_data)); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + hdmi->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver, + hdmi->audio_pdev); + if (ret) + return ret;
DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME); return 0; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index ad57368dc13f..2df1e6293062 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1210,10 +1210,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
- /* force a full mode set if active state changed */ - if (crtc_state->active_changed) - crtc_state->mode_changed = true; - if (cstate->num_mixers) { rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); if (rc) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 35cf9080168b..99cccde5d221 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -669,12 +669,11 @@ static int dpu_encoder_virt_atomic_check(
/* * Release and Allocate resources on every modeset - * Dont allocate when active is false. */ if (drm_atomic_crtc_needs_modeset(crtc_state)) { dpu_rm_release(global_state, drm_enc);
- if (!crtc_state->active_changed || crtc_state->enable) + if (crtc_state->enable) ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc, crtc_state, topology); } diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f920329fe2e0..f90ccdfbb2fc 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -825,7 +825,7 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host, dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0)); }
-static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay) +static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode) { struct drm_dsc_config *dsc = msm_host->dsc; u32 reg, reg_ctrl, reg_ctrl2; @@ -837,7 +837,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod /* first calculate dsc parameters and then program * compress mode registers */ - slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay); + slice_per_intf = dsc->slice_count;
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf; bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */ @@ -948,7 +948,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) { if (msm_host->dsc) - dsi_update_dsc_timing(msm_host, false, mode->hdisplay); + dsi_update_dsc_timing(msm_host, false);
dsi_write(msm_host, REG_DSI_ACTIVE_H, DSI_ACTIVE_H_START(ha_start) | @@ -969,7 +969,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) DSI_ACTIVE_VSYNC_VPOS_END(vs_end)); } else { /* command mode */ if (msm_host->dsc) - dsi_update_dsc_timing(msm_host, true, mode->hdisplay); + dsi_update_dsc_timing(msm_host, true);
/* image data and 1 byte write_memory_start cmd */ if (!msm_host->dsc) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 28b8012a21f2..1a75f9c18c24 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -74,17 +74,35 @@ static int dsi_mgr_setup_components(int id) int ret;
if (!IS_BONDED_DSI()) { + /* + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + ret = msm_dsi_host_register(msm_dsi->host); if (ret) return ret; - - msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); } else if (other_dsi) { struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? msm_dsi : other_dsi; struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? other_dsi : msm_dsi; + + /* + * PLL0 is to drive both DSI link clocks in bonded DSI mode. + * + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(clk_master_dsi->phy, + MSM_DSI_PHY_MASTER); + msm_dsi_phy_set_usecase(clk_slave_dsi->phy, + MSM_DSI_PHY_SLAVE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); + /* Register slave host first, so that slave DSI device * has a chance to probe, and do not block the master * DSI device's probe. @@ -98,14 +116,6 @@ static int dsi_mgr_setup_components(int id) ret = msm_dsi_host_register(master_link_dsi->host); if (ret) return ret; - - /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */ - msm_dsi_phy_set_usecase(clk_master_dsi->phy, - MSM_DSI_PHY_MASTER); - msm_dsi_phy_set_usecase(clk_slave_dsi->phy, - MSM_DSI_PHY_SLAVE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); - msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); }
return 0; diff --git a/drivers/gpu/drm/msm/msm_dsc_helper.h b/drivers/gpu/drm/msm/msm_dsc_helper.h index b9049fe1e279..63f95523b2cb 100644 --- a/drivers/gpu/drm/msm/msm_dsc_helper.h +++ b/drivers/gpu/drm/msm/msm_dsc_helper.h @@ -12,17 +12,6 @@ #include <linux/math.h> #include <drm/display/drm_dsc_helper.h>
-/** - * msm_dsc_get_slices_per_intf() - calculate number of slices per interface - * @dsc: Pointer to drm dsc config struct - * @intf_width: interface width in pixels - * Returns: Integer representing the number of slices for the given interface - */ -static inline u32 msm_dsc_get_slices_per_intf(const struct drm_dsc_config *dsc, u32 intf_width) -{ - return DIV_ROUND_UP(intf_width, dsc->slice_width); -} - /** * msm_dsc_get_bytes_per_line() - calculate bytes per line * @dsc: Pointer to drm dsc config struct diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index dd0af086e7fa..25db36ec06d4 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -243,17 +243,19 @@ static int __init vkms_init(void) if (!config) return -ENOMEM;
- default_config = config; - config->cursor = enable_cursor; config->writeback = enable_writeback; config->overlay = enable_overlay;
ret = vkms_create(config); - if (ret) + if (ret) { kfree(config); + return ret; + }
- return ret; + default_config = config; + + return 0; }
static void vkms_destroy(struct vkms_config *config) @@ -277,9 +279,10 @@ static void vkms_destroy(struct vkms_config *config)
static void __exit vkms_exit(void) { - if (default_config->dev) - vkms_destroy(default_config); + if (!default_config) + return;
+ vkms_destroy(default_config); kfree(default_config); }
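The vkms change above publishes the module-wide default_config pointer only after vkms_create() succeeds, and the exit path now bails out if nothing was ever published. A minimal sketch of that publish-on-success pattern follows; the foo_* names and struct are hypothetical stand-ins, not the vkms code itself.

#include <linux/module.h>
#include <linux/slab.h>

struct foo_config {
	bool enabled;
};

static struct foo_config *default_cfg;	/* published only once init fully succeeded */

static int foo_create(struct foo_config *cfg) { return 0; }	/* stand-in constructor */
static void foo_destroy(struct foo_config *cfg) { }		/* stand-in destructor */

static int __init foo_init(void)
{
	struct foo_config *cfg;
	int ret;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	ret = foo_create(cfg);
	if (ret) {
		kfree(cfg);		/* nothing was published, clean up locally */
		return ret;
	}

	default_cfg = cfg;		/* visible to foo_exit() only from here on */
	return 0;
}

static void __exit foo_exit(void)
{
	if (!default_cfg)		/* init never published a config */
		return;

	foo_destroy(default_cfg);
	kfree(default_cfg);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");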
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index f5781939de9c..a25b22238e3d 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -231,6 +231,8 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) if (ret) return ret;
+ dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); + /* Try the reserved memory. Proceed if there's none. */ of_reserved_mem_device_init(&pdev->dev);
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 082a728eac60..f5a06b62b385 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -165,7 +165,6 @@ obj-$(CONFIG_USB_KBD) += usbhid/ obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/ -obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 045db6f0fb4c..3dcdd3368b46 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -258,7 +258,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid, ihid->rawbuf, recv_len + sizeof(__le16)); if (error) { dev_err(&ihid->client->dev, - "failed to set a report to device: %d\n", error); + "failed to get a report from device: %d\n", error); return error; }
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c index 16f6b7ba2a5d..da4c3425d2d1 100644 --- a/drivers/hwmon/nct6775-core.c +++ b/drivers/hwmon/nct6775-core.c @@ -273,8 +273,8 @@ static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = { static const u16 NCT6776_REG_TOLERANCE_H[] = { 0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c };
-static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 }; -static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 }; +static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 }; +static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index 3949ded0d4fa..c062bcc09466 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -267,7 +267,7 @@ catu_init_sg_table(struct device *catu_dev, int node, * Each table can address upto 1MB and we can have * CATU_PAGES_PER_SYSPAGE tables in a system page. */ - nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE; + nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M); catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages, size >> PAGE_SHIFT, pages); if (IS_ERR(catu_table)) diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 4b80026db1ab..783e259c3761 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1465,18 +1465,20 @@ static void coresight_remove_conns(struct coresight_device *csdev) }
/** - * coresight_timeout - loop until a bit has changed to a specific register - * state. + * coresight_timeout_action - loop until a bit has changed to a specific register + * state, with a callback after every trial. * @csa: coresight device access for the device * @offset: Offset of the register from the base of the device. * @position: the position of the bit of interest. * @value: the value the bit should have. + * @cb: Call back after each trial. * * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if * TIMEOUT_US has elapsed, which ever happens first. */ -int coresight_timeout(struct csdev_access *csa, u32 offset, - int position, int value) +int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb) { int i; u32 val; @@ -1492,7 +1494,8 @@ int coresight_timeout(struct csdev_access *csa, u32 offset, if (!(val & BIT(position))) return 0; } - + if (cb) + cb(csa, offset, position, value); /* * Delay is arbitrary - the specification doesn't say how long * we are expected to wait. Extra check required to make sure @@ -1504,6 +1507,13 @@ int coresight_timeout(struct csdev_access *csa, u32 offset,
return -EAGAIN; } +EXPORT_SYMBOL_GPL(coresight_timeout_action); + +int coresight_timeout(struct csdev_access *csa, u32 offset, + int position, int value) +{ + return coresight_timeout_action(csa, offset, position, value, NULL); +} EXPORT_SYMBOL_GPL(coresight_timeout);
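The coresight_timeout_action() addition above keeps the existing polling loop but lets the caller run a hook between polls (the ETM4x code uses it to issue an isb() when the trace unit is accessed via system instructions), while coresight_timeout() survives as a thin wrapper that passes a NULL callback. A generic, self-contained sketch of that poll-with-callback shape in plain C, with hypothetical names rather than the CoreSight API:

#include <stdbool.h>
#include <stddef.h>

typedef bool (*poll_fn)(void *ctx);	/* returns true once the condition holds */
typedef void (*retry_cb)(void *ctx);	/* optional hook run after each failed poll */

/* Poll up to max_tries times, invoking cb (if given) between attempts. */
static int poll_with_action(poll_fn poll, retry_cb cb, void *ctx, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if (poll(ctx))
			return 0;
		if (cb)
			cb(ctx);
	}
	return -1;	/* timed out */
}

/* The original interface becomes a wrapper with no callback. */
static int poll_plain(poll_fn poll, void *ctx, int max_tries)
{
	return poll_with_action(poll, NULL, ctx, max_tries);
}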
u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset) diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 840e4cccf8c4..05d9f87e3533 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -399,6 +399,29 @@ static void etm4_check_arch_features(struct etmv4_drvdata *drvdata, } #endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
+static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val) +{ + if (!csa->io_mem) + isb(); +} + +/* + * etm4x_wait_status: Poll for TRCSTATR.<pos> == <val>. While using system + * instruction to access the trace unit, each access must be separated by a + * synchronization barrier. See ARM IHI0064H.b section "4.3.7 Synchronization of + * register updates", for system instructions section, in "Notes": + * + * "In particular, whenever disabling or enabling the trace unit, a poll of + * TRCSTATR needs explicit synchronization between each read of TRCSTATR" + */ +static int etm4x_wait_status(struct csdev_access *csa, int pos, int val) +{ + if (!csa->io_mem) + return coresight_timeout_action(csa, TRCSTATR, pos, val, + etm4x_sys_ins_barrier); + return coresight_timeout(csa, TRCSTATR, pos, val); +} + static int etm4_enable_hw(struct etmv4_drvdata *drvdata) { int i, rc; @@ -430,7 +453,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb();
/* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); if (drvdata->nr_pe) @@ -523,7 +546,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb();
/* wait for TRCSTATR.IDLE to go back down to '0' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n");
@@ -903,10 +926,25 @@ static void etm4_disable_hw(void *info) tsb_csync(); etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
+ /* + * As recommended by section 4.3.7 ("Synchronization when using system + * instructions to program the trace unit") of ARM IHI 0064H.b, the + * self-hosted trace analyzer must perform a Context synchronization + * event between writing to the TRCPRGCTLR and reading the TRCSTATR. + */ + if (!csa->io_mem) + isb(); + /* wait for TRCSTATR.PMSTABLE to go to '1' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for PM stable Trace Status\n"); + /* + * As recommended by section 4.3.7 (Synchronization of register updates) + * of ARM IHI 0064H.b. + */ + isb(); + /* read the status of the single shot comparators */ for (i = 0; i < drvdata->nr_ss_cmp; i++) { config->ss_status[i] = @@ -1672,7 +1710,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) etm4_os_lock(drvdata);
/* wait for TRCSTATR.PMSTABLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for PM Stable Status\n"); etm4_os_unlock(drvdata); @@ -1763,7 +1801,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); etm4_os_unlock(drvdata); diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index c5ab39f1e755..652a666909a5 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -951,7 +951,7 @@ static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
/* Create the IBIRULES register for both cases */ i3c_bus_for_each_i3cdev(&master->base.bus, dev) { - if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER) + if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP)) continue;
if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) { diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index f42a88711486..16a0de6002be 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -711,7 +711,7 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct mma8452_data *data = iio_priv(indio_dev); - int i, ret; + int i, j, ret;
ret = iio_device_claim_direct_mode(indio_dev); if (ret) @@ -771,14 +771,18 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO: - ret = mma8452_get_odr_index(data); + j = mma8452_get_odr_index(data);
for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) { - if (mma8452_os_ratio[i][ret] == val) { + if (mma8452_os_ratio[i][j] == val) { ret = mma8452_set_power_mode(data, i); break; } } + if (i == ARRAY_SIZE(mma8452_os_ratio)) { + ret = -EINVAL; + break; + } break; default: ret = -EINVAL; diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c index 6ddcc3c2f840..5927df633e1f 100644 --- a/drivers/iio/accel/msa311.c +++ b/drivers/iio/accel/msa311.c @@ -593,23 +593,25 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev, __le16 axis; int err;
- err = pm_runtime_resume_and_get(dev); + err = iio_device_claim_direct_mode(indio_dev); if (err) return err;
- err = iio_device_claim_direct_mode(indio_dev); - if (err) + err = pm_runtime_resume_and_get(dev); + if (err) { + iio_device_release_direct_mode(indio_dev); return err; + }
mutex_lock(&msa311->lock); err = msa311_get_axis(msa311, chan, &axis); mutex_unlock(&msa311->lock);
- iio_device_release_direct_mode(indio_dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev);
+ iio_device_release_direct_mode(indio_dev); + if (err) { dev_err(dev, "can't get axis %s (%pe)\n", chan->datasheet_name, ERR_PTR(err)); @@ -755,10 +757,6 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) unsigned int odr; int err;
- err = pm_runtime_resume_and_get(dev); - if (err) - return err; - /* * Sampling frequency changing is prohibited when buffer mode is * enabled, because sometimes MSA311 chip returns outliers during @@ -768,6 +766,12 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) if (err) return err;
+ err = pm_runtime_resume_and_get(dev); + if (err) { + iio_device_release_direct_mode(indio_dev); + return err; + } + err = -EINVAL; for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++) if (val == msa311_odr_table[odr].integral && @@ -778,11 +782,11 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) break; }
- iio_device_release_direct_mode(indio_dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev);
+ iio_device_release_direct_mode(indio_dev); + if (err) dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err));
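Both msa311 paths above now claim IIO direct mode before resuming the device with runtime PM and release in the reverse order, unwinding the claim if the resume fails. Below is a condensed sketch of that acquire-in-order / release-in-reverse shape; the IIO and runtime-PM helpers are the real kernel APIs, but foo_read_axis() and foo_hw_read() are hypothetical.

#include <linux/iio/iio.h>
#include <linux/pm_runtime.h>

static int foo_hw_read(struct iio_dev *indio_dev);	/* stand-in for the real register access */

static int foo_read_axis(struct iio_dev *indio_dev, struct device *dev)
{
	int err;

	/* Claim direct mode first: fail fast while the buffer is enabled. */
	err = iio_device_claim_direct_mode(indio_dev);
	if (err)
		return err;

	/* Only then power the device up; undo the claim if that fails. */
	err = pm_runtime_resume_and_get(dev);
	if (err) {
		iio_device_release_direct_mode(indio_dev);
		return err;
	}

	err = foo_hw_read(indio_dev);

	/* Release in reverse order: drop the PM reference, then the claim. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	iio_device_release_direct_mode(indio_dev);

	return err;
}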
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c index e650ebd167b0..644221862851 100644 --- a/drivers/iio/adc/ad4130.c +++ b/drivers/iio/adc/ad4130.c @@ -223,6 +223,10 @@ enum ad4130_pin_function { AD4130_PIN_FN_VBIAS = BIT(3), };
+/* + * If you make adaptations in this struct, you most likely also have to adapt + * ad4130_setup_info_eq(), too. + */ struct ad4130_setup_info { unsigned int iout0_val; unsigned int iout1_val; @@ -591,6 +595,40 @@ static irqreturn_t ad4130_irq_handler(int irq, void *private) return IRQ_HANDLED; }
+static bool ad4130_setup_info_eq(struct ad4130_setup_info *a, + struct ad4130_setup_info *b) +{ + /* + * This is just to make sure that the comparison is adapted after + * struct ad4130_setup_info was changed. + */ + static_assert(sizeof(*a) == + sizeof(struct { + unsigned int iout0_val; + unsigned int iout1_val; + unsigned int burnout; + unsigned int pga; + unsigned int fs; + u32 ref_sel; + enum ad4130_filter_mode filter_mode; + bool ref_bufp; + bool ref_bufm; + })); + + if (a->iout0_val != b->iout0_val || + a->iout1_val != b->iout1_val || + a->burnout != b->burnout || + a->pga != b->pga || + a->fs != b->fs || + a->ref_sel != b->ref_sel || + a->filter_mode != b->filter_mode || + a->ref_bufp != b->ref_bufp || + a->ref_bufm != b->ref_bufm) + return false; + + return true; +} + static int ad4130_find_slot(struct ad4130_state *st, struct ad4130_setup_info *target_setup_info, unsigned int *slot, bool *overwrite) @@ -604,8 +642,7 @@ static int ad4130_find_slot(struct ad4130_state *st, struct ad4130_slot_info *slot_info = &st->slots_info[i];
/* Immediately accept a matching setup info. */ - if (!memcmp(target_setup_info, &slot_info->setup, - sizeof(*target_setup_info))) { + if (ad4130_setup_info_eq(target_setup_info, &slot_info->setup)) { *slot = i; return 0; } diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index d2060d394c8d..0e6baf017bfd 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -147,7 +147,11 @@ struct ad7124_chip_info { struct ad7124_channel_config { bool live; unsigned int cfg_slot; - /* Following fields are used to compare equality. */ + /* + * Following fields are used to compare for equality. If you + * make adaptations in it, you most likely also have to adapt + * ad7124_find_similar_live_cfg(), too. + */ struct_group(config_props, enum ad7124_ref_sel refsel; bool bipolar; @@ -334,15 +338,38 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_ struct ad7124_channel_config *cfg) { struct ad7124_channel_config *cfg_aux; - ptrdiff_t cmp_size; int i;
- cmp_size = sizeof_field(struct ad7124_channel_config, config_props); + /* + * This is just to make sure that the comparison is adapted after + * struct ad7124_channel_config was changed. + */ + static_assert(sizeof_field(struct ad7124_channel_config, config_props) == + sizeof(struct { + enum ad7124_ref_sel refsel; + bool bipolar; + bool buf_positive; + bool buf_negative; + unsigned int vref_mv; + unsigned int pga_bits; + unsigned int odr; + unsigned int odr_sel_bits; + unsigned int filter_type; + })); + for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg;
if (cfg_aux->live && - !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size)) + cfg->refsel == cfg_aux->refsel && + cfg->bipolar == cfg_aux->bipolar && + cfg->buf_positive == cfg_aux->buf_positive && + cfg->buf_negative == cfg_aux->buf_negative && + cfg->vref_mv == cfg_aux->vref_mv && + cfg->pga_bits == cfg_aux->pga_bits && + cfg->odr == cfg_aux->odr && + cfg->odr_sel_bits == cfg_aux->odr_sel_bits && + cfg->filter_type == cfg_aux->filter_type) return cfg_aux; }
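The ad4130 and ad7124 changes above drop memcmp() over configuration structs in favour of explicit field-by-field comparison, because padding bytes inside a struct are not guaranteed to hold equal values even when every field matches, and they add a static_assert() on the layout so the comparison is revisited whenever a field is added. A small self-contained C illustration of the same idea, using a hypothetical struct cfg:

#include <assert.h>
#include <stdbool.h>

struct cfg {
	unsigned int gain;
	bool bipolar;		/* the compiler may insert padding after this member */
	unsigned int odr;
};

/*
 * memcmp(a, b, sizeof(*a)) would also compare the padding bytes, which can
 * differ between two otherwise identical configurations.
 */
static bool cfg_eq(const struct cfg *a, const struct cfg *b)
{
	/* Force a build failure if struct cfg grows without updating cfg_eq(). */
	static_assert(sizeof(struct cfg) ==
		      sizeof(struct { unsigned int g; bool b; unsigned int o; }),
		      "adapt cfg_eq() after changing struct cfg");

	return a->gain == b->gain &&
	       a->bipolar == b->bipolar &&
	       a->odr == b->odr;
}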
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 56dd030045a2..6769c42e46d4 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -543,6 +543,8 @@ static struct class ib_class = { static void rdma_init_coredev(struct ib_core_device *coredev, struct ib_device *dev, struct net *net) { + bool is_full_dev = &dev->coredev == coredev; + /* This BUILD_BUG_ON is intended to catch layout change * of union of ib_core_device and device. * dev must be the first element as ib_core and providers @@ -554,6 +556,13 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
coredev->dev.class = &ib_class; coredev->dev.groups = dev->groups; + + /* + * Don't expose hw counters outside of the init namespace. + */ + if (!is_full_dev && dev->hw_stats_attr_index) + coredev->dev.groups[dev->hw_stats_attr_index] = NULL; + device_initialize(&coredev->dev); coredev->owner = dev; INIT_LIST_HEAD(&coredev->port_list); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 58befbaaf0ad..242434c09e8d 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2671,11 +2671,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad) { unsigned long flags; - int post, ret; struct ib_mad_private *mad_priv; struct ib_sge sg_list; struct ib_recv_wr recv_wr; struct ib_mad_queue *recv_queue = &qp_info->recv_queue; + int ret = 0;
/* Initialize common scatter list fields */ sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; @@ -2685,7 +2685,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1;
- do { + while (true) { /* Allocate and map receive buffer */ if (mad) { mad_priv = mad; @@ -2693,10 +2693,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, } else { mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), GFP_ATOMIC); - if (!mad_priv) { - ret = -ENOMEM; - break; - } + if (!mad_priv) + return -ENOMEM; } sg_list.length = mad_priv_dma_size(mad_priv); sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, @@ -2705,37 +2703,41 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { - kfree(mad_priv); ret = -ENOMEM; - break; + goto free_mad_priv; } mad_priv->header.mapping = sg_list.addr; mad_priv->header.mad_list.mad_queue = recv_queue; mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; - - /* Post receive WR */ spin_lock_irqsave(&recv_queue->lock, flags); - post = (++recv_queue->count < recv_queue->max_active); - list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); + if (recv_queue->count >= recv_queue->max_active) { + /* Fully populated the receive queue */ + spin_unlock_irqrestore(&recv_queue->lock, flags); + break; + } + recv_queue->count++; + list_add_tail(&mad_priv->header.mad_list.list, + &recv_queue->list); spin_unlock_irqrestore(&recv_queue->lock, flags); + ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); if (ret) { spin_lock_irqsave(&recv_queue->lock, flags); list_del(&mad_priv->header.mad_list.list); recv_queue->count--; spin_unlock_irqrestore(&recv_queue->lock, flags); - ib_dma_unmap_single(qp_info->port_priv->device, - mad_priv->header.mapping, - mad_priv_dma_size(mad_priv), - DMA_FROM_DEVICE); - kfree(mad_priv); dev_err(&qp_info->port_priv->device->dev, "ib_post_recv failed: %d\n", ret); break; } - } while (post); + }
+ ib_dma_unmap_single(qp_info->port_priv->device, + mad_priv->header.mapping, + mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); +free_mad_priv: + kfree(mad_priv); return ret; }
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 9f97bef02149..210092b9bf17 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -988,6 +988,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev) for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++) if (!ibdev->groups[i]) { ibdev->groups[i] = &data->group; + ibdev->hw_stats_attr_index = i; return 0; } WARN(true, "struct ib_device->groups is too small"); diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c index 771059a8eb7d..e349e8d2fb50 100644 --- a/drivers/infiniband/hw/erdma/erdma_cm.c +++ b/drivers/infiniband/hw/erdma/erdma_cm.c @@ -705,7 +705,6 @@ static void erdma_accept_newconn(struct erdma_cep *cep) erdma_cancel_mpatimer(new_cep);
erdma_cep_put(new_cep); - new_cep->sock = NULL; }
if (new_s) { diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index 6fa9b1253299..c4c49a3f11b0 100644 --- a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -327,7 +327,7 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, unsigned int tail = 0; u64 *page_addr_list; void *request_buf; - int err; + int err = 0;
mdev = dev->gdma_dev; gc = mdev->gdma_context; diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 9773d2a3d97f..ee9acd58c512 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -487,7 +487,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, }
qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; - if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { + if (!*cur_qp || (qpn != (*cur_qp)->trans_qp.base.mqp.qpn)) { /* We do not have to take the QP table lock here, * because CQs will be locked while QPs are removed * from the table. diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index f1a0a324223c..7ad5db46ffce 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -274,9 +274,6 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, blk_start_idx = idx; in_block = 1; } - - /* Count page invalidations */ - invalidations += idx - blk_start_idx + 1; } else { u64 umr_offset = idx & umr_block_mask;
@@ -286,14 +283,19 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); in_block = 0; + /* Count page invalidations */ + invalidations += idx - blk_start_idx + 1; } } } - if (in_block) + if (in_block) { mlx5r_umr_update_xlt(mr, blk_start_idx, idx - blk_start_idx + 1, 0, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); + /* Count page invalidations */ + invalidations += idx - blk_start_idx + 1; + }
mlx5_update_odp_stats(mr, invalidations, invalidations);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 214ed81eb0e9..136cb7f7469b 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -147,8 +147,19 @@ static void set_brightness_delayed(struct work_struct *ws) * before this work item runs once. To make sure this works properly * handle LED_SET_BRIGHTNESS_OFF first. */ - if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) + if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) { set_brightness_delayed_set_brightness(led_cdev, LED_OFF); + /* + * The consecutive led_set_brightness(LED_OFF), + * led_set_brightness(LED_FULL) could have been executed out of + * order (LED_FULL first), if the work_flags has been set + * between LED_SET_BRIGHTNESS_OFF and LED_SET_BRIGHTNESS of this + * work. To avoid ending with the LED turned off, turn the LED + * on again. + */ + if (led_cdev->delayed_set_value != LED_OFF) + set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); + }
if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags)) set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value); @@ -319,10 +330,13 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value) * change is done immediately afterwards (before the work runs), * it uses a separate work_flag. */ - if (value) { - led_cdev->delayed_set_value = value; + led_cdev->delayed_set_value = value; + /* Ensure delayed_set_value is seen before work_flags modification */ + smp_mb__before_atomic(); + + if (value) set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); - } else { + else { clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); clear_bit(LED_SET_BLINK, &led_cdev->work_flags); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 2f5165918163..cfe59c3255f7 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -2701,8 +2701,11 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz) u8 ratio;
if (state->revision == 0x8090) { + u32 internal = dib8000_read32(state, 23) / 1000; + ratio = 4; - unit_khz_dds_val = (1<<26) / (dib8000_read32(state, 23) / 1000); + + unit_khz_dds_val = (1<<26) / (internal ?: 1); if (offset_khz < 0) dds = (1 << 26) - (abs_offset_khz * unit_khz_dds_val); else diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c index 7dffea2ad88a..4994a2e65fed 100644 --- a/drivers/media/platform/allegro-dvt/allegro-core.c +++ b/drivers/media/platform/allegro-dvt/allegro-core.c @@ -3914,6 +3914,7 @@ static int allegro_probe(struct platform_device *pdev) if (ret < 0) { v4l2_err(&dev->v4l2_dev, "failed to request firmware: %d\n", ret); + v4l2_device_unregister(&dev->v4l2_dev); return ret; }
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c index a9d4ac84a8d8..d1971af5f7fa 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c @@ -517,6 +517,7 @@ static void set_buffers(struct hantro_ctx *ctx) hantro_reg_write(vpu, &g2_stream_len, src_len); hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len); hantro_reg_write(vpu, &g2_strm_start_offset, 0); + hantro_reg_write(vpu, &g2_start_bit, 0); hantro_reg_write(vpu, &g2_write_mvs_e, 1);
hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma); diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c index 9b209e687f25..2ce62fe5d60f 100644 --- a/drivers/media/rc/streamzap.c +++ b/drivers/media/rc/streamzap.c @@ -385,8 +385,8 @@ static void streamzap_disconnect(struct usb_interface *interface) if (!sz) return;
- rc_unregister_device(sz->rdev); usb_kill_urb(sz->urb_in); + rc_unregister_device(sz->rdev); usb_free_urb(sz->urb_in); usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in);
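The streamzap reorder above makes disconnect stop the in-flight receive URB before unregistering the rc device, so the URB completion handler can no longer run against a device that is already gone; teardown then mirrors setup in reverse. A short sketch of that ordering with a hypothetical foo_dev state structure (the USB and rc-core calls themselves are the real APIs):

#include <linux/slab.h>
#include <linux/usb.h>
#include <media/rc-core.h>

struct foo_dev {			/* hypothetical driver state */
	struct rc_dev *rdev;
	struct urb *urb_in;
	void *buf_in;
	dma_addr_t dma_in;
	size_t buf_in_len;
};

static void foo_disconnect(struct usb_interface *intf)
{
	struct foo_dev *foo = usb_get_intfdata(intf);

	if (!foo)
		return;

	/* Stop the asynchronous producer first: no completions run after this. */
	usb_kill_urb(foo->urb_in);

	/* Nothing can feed the rc device any more, so unregistering is safe. */
	rc_unregister_device(foo->rdev);

	usb_free_urb(foo->urb_in);
	usb_free_coherent(interface_to_usbdev(intf), foo->buf_in_len,
			  foo->buf_in, foo->dma_in);
	kfree(foo);
}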
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index d78f73db37c8..ab0985bb5789 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -2247,26 +2247,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, goto err; }
- if (of_node_name_eq(child, "nand")) { - /* Warn about older DT blobs with no compatible property */ - if (!of_property_read_bool(child, "compatible")) { - dev_warn(&pdev->dev, - "Incompatible NAND node: missing compatible"); - ret = -EINVAL; - goto err; - } - } - - if (of_node_name_eq(child, "onenand")) { - /* Warn about older DT blobs with no compatible property */ - if (!of_property_read_bool(child, "compatible")) { - dev_warn(&pdev->dev, - "Incompatible OneNAND node: missing compatible"); - ret = -EINVAL; - goto err; - } - } - if (of_match_node(omap_nand_ids, child)) { /* NAND specific setup */ val = 8; diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 28027982cf69..509dcb226cbf 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -920,7 +920,7 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); void __iomem *regs = smchip->regbase; unsigned long save; unsigned long val; @@ -946,7 +946,7 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset) struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; void __iomem *regs = smchip->regbase; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); unsigned long save; unsigned long ddr;
@@ -971,7 +971,7 @@ static int sm501_gpio_output(struct gpio_chip *chip, { struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); void __iomem *regs = smchip->regbase; unsigned long save; unsigned long val; diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 13fa8588e38c..0293a4dbe7e0 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -1276,19 +1276,25 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) /* Check for some optional GPIO controls */ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd", id, GPIOD_OUT_LOW); - if (IS_ERR(slot->vsd)) - return dev_err_probe(host->dev, PTR_ERR(slot->vsd), + if (IS_ERR(slot->vsd)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->vsd), "error looking up VSD GPIO\n"); + goto err_free_host; + } slot->vio = devm_gpiod_get_index_optional(host->dev, "vio", id, GPIOD_OUT_LOW); - if (IS_ERR(slot->vio)) - return dev_err_probe(host->dev, PTR_ERR(slot->vio), + if (IS_ERR(slot->vio)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->vio), "error looking up VIO GPIO\n"); + goto err_free_host; + } slot->cover = devm_gpiod_get_index_optional(host->dev, "cover", id, GPIOD_IN); - if (IS_ERR(slot->cover)) - return dev_err_probe(host->dev, PTR_ERR(slot->cover), + if (IS_ERR(slot->cover)) { + r = dev_err_probe(host->dev, PTR_ERR(slot->cover), "error looking up cover switch GPIO\n"); + goto err_free_host; + }
host->slots[id] = slot;
@@ -1348,6 +1354,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) device_remove_file(&mmc->class_dev, &dev_attr_slot_name); err_remove_host: mmc_remove_host(mmc); +err_free_host: mmc_free_host(mmc); return r; } diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 0a26831b3b67..713ac3032766 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1339,8 +1339,8 @@ static int sdhci_omap_probe(struct platform_device *pdev) /* R1B responses is required to properly manage HW busy detection. */ mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
- /* Allow card power off and runtime PM for eMMC/SD card devices */ - mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM; + /* Enable SDIO card power off. */ + mmc->caps |= MMC_CAP_POWER_OFF_CARD;
ret = sdhci_setup_host(host); if (ret) diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 3af43ac05825..376fd927ae73 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -399,6 +399,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (!IS_ERR(pxa->clk_core)) clk_prepare_enable(pxa->clk_core);
+ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY; /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 7b5c8bb02f11..e7db6a4e4dc9 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -250,18 +250,33 @@ static int com20020pci_probe(struct pci_dev *pdev, card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, GFP_KERNEL, "arc%d-%d-tx", dev->dev_id, i); + if (!card->tx_led.default_trigger) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pci:green:tx:%d-%d", dev->dev_id, i); - + if (!card->tx_led.name) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->tx_led.dev = &dev->dev; card->recon_led.brightness_set = led_recon_set; card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, GFP_KERNEL, "arc%d-%d-recon", dev->dev_id, i); + if (!card->recon_led.default_trigger) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pci:red:recon:%d-%d", dev->dev_id, i); + if (!card->recon_led.name) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->recon_led.dev = &dev->dev;
ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index a39b33353ca6..8b01ee3e684a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -7156,13 +7156,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) err = mv88e6xxx_switch_reset(chip); mv88e6xxx_reg_unlock(chip); if (err) - goto out; + goto out_phy;
if (np) { chip->irq = of_irq_get(np, 0); if (chip->irq == -EPROBE_DEFER) { err = chip->irq; - goto out; + goto out_phy; } }
@@ -7181,7 +7181,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) mv88e6xxx_reg_unlock(chip);
if (err) - goto out; + goto out_phy;
if (chip->info->g2_irqs > 0) { err = mv88e6xxx_g2_irq_setup(chip); @@ -7215,6 +7215,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); +out_phy: + mv88e6xxx_phy_destroy(chip); out: if (pdata) dev_put(pdata->netdev); @@ -7237,7 +7239,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_ptp_free(chip); }
- mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip); @@ -7250,6 +7251,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); + + mv88e6xxx_phy_destroy(chip); }
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 8bb88b3d900d..ee9e5d7e5277 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -229,7 +229,10 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip) { + mutex_lock(&chip->ppu_mutex); del_timer_sync(&chip->ppu_timer); + cancel_work_sync(&chip->ppu_work); + mutex_unlock(&chip->ppu_mutex); }
int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus, diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index a8d79ee350f8..a332a0e3154a 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1824,18 +1824,22 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, long value = simple_strtol(buf, NULL, 10); long rc;
+ rtnl_lock(); + if (attr == &veth_active_attr) { if (value && !pool->active) { if (netif_running(netdev)) { if (ibmveth_alloc_buffer_pool(pool)) { netdev_err(netdev, "unable to alloc pool\n"); - return -ENOMEM; + rc = -ENOMEM; + goto unlock_err; } pool->active = 1; ibmveth_close(netdev); - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->active = 1; } @@ -1855,48 +1859,59 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
if (i == IBMVETH_NUM_BUFF_POOLS) { netdev_err(netdev, "no active pool >= MTU\n"); - return -EPERM; + rc = -EPERM; + goto unlock_err; }
if (netif_running(netdev)) { ibmveth_close(netdev); pool->active = 0; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } pool->active = 0; } } else if (attr == &veth_num_attr) { if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { - return -EINVAL; + rc = -EINVAL; + goto unlock_err; } else { if (netif_running(netdev)) { ibmveth_close(netdev); pool->size = value; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->size = value; } } } else if (attr == &veth_size_attr) { if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { - return -EINVAL; + rc = -EINVAL; + goto unlock_err; } else { if (netif_running(netdev)) { ibmveth_close(netdev); pool->buff_size = value; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->buff_size = value; } } } + rtnl_unlock();
/* kick the interrupt handler to allocate/deallocate pools */ ibmveth_interrupt(netdev->irq, netdev); return count; + +unlock_err: + rtnl_unlock(); + return rc; }
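The veth_pool_store() rework above wraps the whole sysfs write in rtnl_lock()/rtnl_unlock() and funnels every failure through a single unlock_err label, so no error path can return with the lock still held. A minimal sketch of that single-exit locking shape, with hypothetical foo_* helpers rather than the ibmveth code:

#include <linux/rtnetlink.h>

static long foo_parse_and_apply(const char *buf);	/* stand-in */
static long foo_restart_if_running(void);		/* stand-in */

static ssize_t foo_pool_store(const char *buf, size_t count)
{
	long rc;

	rtnl_lock();

	rc = foo_parse_and_apply(buf);
	if (rc)
		goto unlock_err;

	rc = foo_restart_if_running();
	if (rc)
		goto unlock_err;

	rtnl_unlock();
	return count;

unlock_err:
	rtnl_unlock();
	return rc;
}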
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 63c3c79380a1..0a35d36c2c85 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -808,4 +808,7 @@ /* SerDes Control */ #define E1000_GEN_POLL_TIMEOUT 640
+#define E1000_FEXTNVM12_PHYPD_CTRL_MASK 0x00C00000 +#define E1000_FEXTNVM12_PHYPD_CTRL_P1 0x00800000 + #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 2f9655cf5dd9..364378133526 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -285,6 +285,45 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) } }
+/** + * e1000_reconfigure_k1_exit_timeout - reconfigure K1 exit timeout to + * align to MTP and later platform requirements. + * @hw: pointer to the HW structure + * + * Context: PHY semaphore must be held by caller. + * Return: 0 on success, negative on failure + */ +static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw) +{ + u16 phy_timeout; + u32 fextnvm12; + s32 ret_val; + + if (hw->mac.type < e1000_pch_mtp) + return 0; + + /* Change Kumeran K1 power down state from P0s to P1 */ + fextnvm12 = er32(FEXTNVM12); + fextnvm12 &= ~E1000_FEXTNVM12_PHYPD_CTRL_MASK; + fextnvm12 |= E1000_FEXTNVM12_PHYPD_CTRL_P1; + ew32(FEXTNVM12, fextnvm12); + + /* Wait for the interface to settle */ + usleep_range(1000, 1100); + + /* Change K1 exit timeout */ + ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG, + &phy_timeout); + if (ret_val) + return ret_val; + + phy_timeout &= ~I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK; + phy_timeout |= 0xF00; + + return e1e_wphy_locked(hw, I217_PHY_TIMEOUTS_REG, + phy_timeout); +} + /** * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds * @hw: pointer to the HW structure @@ -327,15 +366,22 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) * LANPHYPC Value bit to force the interconnect to PCIe mode. */ switch (hw->mac.type) { + case e1000_pch_mtp: + case e1000_pch_lnp: + case e1000_pch_ptp: + case e1000_pch_nvp: + /* At this point the PHY might be inaccessible so don't + * propagate the failure + */ + if (e1000_reconfigure_k1_exit_timeout(hw)) + e_dbg("Failed to reconfigure K1 exit timeout\n"); + + fallthrough; case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: - case e1000_pch_mtp: - case e1000_pch_lnp: - case e1000_pch_ptp: - case e1000_pch_nvp: if (e1000_phy_is_accessible_pchlan(hw)) break;
@@ -419,8 +465,20 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) * the PHY is in. */ ret_val = hw->phy.ops.check_reset_block(hw); - if (ret_val) + if (ret_val) { e_err("ME blocked access to PHY after reset\n"); + goto out; + } + + if (hw->mac.type >= e1000_pch_mtp) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_err("Failed to reconfigure K1 exit timeout\n"); + goto out; + } + ret_val = e1000_reconfigure_k1_exit_timeout(hw); + hw->phy.ops.release(hw); + } }
out: @@ -4888,6 +4946,18 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) u16 i;
e1000_initialize_hw_bits_ich8lan(hw); + if (hw->mac.type >= e1000_pch_mtp) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_reconfigure_k1_exit_timeout(hw); + hw->phy.ops.release(hw); + if (ret_val) { + e_dbg("Error failed to reconfigure K1 exit timeout\n"); + return ret_val; + } + }
/* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 2504b11c3169..5feb589a9b5f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -219,6 +219,10 @@ #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) #define I217_PLL_CLOCK_GATE_MASK 0x07FF
+/* PHY Timeouts */ +#define I217_PHY_TIMEOUTS_REG PHY_REG(770, 21) +#define I217_PHY_TIMEOUTS_K1_EXIT_TO_MASK 0x0FC0 + #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
/* Inband Control */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 9e02e4367bec..9bd3d76b5fe2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1108,6 +1108,9 @@ struct mvpp2 {
/* Spinlocks for CM3 shared memory configuration */ spinlock_t mss_spinlock; + + /* Spinlock for shared PRS parser memory and shadow table */ + spinlock_t prs_spinlock; };
struct mvpp2_pcpu_stats { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 34051c9abd97..fce57faf345c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7615,8 +7615,9 @@ static int mvpp2_probe(struct platform_device *pdev) if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) priv->hw_version = MVPP23;
- /* Init mss lock */ + /* Init locks for shared packet processor resources */ spin_lock_init(&priv->mss_spinlock); + spin_lock_init(&priv->prs_spinlock);
/* Initialize network controller */ err = mvpp2_init(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 9af22f497a40..93e978bdf303 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i;
+ lockdep_assert_held(&priv->prs_spinlock); + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL;
@@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) }
/* Initialize tcam entry from hw */ -int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, - int tid) +static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i;
+ lockdep_assert_held(&priv->prs_spinlock); + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL;
@@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, return 0; }
+int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) +{ + int err; + + spin_lock_bh(&priv->prs_spinlock); + err = __mvpp2_prs_init_from_hw(priv, pe, tid); + spin_unlock_bh(&priv->prs_spinlock); + + return err; +} + /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) { @@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); bits = mvpp2_prs_sram_ai_get(&pe);
/* Sram store classification lookup ID in AI bits [5:0] */ @@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) }
/* Set port to unicast or multicast promiscuous mode */ -void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) +static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, + bool add) { struct mvpp2_prs_entry pe; unsigned char cast_match; unsigned int ri; int tid;
+ lockdep_assert_held(&priv->prs_spinlock); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { cast_match = MVPP2_PRS_UCAST_VAL; tid = MVPP2_PE_MAC_UC_PROMISCUOUS; @@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
/* promiscuous mode - Accept unknown unicast or multicast packets */ if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, mvpp2_prs_hw_write(priv, &pe); }
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + spin_lock_bh(&priv->prs_spinlock); + __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add); + spin_unlock_bh(&priv->prs_spinlock); +} + /* Set entry for dsa packets */ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) @@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) @@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); @@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid);
match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); @@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || @@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); }
/* Update ports' mask */ @@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); }
/* Set default entries for various types of dsa packets */ @@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) struct mvpp2_prs_entry pe; int err;
- priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - /* Double VLAN: 0x88A8, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); @@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue;
- mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid);
mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock); + /* Scan TCAM and see if entry with this <vid,port> already exist */ tid = mvpp2_prs_vid_range_find(port, vid, mask);
@@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
/* There isn't room for a new VID filter */ - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); pe.index = tid; @@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); }
/* Enable the current port */ @@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock); return 0; }
@@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) struct mvpp2 *priv = port->priv; int tid;
- /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + spin_lock_bh(&priv->prs_spinlock);
- /* No such entry */ - if (tid < 0) - return; + /* Invalidate TCAM entry with this <vid,port>, if it exists */ + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + if (tid >= 0) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + }
- mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; + spin_unlock_bh(&priv->prs_spinlock); }
/* Remove all existing VID filters on this port */ @@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) struct mvpp2 *priv = port->priv; int tid;
+ spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (priv->prs_shadow[tid].valid) { @@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) priv->prs_shadow[tid].valid = false; } } + + spin_unlock_bh(&priv->prs_spinlock); }
/* Remove VID filtering entry for this port */ @@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv;
+ spin_lock_bh(&priv->prs_spinlock); + /* Invalidate the guard entry */ mvpp2_prs_hw_inv(priv, tid);
priv->prs_shadow[tid].valid = false; + + spin_unlock_bh(&priv->prs_spinlock); }
/* Add guard entry that drops packets when no VID is matched on this port */ @@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&priv->prs_spinlock); + pe.index = tid;
reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); @@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + + spin_unlock_bh(&priv->prs_spinlock); }
/* Parser default initialization */ @@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) { int err, index, i;
+ priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + spin_lock_bh(&priv->prs_spinlock); + /* Enable tcam table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
@@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index);
- priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, @@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_vid_init(priv);
 	err = mvpp2_prs_etype_init(priv);
-	if (err)
-		return err;
-
-	err = mvpp2_prs_vlan_init(pdev, priv);
-	if (err)
-		return err;
-
-	err = mvpp2_prs_pppoe_init(priv);
-	if (err)
-		return err;
-
-	err = mvpp2_prs_ip6_init(priv);
-	if (err)
-		return err;
-
-	err = mvpp2_prs_ip4_init(priv);
-	if (err)
-		return err;
+	err = err ? : mvpp2_prs_vlan_init(pdev, priv);
+	err = err ? : mvpp2_prs_pppoe_init(priv);
+	err = err ? : mvpp2_prs_ip6_init(priv);
+	err = err ? : mvpp2_prs_ip4_init(priv);
 
-	return 0;
+	spin_unlock_bh(&priv->prs_spinlock);
+	return err;
 }
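The init-path consolidation just above relies on the GNU C conditional with an omitted middle operand: err = err ? : next_step() keeps the first non-zero error and only calls the next step while err is still zero, which lets the function fall through to a single unlock-and-return. A small illustration with made-up helpers:

	#include <linux/errno.h>

	/* Hypothetical init steps, each returning 0 or a negative errno. */
	static int init_a(void) { return 0; }
	static int init_b(void) { return -EINVAL; }
	static int init_c(void) { return 0; }

	static int init_all(void)
	{
		int err;

		err = init_a();			/* 0 */
		err = err ? : init_b();		/* a succeeded, so b runs: -EINVAL */
		err = err ? : init_c();		/* err is already set, c is skipped */

		return err;			/* one exit point for any cleanup */
	}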
/* Compare MAC DA with tcam entry data */ @@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
if (mvpp2_prs_mac_range_equals(&pe, da, mask) && @@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, }
/* Update parser's mac da entry */ -int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port, + const u8 *da, bool add) { unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct mvpp2 *priv = port->priv; @@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) return 0; }
+int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
+{
+	int err;
+
+	spin_lock_bh(&port->priv->prs_spinlock);
+	err = __mvpp2_prs_mac_da_accept(port, da, add);
+	spin_unlock_bh(&port->priv->prs_spinlock);
+
+	return err;
+}
+
 int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
@@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
 	unsigned long pmap;
 	int index, tid;
+ spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; @@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue;
- mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid);
pmap = mvpp2_prs_tcam_port_map_get(&pe);
@@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) continue;
/* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); + __mvpp2_prs_mac_da_accept(port, da, false); } + + spin_unlock_bh(&priv->prs_spinlock); }
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) { switch (type) { case MVPP2_TAG_TYPE_EDSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); @@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + spin_unlock_bh(&priv->prs_spinlock); break;
case MVPP2_TAG_TYPE_DSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break;
case MVPP2_TAG_TYPE_MH: case MVPP2_TAG_TYPE_NONE: + spin_lock_bh(&priv->prs_spinlock); /* Remove port form EDSA and DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break;
default: @@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
memset(&pe, 0, sizeof(pe));
+	spin_lock_bh(&priv->prs_spinlock);
+
 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
 					MVPP2_PE_FIRST_FREE_TID);
-	if (tid < 0)
+	if (tid < 0) {
+		spin_unlock_bh(&priv->prs_spinlock);
 		return tid;
+	}
pe.index = tid;
@@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe);
+ spin_unlock_bh(&priv->prs_spinlock); return 0; }
@@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port)
memset(&pe, 0, sizeof(pe));
+ spin_lock_bh(&port->priv->prs_spinlock); + tid = mvpp2_prs_flow_find(port->priv, port->id);
/* Such entry not exist */ @@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&port->priv->prs_spinlock); return tid; + }
pe.index = tid;
@@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); }
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, &pe);
+ spin_unlock_bh(&port->priv->prs_spinlock); return 0; }
@@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index) if (index > MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL;
+ spin_lock_bh(&priv->prs_spinlock); + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
+ spin_unlock_bh(&priv->prs_spinlock); return val; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 524173722223..67e6d755b30e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2563,7 +2563,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
 		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
-		vfs -= 64;
+		vfs = 64;
 	}
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index bffe04e6d025..774d8b034725 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -217,7 +217,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu) rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false; }
- for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++) + for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[offs + i]) { free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); rvu->irq_allocated[offs + i] = false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 30507b7c2fb1..775010e94cb7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -408,7 +408,7 @@ u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * - PAGE_SIZE; + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu)); } @@ -881,7 +881,8 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { - int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); @@ -1094,7 +1095,8 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { - int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL)); int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL); diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 7b3739b29c8f..bb0bf1415872 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -630,6 +630,16 @@ static const struct driver_info zte_rndis_info = { .tx_fixup = rndis_tx_fixup, };
+static const struct driver_info wwan_rndis_info = { + .description = "Mobile Broadband RNDIS device", + .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, + .bind = rndis_bind, + .unbind = rndis_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, +}; + /*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = { @@ -666,9 +676,11 @@ static const struct usb_device_id products [] = { USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long) &rndis_info, }, { - /* Novatel Verizon USB730L */ + /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and + * Telit FN990A (RNDIS) + */ USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), - .driver_info = (unsigned long) &rndis_info, + .driver_info = (unsigned long)&wwan_rndis_info, }, { }, // END }; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 08cbc8e4b361..ac0458b96738 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -530,7 +530,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) netif_device_present (dev->net) && test_bit(EVENT_DEV_OPEN, &dev->flags) && !test_bit (EVENT_RX_HALT, &dev->flags) && - !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { + !test_bit (EVENT_DEV_ASLEEP, &dev->flags) && + !usbnet_going_away(dev)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { case -EPIPE: usbnet_defer_kevent (dev, EVENT_RX_HALT); @@ -551,8 +552,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) tasklet_schedule (&dev->bh); break; case 0: - if (!usbnet_going_away(dev)) - __usbnet_queue_skb(&dev->rxq, skb, rx_start); + __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 7710367c319e..14f3c4900f56 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1167,6 +1167,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev) struct brcmf_bus *bus_if; struct brcmf_sdio_dev *sdiodev; mmc_pm_flag_t sdio_flags; + bool cap_power_off; int ret = 0;
func = container_of(dev, struct sdio_func, dev); @@ -1174,19 +1175,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev) if (func->num != 1) return 0;
+ cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
bus_if = dev_get_drvdata(dev); sdiodev = bus_if->bus_priv.sdio;
- if (sdiodev->wowl_enabled) { + if (sdiodev->wowl_enabled || !cap_power_off) { brcmf_sdiod_freezer_on(sdiodev); brcmf_sdio_wd_timer(sdiodev->bus, 0);
sdio_flags = MMC_PM_KEEP_POWER; - if (sdiodev->settings->bus.sdio.oob_irq_supported) - enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); - else - sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + + if (sdiodev->wowl_enabled) { + if (sdiodev->settings->bus.sdio.oob_irq_supported) + enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); + else + sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + }
if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags)) brcmf_err("Failed to set pm_flags %x\n", sdio_flags); @@ -1208,18 +1213,19 @@ static int brcmf_ops_sdio_resume(struct device *dev) struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct sdio_func *func = container_of(dev, struct sdio_func, dev); int ret = 0; + bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num); if (func->num != 2) return 0;
- if (!sdiodev->wowl_enabled) { + if (!sdiodev->wowl_enabled && cap_power_off) { /* bus was powered off and device removed, probe again */ ret = brcmf_sdiod_probe(sdiodev); if (ret) brcmf_err("Failed to probe device on resume\n"); } else { - if (sdiodev->settings->bus.sdio.oob_irq_supported) + if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported) disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
brcmf_sdiod_freezer_off(sdiodev); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 2a408e1ce06e..0a1f302ad6d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -559,41 +559,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, }
/* - * alloc_sgtable - allocates scallerlist table in the given size, - * fills it with pages and returns it + * alloc_sgtable - allocates (chained) scatterlist in the given size, + * fills it with pages and returns it * @size: the size (in bytes) of the table -*/ -static struct scatterlist *alloc_sgtable(int size) + */ +static struct scatterlist *alloc_sgtable(ssize_t size) { - int alloc_size, nents, i; - struct page *new_page; - struct scatterlist *iter; - struct scatterlist *table; + struct scatterlist *result = NULL, *prev; + int nents, i, n_prev;
nents = DIV_ROUND_UP(size, PAGE_SIZE); - table = kcalloc(nents, sizeof(*table), GFP_KERNEL); - if (!table) - return NULL; - sg_init_table(table, nents); - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = alloc_page(GFP_KERNEL); - if (!new_page) { - /* release all previous allocated pages in the table */ - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = sg_page(iter); - if (new_page) - __free_page(new_page); - } - kfree(table); + +#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result)) + /* + * We need an additional entry for table chaining, + * this ensures the loop can finish i.e. we can + * fit at least two entries per page (obviously, + * many more really fit.) + */ + BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2); + + while (nents > 0) { + struct scatterlist *new, *iter; + int n_fill, n_alloc; + + if (nents <= N_ENTRIES_PER_PAGE) { + /* last needed table */ + n_fill = nents; + n_alloc = nents; + nents = 0; + } else { + /* fill a page with entries */ + n_alloc = N_ENTRIES_PER_PAGE; + /* reserve one for chaining */ + n_fill = n_alloc - 1; + nents -= n_fill; + } + + new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL); + if (!new) { + if (result) + _devcd_free_sgtable(result); return NULL; } - alloc_size = min_t(int, size, PAGE_SIZE); - size -= PAGE_SIZE; - sg_set_page(iter, new_page, alloc_size, 0); + sg_init_table(new, n_alloc); + + if (!result) + result = new; + else + sg_chain(prev, n_prev, new); + prev = new; + n_prev = n_alloc; + + for_each_sg(new, iter, n_fill, i) { + struct page *new_page = alloc_page(GFP_KERNEL); + + if (!new_page) { + _devcd_free_sgtable(result); + return NULL; + } + + sg_set_page(iter, new_page, PAGE_SIZE, 0); + } } - return table; + + return result; }
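To put rough numbers on the chained-scatterlist rework above (illustrative figures, not taken from the patch): with 4 KiB pages and, say, a 32-byte struct scatterlist, N_ENTRIES_PER_PAGE would be 128, so each kcalloc'd table holds 127 data entries plus one entry reserved for sg_chain() to the next table. A 1 MiB dump (256 pages) would then need three chained tables (127 + 127 + 2 data entries) rather than the single 256-entry table the old code allocated in one kcalloc(), whose size grew without bound with the dump size.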
static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 8cff24d5f5f4..e4efd3349bc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -1275,7 +1275,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, */ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); u32 rate_n_flags = phy_data->rate_n_flags; - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU; @@ -1330,13 +1330,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
if (he_mu) he_mu->flags2 |= - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1) + else if (he_type == RATE_MCS_HE_TYPE_TRIG) he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c index ffcfc3e02c35..a5aa96a31f4a 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c @@ -215,6 +215,9 @@ static int gen3_init_ntb(struct intel_ntb_dev *ndev) }
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + /* Make sure we are not using DB's used for link status */ + if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) + ndev->db_valid_mask &= ~ndev->db_link_mask;
ndev->reg->db_iowrite(ndev->db_valid_mask, ndev->self_mmio + diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index 0a94c634ddc2..b5f93f07e22a 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -288,7 +288,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, if (size != 0 && xlate_pos < 12) return -EINVAL;
- if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { + if (xlate_pos >= 0 && !IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { /* * In certain circumstances we can get a buffer that is * not aligned to its size. (Most of the time diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 72bc1d017a46..dfd175f79e8f 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -839,10 +839,8 @@ static int perf_copy_chunk(struct perf_thread *pthr, dma_set_unmap(tx, unmap);
 	ret = dma_submit_error(dmaengine_submit(tx));
-	if (ret) {
-		dmaengine_unmap_put(unmap);
+	if (ret)
 		goto err_free_resource;
-	}
dmaengine_unmap_put(unmap);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index a36ec6df6624..a763df0200ab 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1282,8 +1282,19 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; struct nvme_command cmd = { }; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ /* + * Shutdown the device immediately if we see it is disconnected. This + * unblocks PCIe error handling if the nvme driver is waiting in + * error_resume for a device that has been removed. We can't unbind the + * driver while the driver's error callback is waiting to complete, so + * we're relying on a timeout to break that deadlock if a removal + * occurs while reset work is running. + */ + if (pci_dev_is_disconnected(pdev)) + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); if (nvme_state_terminal(&dev->ctrl)) goto disable;
@@ -1291,7 +1302,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) * the recovery mechanism will surely fail. */ mb(); - if (pci_channel_offline(to_pci_dev(dev->dev))) + if (pci_channel_offline(pdev)) return BLK_EH_RESET_TIMER;
/* @@ -1850,6 +1861,18 @@ static void nvme_map_cmb(struct nvme_dev *dev) if (offset > bar_size) return;
+ /* + * Controllers may support a CMB size larger than their BAR, for + * example, due to being behind a bridge. Reduce the CMB to the + * reported size of the BAR + */ + size = min(size, bar_size - offset); + + if (!IS_ALIGNED(size, memremap_compat_align()) || + !IS_ALIGNED(pci_resource_start(pdev, bar), + memremap_compat_align())) + return; + /* * Tell the controller about the host side address mapping the CMB, * and enable CMB decoding for the NVMe 1.4+ scheme: @@ -1860,17 +1883,10 @@ static void nvme_map_cmb(struct nvme_dev *dev) dev->bar + NVME_REG_CMBMSC); }
- /* - * Controllers may support a CMB size larger than their BAR, - * for example, due to being behind a bridge. Reduce the CMB to - * the reported size of the BAR - */ - if (size > bar_size - offset) - size = bar_size - offset; - if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { dev_warn(dev->ctrl.device, "failed to register the CMB\n"); + hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); return; }
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 2a8bd812b1a6..84db7f4f861c 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2467,6 +2467,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; + int ret;
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) return 0; @@ -2474,9 +2475,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) set_bit(NVME_TCP_Q_POLLING, &queue->flags); if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) sk_busy_loop(sk, true); - nvme_tcp_try_recv(queue); + ret = nvme_tcp_try_recv(queue); clear_bit(NVME_TCP_Q_POLLING, &queue->flags); - return queue->nr_cqe; + return ret < 0 ? ret : queue->nr_cqe; }
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index b8b655d4047e..a87dab9abba2 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -354,8 +354,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, spin_unlock_irqrestore(&ep->lock, flags);
offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | - CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | - CDNS_PCIE_MSG_NO_DATA; + CDNS_PCIE_NORMAL_MSG_CODE(msg_code); writel(0, ep->irq_cpu_addr + offset); }
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index d55dfd173f22..9efb71cbe699 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -240,7 +240,7 @@ struct cdns_pcie_rp_ib_bar { #define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) #define CDNS_PCIE_NORMAL_MSG_CODE(code) \ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) -#define CDNS_PCIE_MSG_NO_DATA BIT(16) +#define CDNS_PCIE_MSG_DATA BIT(16)
struct cdns_pcie;
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index fd484cc7c481..335b26635ee9 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev) ret = histb_pcie_host_enable(pp); if (ret) { dev_err(dev, "failed to enable host\n"); - return ret; + goto err_exit_phy; }
ret = dw_pcie_host_init(pp); if (ret) { dev_err(dev, "failed to initialize host\n"); - return ret; + goto err_exit_phy; }
return 0; + +err_exit_phy: + phy_exit(hipcie->phy); + + return ret; }
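The histb hunk above is the usual probe-time unwinding rule: once phy_init() has succeeded, later failures must jump to a label that calls phy_exit() instead of returning directly. In generic form (hypothetical names, not the histb driver):

	struct platform_device;

	static int demo_phy_init(struct platform_device *pdev);
	static void demo_phy_exit(struct platform_device *pdev);
	static int demo_host_enable(struct platform_device *pdev);
	static int demo_host_init(struct platform_device *pdev);

	static int demo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = demo_phy_init(pdev);
		if (ret)
			return ret;		/* nothing acquired yet */

		ret = demo_host_enable(pdev);
		if (ret)
			goto err_exit_phy;	/* undo demo_phy_init() */

		ret = demo_host_init(pdev);
		if (ret)
			goto err_exit_phy;

		return 0;

	err_exit_phy:
		demo_phy_exit(pdev);
		return ret;
	}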
static void histb_pcie_remove(struct platform_device *pdev) @@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev)
histb_pcie_host_disable(hipcie);
- if (hipcie->phy) - phy_exit(hipcie->phy); + phy_exit(hipcie->phy); }
static const struct of_device_id histb_pcie_of_match[] = { diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e47a77f943b1..44d385f5c27c 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -378,10 +378,10 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie) static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) { u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); - u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen; - writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
lnkctl2 = (lnkctl2 & ~0xf) | gen; writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); @@ -1132,7 +1132,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)
ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); if (ret) { - dev_info(dev, "No regulators for downstream device\n"); + dev_info(dev, "Did not get regulators, err=%d\n", ret); + pcie->sr = NULL; goto no_regulators; }
@@ -1155,7 +1156,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus) struct subdev_regulators *sr = pcie->sr; struct device *dev = &bus->dev;
- if (!sr) + if (!sr || !bus->parent || !pci_is_root_bus(bus->parent)) return;
if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index 4a787a941674..51379d791005 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -594,15 +594,17 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) return err;
bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); - if (!bus) - return -ENODEV; + if (!bus) { + err = -ENODEV; + goto err_free_irq_domains; + }
port->variant = of_device_get_match_data(dev);
err = xilinx_cpm_pcie_parse_dt(port, bus->res); if (err) { dev_err(dev, "Parsing DT failed\n"); - goto err_parse_dt; + goto err_free_irq_domains; }
xilinx_cpm_pcie_init_port(port); @@ -626,7 +628,7 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) xilinx_cpm_free_interrupts(port); err_setup_irq: pci_ecam_free(port->cfg); -err_parse_dt: +err_free_irq_domains: xilinx_cpm_free_irq_domains(port); return err; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index fd713abdfb9f..b0bccc4d0da2 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -839,7 +839,9 @@ void pcie_enable_interrupt(struct controller *ctrl) { u16 mask;
-	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
+	mask = PCI_EXP_SLTCTL_DLLSCE;
+	if (!pciehp_poll_mode)
+		mask |= PCI_EXP_SLTCTL_HPIE;
 	pcie_write_cmd(ctrl, mask, mask);
 }
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 095fa1910d36..bcce569a8339 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5714,6 +5714,8 @@ static bool pci_bus_resettable(struct pci_bus *bus) return false;
list_for_each_entry(dev, &bus->devices, bus_list) { + if (!pci_reset_supported(dev)) + return false; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; @@ -5790,6 +5792,8 @@ static bool pci_slot_resettable(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; + if (!pci_reset_supported(dev)) + return false; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 0aef6dc055b9..4e995ca4de01 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -977,16 +977,16 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) parent_link = link->parent;
/* - * link->downstream is a pointer to the pci_dev of function 0. If - * we remove that function, the pci_dev is about to be deallocated, - * so we can't use link->downstream again. Free the link state to - * avoid this. + * Free the parent link state, no later than function 0 (i.e. + * link->downstream) being removed. * - * If we're removing a non-0 function, it's possible we could - * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends - * programming the same ASPM Control value for all functions of - * multi-function devices, so disable ASPM for all of them. + * Do not free the link state any earlier. If function 0 is a + * switch upstream port, this link state is parent_link to all + * subordinate ones. */ + if (pdev != link->downstream) + goto out; + pcie_config_aspm_link(link, 0); list_del(&link->sibling); free_link_state(link); @@ -997,6 +997,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) pcie_config_aspm_path(parent_link); }
+ out: mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); } diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index 46fad0d813b2..d6e5fef54c3b 100644 --- a/drivers/pci/pcie/portdrv.c +++ b/drivers/pci/pcie/portdrv.c @@ -227,10 +227,12 @@ static int get_port_device_capability(struct pci_dev *dev)
/* * Disable hot-plug interrupts in case they have been enabled - * by the BIOS and the hot-plug service driver is not loaded. + * by the BIOS and the hot-plug service driver won't be loaded + * to handle them. */ - pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); + if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); }
#ifdef CONFIG_PCIEAER diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 03b519a22840..bcd1ba829e1f 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -927,10 +927,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) /* Temporarily move resources off the list */ list_splice_init(&bridge->windows, &resources); err = device_add(&bridge->dev); - if (err) { - put_device(&bridge->dev); + if (err) goto free; - } + bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 5a143ad5fca2..fba402f4f633 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -2018,8 +2018,7 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus, * in case of root bus. */ if (bridge && pci_bridge_resources_not_assigned(dev)) - pci_bridge_distribute_available_resources(bridge, - add_list); + pci_bridge_distribute_available_resources(dev, add_list); else pci_root_bus_distribute_available_resources(b, add_list); } diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 3be04ab760d3..9775f6be1c1e 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1524,7 +1524,6 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl, .clk_rate = 19200000, .npwm = 1, .base_unit_bits = 22, - .bypass = true, }; struct pwm_lpss_chip *pwm;
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c index 990b96d45967..c5d733216508 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza2.c +++ b/drivers/pinctrl/renesas/pinctrl-rza2.c @@ -253,6 +253,8 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv) return ret; }
+ of_node_put(of_args.np); + if ((of_args.args[0] != 0) || (of_args.args[1] != 0) || (of_args.args[2] != priv->npins)) { diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 2ea6ef99cc70..ac629c72d592 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -1367,6 +1367,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) return ret; }
+ of_node_put(of_args.np); + if (of_args.args[0] != 0 || of_args.args[1] != 0 || of_args.args[2] != pctrl->data->n_port_pins) { dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n"); diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c index 52aeafaba4b6..b89ae65e71b0 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c +++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c @@ -944,6 +944,8 @@ static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl) return ret; }
+ of_node_put(of_args.np); + if (of_args.args[0] != 0 || of_args.args[1] != 0 || of_args.args[2] != pctrl->data->n_port_pins) { dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n"); diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 734c71ef005b..7c12a3470642 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -272,6 +272,9 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, val = pmx_readl(pmx, g->mux_bank, g->mux_reg); val &= ~(0x3 << g->mux_bit); val |= i << g->mux_bit; + /* Set the SFIO/GPIO selection to SFIO when under pinmux control*/ + if (pmx->soc->sfsel_in_mux) + val |= (1 << g->sfsel_bit); pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
return 0; diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c index db1e9240dd02..8fb434b6ab4b 100644 --- a/drivers/platform/x86/dell/dell-wmi-ddv.c +++ b/drivers/platform/x86/dell/dell-wmi-ddv.c @@ -665,8 +665,10 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char if (ret < 0) return ret;
- /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */ - return sysfs_emit(buf, "%d\n", value - 2731); + /* Use 2732 instead of 2731.5 to avoid unnecessary rounding and to emulate + * the behaviour of the OEM application which seems to round down the result. + */ + return sysfs_emit(buf, "%d\n", value - 2732); }
static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 7457ca2b27a6..36209997ba98 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -128,6 +128,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), }, }, + { + .ident = "Microsoft Surface Go 4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"), + }, + }, { } };
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 9040a3d39924..7760ecab3e83 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -84,7 +84,7 @@ static DECLARE_HASHTABLE(isst_hash, 8); static DEFINE_MUTEX(isst_hash_lock);
static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param, - u32 data) + u64 data) { struct isst_cmd *sst_cmd;
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 343ab6a82c01..666ed3698afe 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -420,6 +420,11 @@ static const struct intel_vsec_platform_info oobmsm_info = { .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI, };
+/* DMR OOBMSM info */ +static const struct intel_vsec_platform_info dmr_oobmsm_info = { + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI, +}; + /* TGL info */ static const struct intel_vsec_platform_info tgl_info = { .caps = VSEC_CAP_TELEMETRY, @@ -431,6 +436,7 @@ static const struct intel_vsec_platform_info tgl_info = { #define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d #define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d #define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7 +#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1 #define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d #define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d static const struct pci_device_id intel_vsec_pci_ids[] = { @@ -439,6 +445,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) }, { } diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c index 794c8c054450..0e7b3277d481 100644 --- a/drivers/power/supply/max77693_charger.c +++ b/drivers/power/supply/max77693_charger.c @@ -556,7 +556,7 @@ static int max77693_set_charge_input_threshold_volt(struct max77693_charger *chg case 4700000: case 4800000: case 4900000: - data = (uvolt - 4700000) / 100000; + data = ((uvolt - 4700000) / 100000) + 1; break; default: dev_err(chg->dev, "Wrong value for charge input voltage regulation threshold\n"); diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 2d717f2ed396..8f93489aa0d4 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1838,6 +1838,13 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs, while (pd_names[num_pds]) num_pds++;
+ /* Handle single power domain */ + if (num_pds == 1 && dev->pm_domain) { + devs[0] = dev; + pm_runtime_enable(dev); + return 1; + } + for (i = 0; i < num_pds; i++) { devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) { @@ -1858,8 +1865,15 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs, static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, size_t pd_count) { + struct device *dev = qproc->dev; int i;
+ /* Handle single power domain */ + if (pd_count == 1 && dev->pm_domain) { + pm_runtime_disable(dev); + return; + } + for (i = 0; i < pd_count; i++) dev_pm_domain_detach(pds[i], false); } @@ -2471,13 +2485,13 @@ static const struct rproc_hexagon_res msm8974_mss = { .supply = "pll", .uA = 100000, }, - {} - }, - .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { { .supply = "mx", .uV = 1050000, }, + {} + }, + .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { { .supply = "cx", .uA = 100000, @@ -2503,7 +2517,6 @@ static const struct rproc_hexagon_res msm8974_mss = { NULL }, .proxy_pd_names = (char*[]){ - "mx", "cx", NULL }, diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index fd6bf9e77afc..f6336bf4a38b 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -490,16 +490,16 @@ static int adsp_pds_attach(struct device *dev, struct device **devs, if (!pd_names) return 0;
+ while (pd_names[num_pds]) + num_pds++; + /* Handle single power domain */ - if (dev->pm_domain) { + if (num_pds == 1 && dev->pm_domain) { devs[0] = dev; pm_runtime_enable(dev); return 1; }
- while (pd_names[num_pds]) - num_pds++; - for (i = 0; i < num_pds; i++) { devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) { @@ -524,7 +524,7 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds, int i;
/* Handle single power domain */ - if (dev->pm_domain && pd_count) { + if (pd_count == 1 && dev->pm_domain) { pm_runtime_disable(dev); return; } @@ -1240,7 +1240,7 @@ static const struct adsp_data sm8550_mpss_resource = { };
static const struct of_device_id adsp_of_match[] = { - { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init}, + { .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource}, { .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource}, { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init}, { .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource}, diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index e230af51a99b..0c363ca566ff 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -2024,6 +2024,7 @@ int rproc_shutdown(struct rproc *rproc) kfree(rproc->cached_table); rproc->cached_table = NULL; rproc->table_ptr = NULL; + rproc->table_sz = 0; out: mutex_unlock(&rproc->lock); return ret; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 91d12198cc6c..0a3a5af67f0a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2883,7 +2883,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work) static void qla_trace_init(void) { - qla_trc_array = trace_array_get_by_name("qla2xxx"); + qla_trc_array = trace_array_get_by_name("qla2xxx", NULL); if (!qla_trc_array) { ql_log(ql_log_fatal, NULL, 0x0001, "Unable to create qla2xxx trace instance, instance logging will be disabled.\n"); diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index 060c2982e26b..0aadfc201028 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -12,6 +12,7 @@ static void sdw_slave_release(struct device *dev) { struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ of_node_put(slave->dev.of_node); mutex_destroy(&slave->sdw_dev_lock); kfree(slave); } diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig index f23e29b679fb..14afcbbd6104 100644 --- a/drivers/staging/rtl8723bs/Kconfig +++ b/drivers/staging/rtl8723bs/Kconfig @@ -5,6 +5,7 @@ config RTL8723BS depends on m select CFG80211_WEXT select CRYPTO + select CRYPTO_LIB_AES select CRYPTO_LIB_ARC4 help This option enables support for RTL8723BS SDIO drivers, such as diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c index 43fa351e2b9e..b7fdf25bfd23 100644 --- a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c @@ -45,6 +45,9 @@ static int int3402_thermal_probe(struct platform_device *pdev) struct int3402_thermal_data *d; int ret;
+ if (!adev) + return -ENODEV; + if (!acpi_has_method(adev->handle, "_TMP")) return -ENODEV;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index e05341b85c59..788035f0c1ab 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -491,7 +491,8 @@ static int do_output_char(u8 c, struct tty_struct *tty, int space) static int process_output(u8 c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; - int space, retval; + unsigned int space; + int retval;
mutex_lock(&ldata->output_lock);
@@ -527,16 +528,16 @@ static ssize_t process_output_block(struct tty_struct *tty, const u8 *buf, unsigned int nr) { struct n_tty_data *ldata = tty->disc_data; - int space; - int i; + unsigned int space; + int i; const u8 *cp;
mutex_lock(&ldata->output_lock);
space = tty_write_room(tty); - if (space <= 0) { + if (space == 0) { mutex_unlock(&ldata->output_lock); - return space; + return 0; } if (nr > space) nr = space; @@ -701,7 +702,7 @@ static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail, static size_t __process_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; - int space, old_space; + unsigned int space, old_space; size_t tail; u8 c;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index b0137eac7ab3..fbc486546b85 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2318,10 +2318,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) page_size = readl(&xhci->op_regs->page_size); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Supported page size register = 0x%x", page_size); - i = ffs(page_size); - if (i < 16) + val = ffs(page_size) - 1; + if (val < 16) xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size of %iK", (1 << (i+12)) / 1024); + "Supported page size of %iK", (1 << (val + 12)) / 1024); else xhci_warn(xhci, "WARN: no supported page size\n"); /* Use 4K pages, since that's common and the minimum the HC supports */ diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index cf3c8e552def..7c7f388aac96 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -1366,11 +1366,10 @@ static int ucsi_ccg_probe(struct i2c_client *client) uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA; else if (!strcmp(fw_name, "nvidia,gpu")) uc->fw_build = CCG_FW_BUILD_NVIDIA; + if (!uc->fw_build) + dev_err(uc->dev, "failed to get FW build information\n"); }
- if (!uc->fw_build) - dev_err(uc->dev, "failed to get FW build information\n"); - /* reset ccg device and initialize ucsi */ status = ucsi_ccg_init(uc); if (status < 0) { diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 99813232c25e..8d8a22504d71 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1688,14 +1688,19 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } }
+ if (vs->vs_tpg) { + pr_err("vhost-scsi endpoint already set for %s.\n", + vs->vs_vhost_wwpn); + ret = -EEXIST; + goto out; + } + len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; vs_tpg = kzalloc(len, GFP_KERNEL); if (!vs_tpg) { ret = -ENOMEM; goto out; } - if (vs->vs_tpg) - memcpy(vs_tpg, vs->vs_tpg, len);
mutex_lock(&vhost_scsi_mutex); list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { @@ -1711,12 +1716,6 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, tv_tport = tpg->tport;
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { - if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) { - mutex_unlock(&tpg->tv_tpg_mutex); - mutex_unlock(&vhost_scsi_mutex); - ret = -EEXIST; - goto undepend; - } /* * In order to ensure individual vhost-scsi configfs * groups cannot be removed while in use by vhost ioctl, @@ -1763,15 +1762,15 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, } ret = 0; } else { - ret = -EEXIST; + ret = -ENODEV; + goto free_tpg; }
/* - * Act as synchronize_rcu to make sure access to - * old vs->vs_tpg is finished. + * Act as synchronize_rcu to make sure requests after this point + * see a fully setup device. */ vhost_scsi_flush(vs); - kfree(vs->vs_tpg); vs->vs_tpg = vs_tpg; goto out;
@@ -1791,6 +1790,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, target_undepend_item(&tpg->se_tpg.tpg_group.cg_item); } } +free_tpg: kfree(vs_tpg); out: mutex_unlock(&vs->dev.mutex); @@ -1893,6 +1893,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vhost_scsi_flush(vs); kfree(vs->vs_tpg); vs->vs_tpg = NULL; + memset(vs->vs_vhost_wwpn, 0, sizeof(vs->vs_vhost_wwpn)); WARN_ON(vs->vs_events_nr); mutex_unlock(&vs->dev.mutex); return 0; diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 30577b1d3de5..cdbcb86ff394 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -24,7 +24,7 @@ config VGA_CONSOLE Say Y.
config MDA_CONSOLE - depends on !M68K && !PARISC && ISA + depends on VGA_CONSOLE && ISA tristate "MDA text console (dual-headed)" help Say Y here if you have an old MDA or monochrome Hercules graphics diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 648d6cac86e8..682258968079 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -137,13 +137,15 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) */ int au1100fb_setmode(struct au1100fb_device *fbdev) { - struct fb_info *info = &fbdev->info; + struct fb_info *info; u32 words; int index;
if (!fbdev) return -EINVAL;
+ info = &fbdev->info; + /* Update var-dependent FB info */ if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) { if (info->var.bits_per_pixel <= 8) { diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index 65c799ac5604..b9d72f368c6c 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -326,6 +326,13 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, if (var->xres_virtual > 4096 || var->yres_virtual > 2048) return -EINVAL;
+ /* geometry sanity checks */ + if (var->xres + var->xoffset > var->xres_virtual) + return -EINVAL; + + if (var->yres + var->yoffset > var->yres_virtual) + return -EINVAL; + /* can cope with 8,16 or 32bpp */
if (var->bits_per_pixel <= 8) diff --git a/fs/affs/file.c b/fs/affs/file.c index 04c018e19602..93b319917c9a 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -597,7 +597,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize) BUG_ON(tmp > bsize); AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); affs_fix_checksum(sb, bh); bh->b_state &= ~(1UL << BH_New); @@ -726,7 +726,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, tmp = min(bsize - boff, to - from); BUG_ON(boff + tmp > bsize || tmp > bsize); memcpy(AFFS_DATA(bh) + boff, data + from, tmp); - be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp); + AFFS_DATA_HEAD(bh)->size = cpu_to_be32( + max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size))); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += tmp; @@ -748,7 +749,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); @@ -782,7 +783,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 021cf468274b..af03a1c6ba76 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5540,7 +5540,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, ret = btrfs_dec_ref(trans, root, eb, 1); else ret = btrfs_dec_ref(trans, root, eb, 0); - BUG_ON(ret); /* -ENOMEM */ + if (ret) { + btrfs_abort_transaction(trans, ret); + return ret; + } if (is_fstree(root->root_key.objectid)) { ret = btrfs_qgroup_trace_leaf_items(trans, eb); if (ret) { diff --git a/fs/exec.c b/fs/exec.c index 4a6255aa4ea7..ee71a315cc51 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1257,13 +1257,12 @@ int begin_new_exec(struct linux_binprm * bprm) */ bprm->point_of_no_return = true;
- /* - * Make this the only thread in the thread group. - */ + /* Make this the only thread in the thread group */ retval = de_thread(me); if (retval) goto out; - + /* see the comment in check_unsafe_exec() */ + current->fs->in_exec = 0; /* * Cancel any io_uring activity across execve */ @@ -1516,6 +1515,8 @@ static void free_bprm(struct linux_binprm *bprm) } free_arg_pages(bprm); if (bprm->cred) { + /* in case exec fails before de_thread() succeeds */ + current->fs->in_exec = 0; mutex_unlock(¤t->signal->cred_guard_mutex); abort_creds(bprm->cred); } @@ -1604,6 +1605,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm) * suid exec because the differently privileged task * will be able to manipulate the current directory, etc. * It would be nice to force an unshare instead... + * + * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS) + * from another sub-thread until de_thread() succeeds, this + * state is protected by cred_guard_mutex we hold. */ t = p; n_fs = 1; @@ -1890,7 +1895,6 @@ static int bprm_execve(struct linux_binprm *bprm,
sched_mm_cid_after_execve(current); /* execve succeeded */ - current->fs->in_exec = 0; current->in_execve = 0; rseq_execve(current); user_events_execve(current); @@ -1910,7 +1914,6 @@ static int bprm_execve(struct linux_binprm *bprm,
out_unmark: sched_mm_cid_after_execve(current); - current->fs->in_exec = 0; current->in_execve = 0;
return retval; diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 74590041fb2c..24e1e05f9f34 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -265,7 +265,7 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain, clu = next; if (exfat_ent_get(sb, clu, &next)) return -EIO; - } while (next != EXFAT_EOF_CLUSTER); + } while (next != EXFAT_EOF_CLUSTER && count <= p_chain->size);
if (p_chain->size != count) { exfat_fs_error(sb, diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 7ea33c3fe94e..6682b8ab11f1 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -104,6 +104,9 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; + else if (unlikely(next_offset == size && de->name_len == 1 && + de->name[0] == '.')) + error_msg = "'.' directory cannot be the last in data block"; else return 0;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f019ce64eba4..a4d7af7495b7 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -6808,22 +6808,29 @@ static int ext4_statfs_project(struct super_block *sb, dquot->dq_dqb.dqb_bhardlimit); limit >>= sb->s_blocksize_bits;
- if (limit && buf->f_blocks > limit) { + if (limit) { + uint64_t remaining = 0; + curblock = (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; - buf->f_blocks = limit; - buf->f_bfree = buf->f_bavail = - (buf->f_blocks > curblock) ? - (buf->f_blocks - curblock) : 0; + if (limit > curblock) + remaining = limit - curblock; + + buf->f_blocks = min(buf->f_blocks, limit); + buf->f_bfree = min(buf->f_bfree, remaining); + buf->f_bavail = min(buf->f_bavail, remaining); }
limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit, dquot->dq_dqb.dqb_ihardlimit); - if (limit && buf->f_files > limit) { - buf->f_files = limit; - buf->f_ffree = - (buf->f_files > dquot->dq_dqb.dqb_curinodes) ? - (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0; + if (limit) { + uint64_t remaining = 0; + + if (limit > dquot->dq_dqb.dqb_curinodes) + remaining = limit - dquot->dq_dqb.dqb_curinodes; + + buf->f_files = min(buf->f_files, limit); + buf->f_ffree = min(buf->f_ffree, remaining); }
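A worked example of the quota clamping above (numbers invented for illustration): say the project's block hard limit is 6000 blocks, 5800 blocks are already charged to it, and the filesystem itself reports f_blocks=5000 with f_bfree=f_bavail=400. The old code left everything alone because f_blocks (5000) never exceeded the limit (6000). The new code computes remaining = 6000 - 5800 = 200, then reports f_blocks = min(5000, 6000) = 5000 and f_bfree = f_bavail = min(400, 200) = 200, so the project only sees the blocks it is still allowed to allocate. The inode limit branch above applies the same pattern to f_files/f_ffree.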
spin_unlock(&dquot->dq_dqb_lock); diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 12ef91d170bb..7faf1af59d5d 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -681,7 +681,6 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, 0, 0, fuse_wait_dax_page(inode)); }
-/* dmap_end == 0 leads to unmapping of whole file */ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 89bffaed421f..e4d6cc0d2332 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1875,7 +1875,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, if (FUSE_IS_DAX(inode) && is_truncate) { filemap_invalidate_lock(mapping); fault_blocked = true; - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) { filemap_invalidate_unlock(mapping); return err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ceb9f7d23038..3e4c3fcb588b 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -241,7 +241,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
if (dax_truncate) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out_inode_unlock; } @@ -3023,7 +3023,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, inode_lock(inode); if (block_faults) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out; } diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h index 8b39c15c408c..15b2f094d36e 100644 --- a/fs/hostfs/hostfs.h +++ b/fs/hostfs/hostfs.h @@ -60,7 +60,7 @@ struct hostfs_stat { unsigned int uid; unsigned int gid; unsigned long long size; - struct hostfs_timespec atime, mtime, ctime; + struct hostfs_timespec atime, mtime, ctime, btime; unsigned int blksize; unsigned long long blocks; struct { diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index ff201753fd18..44fe76174e12 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -27,6 +27,7 @@ struct hostfs_inode_info { struct inode vfs_inode; struct mutex open_mutex; dev_t dev; + struct hostfs_timespec btime; };
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode) @@ -557,6 +558,7 @@ static int hostfs_inode_set(struct inode *ino, void *data) }
HOSTFS_I(ino)->dev = dev; + HOSTFS_I(ino)->btime = st->btime; ino->i_ino = st->ino; ino->i_mode = st->mode; return hostfs_inode_update(ino, st); @@ -567,7 +569,10 @@ static int hostfs_inode_test(struct inode *inode, void *data) const struct hostfs_stat *st = data; dev_t dev = MKDEV(st->dev.maj, st->dev.min);
- return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev; + return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev && + (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) && + HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec && + HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec; }
static struct inode *hostfs_iget(struct super_block *sb, char *name) diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c index 97e9c40a9448..3bcd9f35e70b 100644 --- a/fs/hostfs/hostfs_user.c +++ b/fs/hostfs/hostfs_user.c @@ -18,39 +18,48 @@ #include "hostfs.h" #include <utime.h>
-static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p) +static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p) { - p->ino = buf->st_ino; - p->mode = buf->st_mode; - p->nlink = buf->st_nlink; - p->uid = buf->st_uid; - p->gid = buf->st_gid; - p->size = buf->st_size; - p->atime.tv_sec = buf->st_atime; - p->atime.tv_nsec = 0; - p->ctime.tv_sec = buf->st_ctime; - p->ctime.tv_nsec = 0; - p->mtime.tv_sec = buf->st_mtime; - p->mtime.tv_nsec = 0; - p->blksize = buf->st_blksize; - p->blocks = buf->st_blocks; - p->rdev.maj = os_major(buf->st_rdev); - p->rdev.min = os_minor(buf->st_rdev); - p->dev.maj = os_major(buf->st_dev); - p->dev.min = os_minor(buf->st_dev); + p->ino = buf->stx_ino; + p->mode = buf->stx_mode; + p->nlink = buf->stx_nlink; + p->uid = buf->stx_uid; + p->gid = buf->stx_gid; + p->size = buf->stx_size; + p->atime.tv_sec = buf->stx_atime.tv_sec; + p->atime.tv_nsec = buf->stx_atime.tv_nsec; + p->ctime.tv_sec = buf->stx_ctime.tv_sec; + p->ctime.tv_nsec = buf->stx_ctime.tv_nsec; + p->mtime.tv_sec = buf->stx_mtime.tv_sec; + p->mtime.tv_nsec = buf->stx_mtime.tv_nsec; + if (buf->stx_mask & STATX_BTIME) { + p->btime.tv_sec = buf->stx_btime.tv_sec; + p->btime.tv_nsec = buf->stx_btime.tv_nsec; + } else { + memset(&p->btime, 0, sizeof(p->btime)); + } + p->blksize = buf->stx_blksize; + p->blocks = buf->stx_blocks; + p->rdev.maj = buf->stx_rdev_major; + p->rdev.min = buf->stx_rdev_minor; + p->dev.maj = buf->stx_dev_major; + p->dev.min = buf->stx_dev_minor; }
int stat_file(const char *path, struct hostfs_stat *p, int fd) { - struct stat64 buf; + struct statx buf; + int flags = AT_SYMLINK_NOFOLLOW;
if (fd >= 0) { - if (fstat64(fd, &buf) < 0) - return -errno; - } else if (lstat64(path, &buf) < 0) { - return -errno; + flags |= AT_EMPTY_PATH; + path = ""; } - stat64_to_hostfs(&buf, p); + + if ((statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf)) < 0) + return -errno; + + statx_to_hostfs(&buf, p); return 0; }
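The hostfs change above switches stat_file() from lstat64()/fstat64() to statx(2) so the host's birth time can be reported when the filesystem provides it. Below is a small userspace program exercising the same call pattern (AT_EMPTY_PATH for an already-open descriptor, STATX_BTIME requested but treated as optional); stat_fd_or_path() is just an illustrative name, and error handling is minimal:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

static int stat_fd_or_path(int fd, const char *path, struct statx *stx)
{
    int flags = AT_SYMLINK_NOFOLLOW;

    if (fd >= 0) {               /* stat an already-open descriptor */
        flags |= AT_EMPTY_PATH;
        path = "";
    } else {
        fd = AT_FDCWD;           /* hostfs passes absolute host paths */
    }
    return statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, stx);
}

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : ".";
    struct statx stx;

    if (stat_fd_or_path(-1, path, &stx) < 0) {
        perror("statx");
        return 1;
    }
    printf("size=%llu\n", (unsigned long long)stx.stx_size);
    if (stx.stx_mask & STATX_BTIME)      /* btime is optional */
        printf("btime=%lld\n", (long long)stx.stx_btime.tv_sec);
    else
        printf("no btime reported by this filesystem\n");
    return 0;
}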
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index eb2f8273e6f1..09df40b612fb 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -147,7 +147,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, de = tmpde; } /* Basic sanity check, whether name doesn't exceed dir entry */ - if (de_len < de->name_len[0] + + if (de_len < sizeof(struct iso_directory_record) || + de_len < de->name_len[0] + sizeof(struct iso_directory_record)) { printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 8f85177f284b..93db6eec4465 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -117,7 +117,8 @@ do { \ if (!(RC)) { \ if (((P)->header.nextindex > \ (((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \ - ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT))) { \ + ((BN) && (((P)->header.maxslot > DTPAGEMAXSLOT) || \ + ((P)->header.stblindex >= DTPAGEMAXSLOT)))) { \ BT_PUTPAGE(MP); \ jfs_error((IP)->i_sb, \ "DT_GETPAGE: dtree page corrupt\n"); \ diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 7252941bf165..b3b08c5ae701 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -559,11 +559,16 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
size_check: if (EALIST_SIZE(ea_buf->xattr) != ea_size) { - int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr)); - - printk(KERN_ERR "ea_get: invalid extended attribute\n"); - print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, - ea_buf->xattr, size, 1); + if (unlikely(EALIST_SIZE(ea_buf->xattr) > INT_MAX)) { + printk(KERN_ERR "ea_get: extended attribute size too large: %u > INT_MAX\n", + EALIST_SIZE(ea_buf->xattr)); + } else { + int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr)); + + printk(KERN_ERR "ea_get: invalid extended attribute\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, + ea_buf->xattr, size, 1); + } ea_release(inode, ea_buf); rc = -EIO; goto clean_up; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 4bf2526a3a18..55cfa1c4e0a6 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -570,17 +570,6 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) ret = true; - else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) { - struct inode *inode; - - spin_lock(&delegation->lock); - inode = delegation->inode; - if (inode && list_empty(&NFS_I(inode)->open_files)) - ret = true; - spin_unlock(&delegation->lock); - } - if (ret) - clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) || test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) || test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) @@ -821,11 +810,25 @@ int nfs4_inode_make_writeable(struct inode *inode) return nfs4_inode_return_delegation(inode); }
-static void nfs_mark_return_if_closed_delegation(struct nfs_server *server, - struct nfs_delegation *delegation) +static void +nfs_mark_return_if_closed_delegation(struct nfs_server *server, + struct nfs_delegation *delegation) { - set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); - set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); + struct inode *inode; + + if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) || + test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) + return; + spin_lock(&delegation->lock); + inode = delegation->inode; + if (!inode) + goto out; + if (list_empty(&NFS_I(inode)->open_files)) + nfs_mark_return_delegation(server, delegation); + else + set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); +out: + spin_unlock(&delegation->lock); }
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server) diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c index 7b59a40d40c0..784f7c1d003b 100644 --- a/fs/nfs/sysfs.c +++ b/fs/nfs/sysfs.c @@ -14,6 +14,7 @@ #include <linux/rcupdate.h> #include <linux/lockd/lockd.h>
+#include "internal.h" #include "nfs4_fs.h" #include "netns.h" #include "sysfs.h" @@ -228,6 +229,25 @@ static void shutdown_client(struct rpc_clnt *clnt) rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL); }
+/* + * Shut down the nfs_client only once all the superblocks + * have been shut down. + */ +static void shutdown_nfs_client(struct nfs_client *clp) +{ + struct nfs_server *server; + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + if (!(server->flags & NFS_MOUNT_SHUTDOWN)) { + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + nfs_mark_client_ready(clp, -EIO); + shutdown_client(clp->cl_rpcclient); +} + static ssize_t shutdown_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -259,7 +279,6 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
server->flags |= NFS_MOUNT_SHUTDOWN; shutdown_client(server->client); - shutdown_client(server->nfs_client->cl_rpcclient);
if (!IS_ERR(server->client_acl)) shutdown_client(server->client_acl); @@ -267,6 +286,7 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr, if (server->nlm_host) shutdown_client(server->nlm_host->h_rpcclnt); out: + shutdown_nfs_client(server->nfs_client); return count; }
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index a25cb2ff1b0b..140784446ad2 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1066,6 +1066,12 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) return openlockstateid(stid); }
+/* + * As the sc_free callback of a delegation, this may be called by nfs4_put_stid + * in nfsd_break_one_deleg. + * Since nfsd_break_one_deleg is called with the flc->flc_lock held, + * this function must never sleep. + */ static void nfs4_free_deleg(struct nfs4_stid *stid) { struct nfs4_delegation *dp = delegstateid(stid); @@ -4920,6 +4926,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
static void nfsd_break_one_deleg(struct nfs4_delegation *dp) { + bool queued; /* * We're assuming the state code never drops its reference * without first removing the lease. Since we're in this lease @@ -4928,7 +4935,10 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) * we know it's safe to take a reference. */ refcount_inc(&dp->dl_stid.sc_count); - WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall)); + queued = nfsd4_run_cb(&dp->dl_recall); + WARN_ON_ONCE(!queued); + if (!queued) + nfs4_put_stid(&dp->dl_stid); }
/* Called from break_lease() with flc_lock held. */ @@ -6279,14 +6289,19 @@ deleg_reaper(struct nfsd_net *nn) spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); - if (clp->cl_state != NFSD4_ACTIVE || - list_empty(&clp->cl_delegations) || - atomic_read(&clp->cl_delegs_in_recall) || - test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) || - (ktime_get_boottime_seconds() - - clp->cl_ra_time < 5)) { + + if (clp->cl_state != NFSD4_ACTIVE) + continue; + if (list_empty(&clp->cl_delegations)) + continue; + if (atomic_read(&clp->cl_delegs_in_recall)) + continue; + if (test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags)) + continue; + if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5) + continue; + if (clp->cl_cb_state != NFSD4_CB_UP) continue; - } list_add(&clp->cl_ra_cblist, &cblist);
/* release in nfsd4_cb_recall_any_release */ diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 9089c58a005c..28aae6ea1e61 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -618,7 +618,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes) u32 off = le32_to_cpu(hdr->de_off);
if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot || - off + sizeof(struct NTFS_DE) > end) { + size_add(off, sizeof(struct NTFS_DE)) > end) { /* incorrect index buffer. */ return false; } @@ -736,7 +736,7 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx, if (end > total) return NULL;
- if (off + sizeof(struct NTFS_DE) > end) + if (size_add(off, sizeof(struct NTFS_DE)) > end) return NULL;
e = Add2Ptr(hdr, off); diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h index 964e27c7b901..c1d1c4a7cf4d 100644 --- a/fs/ntfs3/ntfs.h +++ b/fs/ntfs3/ntfs.h @@ -717,7 +717,7 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr) struct NTFS_DE *e; u16 esize;
- if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used ) + if (de_off >= used || size_add(de_off, sizeof(struct NTFS_DE)) > used) return NULL;
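The size_add() conversions above matter when the addition is done in 32-bit arithmetic (for example a 32-bit size_t): a corrupted on-disk offset near the top of the range can wrap past the "> end" bound check, while a saturating add cannot. A standalone illustration, with a hand-rolled 32-bit saturating add standing in for the kernel helper and made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's size_add() on a 32-bit size_t: saturate. */
static uint32_t sat_add32(uint32_t a, uint32_t b)
{
    uint32_t sum = a + b;

    return sum < a ? UINT32_MAX : sum;
}

int main(void)
{
    uint32_t off = UINT32_MAX - 4;   /* corrupted on-disk offset */
    uint32_t end = 4096;
    uint32_t entry = 16;             /* sizeof(struct NTFS_DE)-like value */

    /* Plain addition wraps to a small value and slips past the bound. */
    printf("wrapping:   %s\n",
           off + entry > end ? "rejected" : "accepted (bug)");
    /* Saturating addition keeps the comparison meaningful. */
    printf("saturating: %s\n",
           sat_add32(off, entry) > end ? "rejected" : "accepted (bug)");
    return 0;
}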
e = Add2Ptr(hdr, de_off); diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index f0937902f7b4..e6191249169e 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -1796,6 +1796,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
el = root_el; while (el->l_tree_depth) { + if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) { + ocfs2_error(ocfs2_metadata_cache_get_super(ci), + "Owner %llu has invalid tree depth %u in extent list\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), + le16_to_cpu(el->l_tree_depth)); + ret = -EROFS; + goto out; + } if (le16_to_cpu(el->l_next_free_rec) == 0) { ocfs2_error(ocfs2_metadata_cache_get_super(ci), "Owner %llu has empty extent list at depth %u\n", diff --git a/fs/proc/base.c b/fs/proc/base.c index 91fe20b7657c..d444155581ca 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -416,7 +416,7 @@ static const struct file_operations proc_pid_cmdline_ops = { #ifdef CONFIG_KALLSYMS /* * Provides a wchan file via kallsyms in a proper one-value-per-file format. - * Returns the resolved symbol. If that fails, simply return the address. + * Returns the resolved symbol to user space. */ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c index 1fc1683b15bd..db9076da2182 100644 --- a/fs/smb/client/cifsacl.c +++ b/fs/smb/client/cifsacl.c @@ -778,7 +778,8 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, }
/* validate that we do not go past end of acl */ - if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { + if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) || + end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { cifs_dbg(VFS, "ACL too small to parse DACL\n"); return; } @@ -799,8 +800,11 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, if (num_aces > 0) { umode_t denied_mode = 0;
- if (num_aces > ULONG_MAX / sizeof(struct smb_ace *)) + if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) / + (offsetof(struct smb_ace, sid) + + offsetof(struct smb_sid, sub_auth) + sizeof(__le16))) return; + ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL); if (!ppace) diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 198681d14153..2d2e41ac9e9d 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -316,6 +316,7 @@ cifs_abort_connection(struct TCP_Server_Info *server) server->ssocket->flags); sock_release(server->ssocket); server->ssocket = NULL; + put_net(cifs_net_ns(server)); } server->sequence_number = 0; server->session_estab = false; @@ -3147,8 +3148,12 @@ generic_ip_connect(struct TCP_Server_Info *server) /* * Grab netns reference for the socket. * - * It'll be released here, on error, or in clean_demultiplex_info() upon server - * teardown. + * This reference will be released in several situations: + * - In the failure path before the cifsd thread is started. + * - In the all place where server->socket is released, it is + * also set to NULL. + * - Ultimately in clean_demultiplex_info(), during the final + * teardown. */ get_net(net);
@@ -3164,10 +3169,8 @@ generic_ip_connect(struct TCP_Server_Info *server) }
rc = bind_socket(server); - if (rc < 0) { - put_net(cifs_net_ns(server)); + if (rc < 0) return rc; - }
/* * Eventually check for other socket options to change from @@ -3213,9 +3216,6 @@ generic_ip_connect(struct TCP_Server_Info *server) if (sport == htons(RFC1001_PORT)) rc = ip_rfc1001_connect(server);
- if (rc < 0) - put_net(cifs_net_ns(server)); - return rc; }
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c index 58380a986af5..5345d2417c7f 100644 --- a/fs/smb/server/auth.c +++ b/fs/smb/server/auth.c @@ -1012,9 +1012,9 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
ses_enc_key = enc ? sess->smb3encryptionkey : sess->smb3decryptionkey; - if (enc) - ksmbd_user_session_get(sess); memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); + if (!enc) + ksmbd_user_session_put(sess);
return 0; } @@ -1213,7 +1213,7 @@ int ksmbd_crypt_message(struct ksmbd_work *work, struct kvec *iov, free_sg: kfree(sg); free_req: - kfree(req); + aead_request_free(req); free_ctx: ksmbd_release_crypto_ctx(ctx); return rc; diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c index 9a134181df61..82dcc86a32c5 100644 --- a/fs/smb/server/mgmt/user_session.c +++ b/fs/smb/server/mgmt/user_session.c @@ -180,7 +180,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn) down_write(&sessions_table_lock); down_write(&conn->session_lock); xa_for_each(&conn->sessions, id, sess) { - if (atomic_read(&sess->refcnt) == 0 && + if (atomic_read(&sess->refcnt) <= 1 && (sess->state != SMB2_SESSION_VALID || time_after(jiffies, sess->last_active + SMB2_SESSION_TIMEOUT))) { @@ -229,7 +229,11 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) if (!ksmbd_chann_del(conn, sess) && xa_empty(&sess->ksmbd_chann_list)) { hash_del(&sess->hlist); - ksmbd_session_destroy(sess); + down_write(&conn->session_lock); + xa_erase(&conn->sessions, sess->id); + up_write(&conn->session_lock); + if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } } } @@ -248,13 +252,30 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) if (xa_empty(&sess->ksmbd_chann_list)) { xa_erase(&conn->sessions, sess->id); hash_del(&sess->hlist); - ksmbd_session_destroy(sess); + if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } } up_write(&conn->session_lock); up_write(&sessions_table_lock); }
+bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn, + unsigned long long id) +{ + struct ksmbd_session *sess; + + down_read(&conn->session_lock); + sess = xa_load(&conn->sessions, id); + if (sess) { + up_read(&conn->session_lock); + return true; + } + up_read(&conn->session_lock); + + return false; +} + struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id) { @@ -308,8 +329,8 @@ void ksmbd_user_session_put(struct ksmbd_session *sess)
if (atomic_read(&sess->refcnt) <= 0) WARN_ON(1); - else - atomic_dec(&sess->refcnt); + else if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); }
struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn, @@ -414,7 +435,7 @@ static struct ksmbd_session *__session_create(int protocol) xa_init(&sess->rpc_handle_list); sess->sequence_number = 1; rwlock_init(&sess->tree_conns_lock); - atomic_set(&sess->refcnt, 1); + atomic_set(&sess->refcnt, 2);
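The ksmbd hunks above move session lifetime to plain reference counting: a session now starts with two references (its creator plus the sessions table), lookups take additional references, and ksmbd_user_session_put() destroys the object only when the last reference is dropped. A generic sketch of that rule using C11 atomics; session_create()/session_put() are illustrative, not the ksmbd API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
    atomic_int refcnt;
    int id;
};

static struct session *session_create(int id)
{
    struct session *s = malloc(sizeof(*s));

    if (!s)
        return NULL;
    s->id = id;
    /* one reference for the creator, one for the lookup table */
    atomic_init(&s->refcnt, 2);
    return s;
}

static void session_get(struct session *s)
{
    atomic_fetch_add(&s->refcnt, 1);
}

static void session_put(struct session *s)
{
    /* destroy only when the last reference goes away */
    if (atomic_fetch_sub(&s->refcnt, 1) == 1) {
        printf("destroying session %d\n", s->id);
        free(s);
    }
}

int main(void)
{
    struct session *s = session_create(42);

    if (!s)
        return 1;
    session_get(s);   /* e.g. a lookup while a request is in flight */
    session_put(s);   /* request done */
    session_put(s);   /* creator done: table reference still holds it */
    session_put(s);   /* table deregisters: last put frees the object */
    return 0;
}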
ret = __init_smb2_session(sess); if (ret) diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h index c1c4b20bd5c6..f21348381d59 100644 --- a/fs/smb/server/mgmt/user_session.h +++ b/fs/smb/server/mgmt/user_session.h @@ -87,6 +87,8 @@ void ksmbd_session_destroy(struct ksmbd_session *sess); struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id); struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id); +bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn, + unsigned long long id); int ksmbd_session_register(struct ksmbd_conn *conn, struct ksmbd_session *sess); void ksmbd_sessions_deregister(struct ksmbd_conn *conn); diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 11e82a14a40a..371a5ead8663 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -724,8 +724,8 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo) work->conn = conn; work->sess = opinfo->sess;
+ ksmbd_conn_r_count_inc(conn); if (opinfo->op_state == OPLOCK_ACK_WAIT) { - ksmbd_conn_r_count_inc(conn); INIT_WORK(&work->work, __smb2_oplock_break_noti); ksmbd_queue_work(work);
@@ -833,8 +833,8 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo) work->conn = conn; work->sess = opinfo->sess;
+ ksmbd_conn_r_count_inc(conn); if (opinfo->op_state == OPLOCK_ACK_WAIT) { - ksmbd_conn_r_count_inc(conn); INIT_WORK(&work->work, __smb2_lease_break_noti); ksmbd_queue_work(work); wait_for_break_ack(opinfo); @@ -1505,6 +1505,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req) if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) { struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
+ if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease_v2) - 4) + return NULL; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; @@ -1517,6 +1521,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req) } else { struct create_lease *lc = (struct create_lease *)cc;
+ if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease)) + return NULL; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 58e5cc2b1f3e..8877f9e900b2 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -1707,44 +1707,38 @@ int smb2_sess_setup(struct ksmbd_work *work)
if (conn->dialect != sess->dialect) { rc = -EINVAL; - ksmbd_user_session_put(sess); goto out_err; }
if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) { rc = -EINVAL; - ksmbd_user_session_put(sess); goto out_err; }
if (strncmp(conn->ClientGUID, sess->ClientGUID, SMB2_CLIENT_GUID_SIZE)) { rc = -ENOENT; - ksmbd_user_session_put(sess); goto out_err; }
if (sess->state == SMB2_SESSION_IN_PROGRESS) { rc = -EACCES; - ksmbd_user_session_put(sess); goto out_err; }
if (sess->state == SMB2_SESSION_EXPIRED) { rc = -EFAULT; - ksmbd_user_session_put(sess); goto out_err; } - ksmbd_user_session_put(sess);
if (ksmbd_conn_need_reconnect(conn)) { rc = -EFAULT; + ksmbd_user_session_put(sess); sess = NULL; goto out_err; }
- sess = ksmbd_session_lookup(conn, sess_id); - if (!sess) { + if (is_ksmbd_session_in_connection(conn, sess_id)) { rc = -EACCES; goto out_err; } @@ -1910,6 +1904,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
sess->last_active = jiffies; sess->state = SMB2_SESSION_EXPIRED; + ksmbd_user_session_put(sess); + work->sess = NULL; if (try_delay) { ksmbd_conn_set_need_reconnect(conn); ssleep(5); @@ -2235,13 +2231,14 @@ int smb2_session_logoff(struct ksmbd_work *work) return -ENOENT; }
- ksmbd_destroy_file_table(&sess->file_table); down_write(&conn->session_lock); sess->state = SMB2_SESSION_EXPIRED; up_write(&conn->session_lock);
- ksmbd_free_user(sess->user); - sess->user = NULL; + if (sess->user) { + ksmbd_free_user(sess->user); + sess->user = NULL; + } ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
rsp->StructureSize = cpu_to_le16(4); @@ -2704,6 +2701,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; }
+ if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_reconn_v2_req)) { + err = -EINVAL; + goto out; + } + recon_v2 = (struct create_durable_reconn_v2_req *)context; persistent_id = recon_v2->Fid.PersistentFileId; dh_info->fp = ksmbd_lookup_durable_fd(persistent_id); @@ -2737,6 +2741,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; }
+ if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_reconn_req)) { + err = -EINVAL; + goto out; + } + recon = (struct create_durable_reconn_req *)context; persistent_id = recon->Data.Fid.PersistentFileId; dh_info->fp = ksmbd_lookup_durable_fd(persistent_id); @@ -2762,6 +2773,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work, goto out; }
+ if (le16_to_cpu(context->DataOffset) + + le32_to_cpu(context->DataLength) < + sizeof(struct create_durable_req_v2)) { + err = -EINVAL; + goto out; + } + durable_v2_blob = (struct create_durable_req_v2 *)context; ksmbd_debug(SMB, "Request for durable v2 open\n"); diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c index 109036e2227c..b90f893762f4 100644 --- a/fs/smb/server/smbacl.c +++ b/fs/smb/server/smbacl.c @@ -270,6 +270,11 @@ static int sid_to_id(struct mnt_idmap *idmap, return -EIO; }
+ if (psid->num_subauth == 0) { + pr_err("%s: zero subauthorities!\n", __func__); + return -EIO; + } + if (sidtype == SIDOWNER) { kuid_t uid; uid_t id; diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 46705dacdd08..7751be9452c1 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -215,6 +215,13 @@ struct drm_dp_mst_branch { */ struct list_head destroy_next;
+ /** + * @rad: Relative Address of the MST branch. + * For &drm_dp_mst_topology_mgr.mst_primary, its rad[8] entries are all 0, + * unset and unused. For MST branches connected after mst_primary, + * in each element of rad[] the nibbles are ordered by the most + * significant 4 bits first and the least significant 4 bits second. + */ u8 rad[8]; u8 lct; int num_ports; diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h index c50b5670c4a5..197916ee91a4 100644 --- a/include/linux/context_tracking_irq.h +++ b/include/linux/context_tracking_irq.h @@ -10,12 +10,12 @@ void ct_irq_exit_irqson(void); void ct_nmi_enter(void); void ct_nmi_exit(void); #else -static inline void ct_irq_enter(void) { } -static inline void ct_irq_exit(void) { } +static __always_inline void ct_irq_enter(void) { } +static __always_inline void ct_irq_exit(void) { } static inline void ct_irq_enter_irqson(void) { } static inline void ct_irq_exit_irqson(void) { } -static inline void ct_nmi_enter(void) { } -static inline void ct_nmi_exit(void) { } +static __always_inline void ct_nmi_enter(void) { } +static __always_inline void ct_nmi_exit(void) { } #endif
#endif diff --git a/include/linux/coresight.h b/include/linux/coresight.h index a269fffaf991..dccfadde84f4 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -575,6 +575,10 @@ extern int coresight_enable(struct coresight_device *csdev); extern void coresight_disable(struct coresight_device *csdev); extern int coresight_timeout(struct csdev_access *csa, u32 offset, int position, int value); +typedef void (*coresight_timeout_cb_t) (struct csdev_access *, u32, int, int); +extern int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb);
extern int coresight_claim_device(struct coresight_device *csdev); extern int coresight_claim_device_unlocked(struct coresight_device *csdev); diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 525cc031596b..7efb4493e51c 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -83,7 +83,7 @@ struct fwnode_endpoint { #define SWNODE_GRAPH_PORT_NAME_FMT "port@%u" #define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
-#define NR_FWNODE_REFERENCE_ARGS 8 +#define NR_FWNODE_REFERENCE_ARGS 16
/** * struct fwnode_reference_args - Fwnode reference with additional arguments diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4a1dc88ddbff..2610a7d156da 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -441,7 +441,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, static inline void disable_irq_nosync_lockdep(unsigned int irq) { disable_irq_nosync(irq); -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_disable(); #endif } @@ -449,7 +449,7 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq) static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) { disable_irq_nosync(irq); -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_save(*flags); #endif } @@ -464,7 +464,7 @@ static inline void disable_irq_lockdep(unsigned int irq)
static inline void enable_irq_lockdep(unsigned int irq) { -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_enable(); #endif enable_irq(irq); @@ -472,7 +472,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) { -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_restore(*flags); #endif enable_irq(irq); diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 8b7daccd11be..3c3a7dede0ef 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1286,14 +1286,25 @@ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, }
/* - * track_pfn_copy is called when vma that is covering the pfnmap gets - * copied through copy_page_range(). + * track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page + * tables copied during copy_page_range(). On success, stores the pfn to be + * passed to untrack_pfn_copy(). */ -static inline int track_pfn_copy(struct vm_area_struct *vma) +static inline int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn) { return 0; }
+/* + * untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during + * copy_page_range(), but after track_pfn_copy() was already called. + */ +static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma, + unsigned long pfn) +{ +} + /* * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or @@ -1306,8 +1317,10 @@ static inline void untrack_pfn(struct vm_area_struct *vma, }
/* - * untrack_pfn_clear is called while mremapping a pfnmap for a new region - * or fails to copy pgtable during duplicate vm area. + * untrack_pfn_clear is called in the following cases on a VM_PFNMAP VMA: + * + * 1) During mremap() on the src VMA after the page tables were moved. + * 2) During fork() on the dst VMA, immediately after duplicating the src VMA. */ static inline void untrack_pfn_clear(struct vm_area_struct *vma) { @@ -1318,7 +1331,10 @@ extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long size); extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn); -extern int track_pfn_copy(struct vm_area_struct *vma); +extern int track_pfn_copy(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, unsigned long *pfn); +extern void untrack_pfn_copy(struct vm_area_struct *dst_vma, + unsigned long pfn); extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size, bool mm_wr_locked); extern void untrack_pfn_clear(struct vm_area_struct *vma); diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 7c9b35448563..406855d73901 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -66,6 +66,7 @@ static inline bool queue_pm_work(struct work_struct *work)
extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); +extern bool pm_runtime_need_not_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev);
@@ -252,6 +253,7 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; }
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; } static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 7602d1f8a9ec..72da69cc5764 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -142,7 +142,7 @@ static inline void rcu_sysrq_end(void) { } #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) void rcu_irq_work_resched(void); #else -static inline void rcu_irq_work_resched(void) { } +static __always_inline void rcu_irq_work_resched(void) { } #endif
#ifdef CONFIG_RCU_NOCB_CPU diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h index 59d3736c454c..737b50f40137 100644 --- a/include/linux/sched/smt.h +++ b/include/linux/sched/smt.h @@ -12,7 +12,7 @@ static __always_inline bool sched_smt_active(void) return static_branch_likely(&sched_smt_present); } #else -static inline bool sched_smt_active(void) { return false; } +static __always_inline bool sched_smt_active(void) { return false; } #endif
void arch_smt_update(void); diff --git a/include/linux/trace.h b/include/linux/trace.h index 2a70a447184c..fdcd76b7be83 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -51,7 +51,7 @@ int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...); int trace_array_init_printk(struct trace_array *tr); void trace_array_put(struct trace_array *tr); -struct trace_array *trace_array_get_by_name(const char *name); +struct trace_array *trace_array_get_by_name(const char *name, const char *systems); int trace_array_destroy(struct trace_array *tr);
/* For osnoise tracer */ @@ -84,7 +84,7 @@ static inline int trace_array_init_printk(struct trace_array *tr) static inline void trace_array_put(struct trace_array *tr) { } -static inline struct trace_array *trace_array_get_by_name(const char *name) +static inline struct trace_array *trace_array_get_by_name(const char *name, const char *systems) { return NULL; } diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index aa1bc4172662..fe95d13c5e4d 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -683,6 +683,20 @@ struct trace_event_file { atomic_t tm_ref; /* trigger-mode reference counter */ };
+#ifdef CONFIG_HIST_TRIGGERS +extern struct irq_work hist_poll_work; +extern wait_queue_head_t hist_poll_wq; + +static inline void hist_poll_wakeup(void) +{ + if (wq_has_sleeper(&hist_poll_wq)) + irq_work_queue(&hist_poll_work); +} + +#define hist_poll_wait(file, wait) \ + poll_wait(file, &hist_poll_wq, wait) +#endif + #define __TRACE_EVENT_FLAGS(name, value) \ static int __init trace_init_flags_##name(void) \ { \ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index bc459d061629..c7e9ec9e9a80 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2738,6 +2738,7 @@ struct ib_device { * It is a NULL terminated array. */ const struct attribute_group *groups[4]; + u8 hw_stats_attr_index;
u64 uverbs_cmd_mask;
diff --git a/kernel/events/core.c b/kernel/events/core.c index 4dd8936b5aa0..b710976fb01b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2333,6 +2333,7 @@ group_sched_out(struct perf_event *group_event, struct perf_event_context *ctx) #define DETACH_GROUP 0x01UL #define DETACH_CHILD 0x02UL #define DETACH_DEAD 0x04UL +#define DETACH_EXIT 0x08UL
/* * Cross CPU call to remove a performance event @@ -2347,6 +2348,7 @@ __perf_remove_from_context(struct perf_event *event, void *info) { struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; + enum perf_event_state state = PERF_EVENT_STATE_OFF; unsigned long flags = (unsigned long)info;
if (ctx->is_active & EVENT_TIME) { @@ -2358,16 +2360,19 @@ __perf_remove_from_context(struct perf_event *event, * Ensure event_sched_out() switches to OFF, at the very least * this avoids raising perf_pending_task() at this time. */ - if (flags & DETACH_DEAD) + if (flags & DETACH_EXIT) + state = PERF_EVENT_STATE_EXIT; + if (flags & DETACH_DEAD) { event->pending_disable = 1; + state = PERF_EVENT_STATE_DEAD; + } event_sched_out(event, ctx); + perf_event_set_state(event, min(event->state, state)); if (flags & DETACH_GROUP) perf_group_detach(event); if (flags & DETACH_CHILD) perf_child_detach(event); list_del_event(event, ctx); - if (flags & DETACH_DEAD) - event->state = PERF_EVENT_STATE_DEAD;
if (!pmu_ctx->nr_events) { pmu_ctx->rotate_necessary = 0; @@ -11556,6 +11561,21 @@ static int pmu_dev_alloc(struct pmu *pmu) static struct lock_class_key cpuctx_mutex; static struct lock_class_key cpuctx_lock;
+static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new) +{ + void *tmp, *val = idr_find(idr, id); + + if (val != old) + return false; + + tmp = idr_replace(idr, new, id); + if (IS_ERR(tmp)) + return false; + + WARN_ON_ONCE(tmp != val); + return true; +} + int perf_pmu_register(struct pmu *pmu, const char *name, int type) { int cpu, ret, max = PERF_TYPE_MAX; @@ -11577,7 +11597,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type) if (type >= 0) max = type;
- ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL); + ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL); if (ret < 0) goto free_pdc;
@@ -11585,6 +11605,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
type = ret; pmu->type = type; + atomic_set(&pmu->exclusive_cnt, 0);
if (pmu_bus_running && !pmu->dev) { ret = pmu_dev_alloc(pmu); @@ -11633,14 +11654,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type) if (!pmu->event_idx) pmu->event_idx = perf_event_idx_default;
+ /* + * Now that the PMU is complete, make it visible to perf_try_init_event(). + */ + if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) + goto free_context; list_add_rcu(&pmu->entry, &pmus); - atomic_set(&pmu->exclusive_cnt, 0); + ret = 0; unlock: mutex_unlock(&pmus_lock);
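The idr_cmpxchg() call above completes a two-phase publish: perf_pmu_register() first reserves the type id with a NULL entry, finishes initialising the pmu, and only then swaps the real pointer in, so concurrent lookups never see a half-constructed pmu. A minimal sketch of the same pattern, with a plain atomic pointer slot standing in for the IDR (the slot array, lookup() and publish() are illustrative only):

#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 8

struct pmu { const char *name; };

static _Atomic(struct pmu *) slots[NSLOTS];   /* all slots start out NULL */

/* A reserved-but-unpublished slot looks exactly like an empty one. */
static struct pmu *lookup(int id)
{
    return atomic_load(&slots[id]);
}

/* Publish only if the slot still holds the reserved NULL placeholder. */
static int publish(int id, struct pmu *p)
{
    struct pmu *expected = NULL;

    return atomic_compare_exchange_strong(&slots[id], &expected, p);
}

int main(void)
{
    static struct pmu my_pmu = { .name = "demo" };
    int id = 3;                  /* pretend this id was reserved earlier */

    printf("before publish: %p\n", (void *)lookup(id));
    /* ... finish initialising my_pmu before making it visible ... */
    if (!publish(id, &my_pmu))
        return 1;
    printf("after publish: %s\n", lookup(id)->name);
    return 0;
}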
return ret;
+free_context: + free_percpu(pmu->cpu_pmu_context); + free_dev: if (pmu->dev && pmu->dev != PMU_NULL_DEV) { device_del(pmu->dev); @@ -13116,12 +13145,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) mutex_lock(&parent_event->child_mutex); }
- perf_remove_from_context(event, detach_flags); - - raw_spin_lock_irq(&ctx->lock); - if (event->state > PERF_EVENT_STATE_EXIT) - perf_event_set_state(event, PERF_EVENT_STATE_EXIT); - raw_spin_unlock_irq(&ctx->lock); + perf_remove_from_context(event, detach_flags | DETACH_EXIT);
/* * Child events can be freed. diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index b0930b418552..52de76ef8723 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -19,7 +19,7 @@
static void perf_output_wakeup(struct perf_output_handle *handle) { - atomic_set(&handle->rb->poll, EPOLLIN); + atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM);
handle->event->pending_wakeup = 1; irq_work_queue(&handle->event->pending_irq); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index f5dfc2f22d79..a554f43d3ceb 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -159,6 +159,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0); int err; struct mmu_notifier_range range; + pte_t pte;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + PAGE_SIZE); @@ -178,6 +179,16 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (!page_vma_mapped_walk(&pvmw)) goto unlock; VM_BUG_ON_PAGE(addr != pvmw.address, old_page); + pte = ptep_get(pvmw.pte); + + /* + * Handle PFN swap PTES, such as device-exclusive ones, that actually + * map pages: simply trigger GUP again to fix it up. + */ + if (unlikely(!pte_present(pte))) { + page_vma_mapped_walk_done(&pvmw); + goto unlock; + }
if (new_page) { folio_get(new_folio); @@ -192,7 +203,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, inc_mm_counter(mm, MM_ANONPAGES); }
- flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); + flush_cache_page(vma, addr, pte_pfn(pte)); ptep_clear_flush(vma, addr, pvmw.pte); if (new_page) set_pte_at_notify(mm, addr, pvmw.pte, diff --git a/kernel/fork.c b/kernel/fork.c index 23efaa2c42e4..97f433fb4b5e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -518,6 +518,10 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) vma_numab_state_init(new); dup_anon_vma_name(orig, new);
+ /* track_pfn_copy() will later take care of copying internal state. */ + if (unlikely(new->vm_flags & VM_PFNMAP)) + untrack_pfn_clear(new); + return new; }
diff --git a/kernel/kexec_elf.c b/kernel/kexec_elf.c index d3689632e8b9..3a5c25b2adc9 100644 --- a/kernel/kexec_elf.c +++ b/kernel/kexec_elf.c @@ -390,7 +390,7 @@ int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, struct kexec_buf *kbuf, unsigned long *lowest_load_addr) { - unsigned long lowest_addr = UINT_MAX; + unsigned long lowest_addr = ULONG_MAX; int ret; size_t i;
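The kexec_elf one-liner above matters on 64-bit builds: if every load address sits above 4 GiB, a minimum tracker seeded with UINT_MAX is never updated and a bogus "lowest" address is reported. A standalone illustration (the addresses are made up and assume a 64-bit unsigned long):

#include <limits.h>
#include <stdio.h>

int main(void)
{
    /* Segment load addresses that all sit above 4 GiB. */
    unsigned long addrs[] = { 0x180000000UL, 0x200000000UL };
    unsigned long bad = UINT_MAX;    /* old seed value   */
    unsigned long good = ULONG_MAX;  /* fixed seed value */

    for (unsigned int i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
        if (addrs[i] < bad)
            bad = addrs[i];
        if (addrs[i] < good)
            good = addrs[i];
    }
    /* "bad" was never updated and still claims a lowest address below 4 GiB. */
    printf("seeded with UINT_MAX:  %#lx\n", bad);
    printf("seeded with ULONG_MAX: %#lx\n", good);
    return 0;
}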
diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index 34bfae72f295..de9117c0e671 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -29,6 +29,7 @@ #include <linux/export.h> #include <linux/sched.h> #include <linux/sched/debug.h> +#include <linux/sched/wake_q.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/ftrace.h> @@ -38,7 +39,7 @@ static noinline void __down(struct semaphore *sem); static noinline int __down_interruptible(struct semaphore *sem); static noinline int __down_killable(struct semaphore *sem); static noinline int __down_timeout(struct semaphore *sem, long timeout); -static noinline void __up(struct semaphore *sem); +static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q);
/** * down - acquire the semaphore @@ -183,13 +184,16 @@ EXPORT_SYMBOL(down_timeout); void __sched up(struct semaphore *sem) { unsigned long flags; + DEFINE_WAKE_Q(wake_q);
raw_spin_lock_irqsave(&sem->lock, flags); if (likely(list_empty(&sem->wait_list))) sem->count++; else - __up(sem); + __up(sem, &wake_q); raw_spin_unlock_irqrestore(&sem->lock, flags); + if (!wake_q_empty(&wake_q)) + wake_up_q(&wake_q); } EXPORT_SYMBOL(up);
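The semaphore rework above defers the actual wakeup until after sem->lock has been dropped: the waiter is only queued on a local wake_q while the raw spinlock is held, and wake_up_q() runs once the lock is released. A rough userspace analogue of that ordering with pthreads; this is clearly not the kernel code, and the condition variable merely plays the role of the deferred wakeup:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool up_pending;

static void *waiter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!up_pending)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    puts("waiter: woken");
    return NULL;
}

int main(void)
{
    pthread_t t;
    bool need_wake = false;          /* plays the role of the wake_q */

    pthread_create(&t, NULL, waiter, NULL);

    pthread_mutex_lock(&lock);
    up_pending = true;               /* the "up" operation */
    need_wake = true;                /* record the wakeup, don't issue it */
    pthread_mutex_unlock(&lock);

    if (need_wake)                   /* wake only after the lock is dropped */
        pthread_cond_signal(&cond);

    pthread_join(t, NULL);
    return 0;
}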
@@ -269,11 +273,12 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long timeout) return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout); }
-static noinline void __sched __up(struct semaphore *sem) +static noinline void __sched __up(struct semaphore *sem, + struct wake_q_head *wake_q) { struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, struct semaphore_waiter, list); list_del(&waiter->list); waiter->up = true; - wake_up_process(waiter->task); + wake_q_add(wake_q, waiter->task); } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index b9e99bc3b1cf..6c639e48e49a 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2780,7 +2780,7 @@ int sched_dl_global_validate(void) * value smaller than the currently allocated bandwidth in * any of the root_domains. */ - for_each_possible_cpu(cpu) { + for_each_online_cpu(cpu) { rcu_read_lock_sched();
if (dl_bw_visited(cpu, gen)) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 9d8f60e0cb55..545393601be8 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -853,7 +853,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) if (unlikely(is_global_init(current))) return -EPERM;
- if (!preemptible()) { + if (preempt_count() != 0 || irqs_disabled()) { /* Do an early check on signal validity. Otherwise, * the error is lost in deferred irq_work. */ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 61caff3d4091..62d93db72b0a 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -5995,9 +5995,9 @@ static __init int rb_write_something(struct rb_test_data *data, bool nested) /* Ignore dropped events before test starts. */ if (started) { if (nested) - data->bytes_dropped += len; - else data->bytes_dropped_nested += len; + else + data->bytes_dropped += len; } return len; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9d9af60b238e..a41c99350a5b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -9417,7 +9417,8 @@ static int trace_array_create_dir(struct trace_array *tr) return ret; }
-static struct trace_array *trace_array_create(const char *name) +static struct trace_array * +trace_array_create_systems(const char *name, const char *systems) { struct trace_array *tr; int ret; @@ -9437,6 +9438,12 @@ static struct trace_array *trace_array_create(const char *name) if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) goto out_free_tr;
+ if (systems) { + tr->system_names = kstrdup_const(systems, GFP_KERNEL); + if (!tr->system_names) + goto out_free_tr; + } + tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
cpumask_copy(tr->tracing_cpumask, cpu_all_mask); @@ -9480,12 +9487,18 @@ static struct trace_array *trace_array_create(const char *name) free_trace_buffers(tr); free_cpumask_var(tr->pipe_cpumask); free_cpumask_var(tr->tracing_cpumask); + kfree_const(tr->system_names); kfree(tr->name); kfree(tr);
return ERR_PTR(ret); }
+static struct trace_array *trace_array_create(const char *name) +{ + return trace_array_create_systems(name, NULL); +} + static int instance_mkdir(const char *name) { struct trace_array *tr; @@ -9511,6 +9524,7 @@ static int instance_mkdir(const char *name) /** * trace_array_get_by_name - Create/Lookup a trace array, given its name. * @name: The name of the trace array to be looked up/created. + * @systems: A list of systems to create event directories for (NULL for all) * * Returns pointer to trace array with given name. * NULL, if it cannot be created. @@ -9524,7 +9538,7 @@ static int instance_mkdir(const char *name) * trace_array_put() is called, user space can not delete it. * */ -struct trace_array *trace_array_get_by_name(const char *name) +struct trace_array *trace_array_get_by_name(const char *name, const char *systems) { struct trace_array *tr;
@@ -9536,7 +9550,7 @@ struct trace_array *trace_array_get_by_name(const char *name) goto out_unlock; }
- tr = trace_array_create(name); + tr = trace_array_create_systems(name, systems);
if (IS_ERR(tr)) tr = NULL; @@ -9583,6 +9597,7 @@ static int __remove_instance(struct trace_array *tr)
free_cpumask_var(tr->pipe_cpumask); free_cpumask_var(tr->tracing_cpumask); + kfree_const(tr->system_names); kfree(tr->name); kfree(tr);
@@ -10301,7 +10316,7 @@ __init static void enable_instances(void) if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE)) do_allocate_snapshot(tok);
- tr = trace_array_get_by_name(tok); + tr = trace_array_get_by_name(tok, NULL); if (!tr) { pr_warn("Failed to create instance buffer %s\n", curr_str); continue; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index e45756f1ac2b..db0d2641125e 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -377,6 +377,7 @@ struct trace_array { unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE]; unsigned int flags; raw_spinlock_t start_lock; + const char *system_names; struct list_head err_log; struct dentry *dir; struct dentry *options; diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 7ccc7a8e155b..dbe29b4c6a7a 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -633,7 +633,7 @@ trace_boot_init_instances(struct xbc_node *node) if (!p || *p == '\0') continue;
- tr = trace_array_get_by_name(p); + tr = trace_array_get_by_name(p, NULL); if (!tr) { pr_err("Failed to get trace instance %s\n", p); continue; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 9d22745cdea5..562efd668572 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -3056,6 +3056,41 @@ void trace_event_eval_update(struct trace_eval_map **map, int len) up_write(&trace_event_sem); }
+static bool event_in_systems(struct trace_event_call *call, + const char *systems) +{ + const char *system; + const char *p; + + if (!systems) + return true; + + system = call->class->system; + p = strstr(systems, system); + if (!p) + return false; + + if (p != systems && !isspace(*(p - 1)) && *(p - 1) != ',') + return false; + + p += strlen(system); + return !*p || isspace(*p) || *p == ','; +} + +#ifdef CONFIG_HIST_TRIGGERS +/* + * Wake up waiter on the hist_poll_wq from irq_work because the hist trigger + * may happen in any context. + */ +static void hist_poll_event_irq_work(struct irq_work *work) +{ + wake_up_all(&hist_poll_wq); +} + +DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work); +DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq); +#endif + static struct trace_event_file * trace_create_new_event(struct trace_event_call *call, struct trace_array *tr) @@ -3065,9 +3100,12 @@ trace_create_new_event(struct trace_event_call *call, struct trace_event_file *file; unsigned int first;
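The event_in_systems() helper shown above treats the systems string as a comma/space separated list and only accepts whole-token matches, so for example "sched" does not match "sched_ext". A standalone re-creation of that matching rule with a few sample inputs; system_in_list() is just an illustrative rename:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool system_in_list(const char *system, const char *systems)
{
    const char *p;

    if (!systems)                    /* NULL means "all systems" */
        return true;

    p = strstr(systems, system);
    if (!p)
        return false;
    if (p != systems && !isspace((unsigned char)p[-1]) && p[-1] != ',')
        return false;
    p += strlen(system);
    return !*p || isspace((unsigned char)*p) || *p == ',';
}

int main(void)
{
    printf("%d\n", system_in_list("sched", "irq,sched,timer")); /* 1 */
    printf("%d\n", system_in_list("sched", "sched_ext"));       /* 0 */
    printf("%d\n", system_in_list("net", NULL));                /* 1 */
    return 0;
}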
+ if (!event_in_systems(call, tr->system_names)) + return NULL; + file = kmem_cache_alloc(file_cachep, GFP_TRACE); if (!file) - return NULL; + return ERR_PTR(-ENOMEM);
pid_list = rcu_dereference_protected(tr->filtered_pids, lockdep_is_held(&event_mutex)); @@ -3132,8 +3170,17 @@ __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) struct trace_event_file *file;
file = trace_create_new_event(call, tr); + /* + * trace_create_new_event() returns ERR_PTR(-ENOMEM) if the allocation + * failed, or NULL if the event is not part of the tr->system_names. + * When the event is not part of the tr->system_names, return zero, not + * an error. + */ if (!file) - return -ENOMEM; + return 0; + + if (IS_ERR(file)) + return PTR_ERR(file);
if (eventdir_initialized) return event_create_dir(tr->event_dir, file); @@ -3172,8 +3219,17 @@ __trace_early_add_new_event(struct trace_event_call *call, int ret;
file = trace_create_new_event(call, tr); + /* + * trace_create_new_event() returns ERR_PTR(-ENOMEM) if the allocation + * failed, or NULL if the event is not part of the tr->system_names. + * When the event is not part of the tr->system_names, return zero, not + * an error. + */ if (!file) - return -ENOMEM; + return 0; + + if (IS_ERR(file)) + return PTR_ERR(file);
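Both callers above now rely on the kernel's usual three-way pointer convention: a real pointer on success, NULL meaning "nothing to do" (the event is filtered out by tr->system_names), and an ERR_PTR-encoded errno for genuine failures. A simplified userspace rendering of that convention; the ERR_PTR/IS_ERR/PTR_ERR stand-ins below are illustrative re-creations, not the kernel's err.h:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace stand-ins for the kernel's err.h helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct event_file { int id; };

/* NULL: caller should silently skip; ERR_PTR: a real failure. */
static struct event_file *create_file(int wanted, int oom)
{
    if (!wanted)
        return NULL;
    if (oom)
        return ERR_PTR(-ENOMEM);
    return calloc(1, sizeof(struct event_file));
}

static int add_event(int wanted, int oom)
{
    struct event_file *file = create_file(wanted, oom);

    if (!file)
        return 0;                /* filtered out: not an error */
    if (IS_ERR(file))
        return (int)PTR_ERR(file);
    free(file);
    return 0;
}

int main(void)
{
    printf("%d %d %d\n", add_event(1, 0), add_event(0, 0), add_event(1, 1));
    return 0;
}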
ret = event_define_fields(call); if (ret) diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 604d63380a90..e6f9cbc622c7 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -5322,6 +5322,8 @@ static void event_hist_trigger(struct event_trigger_data *data,
if (resolve_var_refs(hist_data, key, var_ref_vals, true)) hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals); + + hist_poll_wakeup(); }
static void hist_trigger_stacktrace_print(struct seq_file *m, @@ -5601,49 +5603,137 @@ static void hist_trigger_show(struct seq_file *m, n_entries, (u64)atomic64_read(&hist_data->map->drops)); }
+struct hist_file_data { + struct file *file; + u64 last_read; + u64 last_act; +}; + +static u64 get_hist_hit_count(struct trace_event_file *event_file) +{ + struct hist_trigger_data *hist_data; + struct event_trigger_data *data; + u64 ret = 0; + + list_for_each_entry(data, &event_file->triggers, list) { + if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) { + hist_data = data->private_data; + ret += atomic64_read(&hist_data->map->hits); + } + } + return ret; +} + static int hist_show(struct seq_file *m, void *v) { + struct hist_file_data *hist_file = m->private; struct event_trigger_data *data; struct trace_event_file *event_file; - int n = 0, ret = 0; + int n = 0;
- mutex_lock(&event_mutex); + guard(mutex)(&event_mutex);
- event_file = event_file_file(m->private); - if (unlikely(!event_file)) { - ret = -ENODEV; - goto out_unlock; - } + event_file = event_file_file(hist_file->file); + if (unlikely(!event_file)) + return -ENODEV;
list_for_each_entry(data, &event_file->triggers, list) { if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) hist_trigger_show(m, data, n++); } + hist_file->last_read = get_hist_hit_count(event_file); + /* + * Update last_act too so that poll()/POLLPRI can wait for the next + * event after any syscall on hist file. + */ + hist_file->last_act = hist_file->last_read;
- out_unlock: - mutex_unlock(&event_mutex); + return 0; +} + +static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait) +{ + struct trace_event_file *event_file; + struct seq_file *m = file->private_data; + struct hist_file_data *hist_file = m->private; + __poll_t ret = 0; + u64 cnt; + + guard(mutex)(&event_mutex); + + event_file = event_file_data(file); + if (!event_file) + return EPOLLERR; + + hist_poll_wait(file, wait); + + cnt = get_hist_hit_count(event_file); + if (hist_file->last_read != cnt) + ret |= EPOLLIN | EPOLLRDNORM; + if (hist_file->last_act != cnt) { + hist_file->last_act = cnt; + ret |= EPOLLPRI; + }
return ret; }
+static int event_hist_release(struct inode *inode, struct file *file) +{ + struct seq_file *m = file->private_data; + struct hist_file_data *hist_file = m->private; + + kfree(hist_file); + return tracing_single_release_file_tr(inode, file); +} + static int event_hist_open(struct inode *inode, struct file *file) { + struct trace_event_file *event_file; + struct hist_file_data *hist_file; int ret;
ret = tracing_open_file_tr(inode, file); if (ret) return ret;
+ guard(mutex)(&event_mutex); + + event_file = event_file_data(file); + if (!event_file) { + ret = -ENODEV; + goto err; + } + + hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL); + if (!hist_file) { + ret = -ENOMEM; + goto err; + } + + hist_file->file = file; + hist_file->last_act = get_hist_hit_count(event_file); + /* Clear private_data to avoid warning in single_open() */ file->private_data = NULL; - return single_open(file, hist_show, file); + ret = single_open(file, hist_show, hist_file); + if (ret) { + kfree(hist_file); + goto err; + } + + return 0; +err: + tracing_release_file_tr(inode, file); + return ret; }
const struct file_operations event_hist_fops = { .open = event_hist_open, .read = seq_read, .llseek = seq_lseek, - .release = tracing_single_release_file_tr, + .release = event_hist_release, + .poll = event_hist_poll, };
#ifdef CONFIG_HIST_TRIGGERS_DEBUG @@ -5884,25 +5974,19 @@ static int hist_debug_show(struct seq_file *m, void *v) { struct event_trigger_data *data; struct trace_event_file *event_file; - int n = 0, ret = 0; + int n = 0;
- mutex_lock(&event_mutex); + guard(mutex)(&event_mutex);
event_file = event_file_file(m->private); - if (unlikely(!event_file)) { - ret = -ENODEV; - goto out_unlock; - } + if (unlikely(!event_file)) + return -ENODEV;
list_for_each_entry(data, &event_file->triggers, list) { if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) hist_trigger_debug_show(m, data, n++); } - - out_unlock: - mutex_unlock(&event_mutex); - - return ret; + return 0; }
static int event_hist_debug_open(struct inode *inode, struct file *file) @@ -5915,7 +5999,10 @@ static int event_hist_debug_open(struct inode *inode, struct file *file)
/* Clear private_data to avoid warning in single_open() */ file->private_data = NULL; - return single_open(file, hist_debug_show, file); + ret = single_open(file, hist_debug_show, file); + if (ret) + tracing_release_file_tr(inode, file); + return ret; }
const struct file_operations event_hist_debug_fops = { diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 624e0867316d..ccd6703ac50b 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -312,7 +312,7 @@ static const char *synth_field_fmt(char *type) else if (strcmp(type, "gfp_t") == 0) fmt = "%x"; else if (synth_field_is_string(type)) - fmt = "%.*s"; + fmt = "%s"; else if (synth_field_is_stack(type)) fmt = "%s";
@@ -859,6 +859,38 @@ static struct trace_event_fields synth_event_fields_array[] = { {} };
+static int synth_event_reg(struct trace_event_call *call, + enum trace_reg type, void *data) +{ + struct synth_event *event = container_of(call, struct synth_event, call); + + switch (type) { +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: +#endif + case TRACE_REG_REGISTER: + if (!try_module_get(event->mod)) + return -EBUSY; + break; + default: + break; + } + + int ret = trace_event_reg(call, type, data); + + switch (type) { +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_UNREGISTER: +#endif + case TRACE_REG_UNREGISTER: + module_put(event->mod); + break; + default: + break; + } + return ret; +} + static int register_synth_event(struct synth_event *event) { struct trace_event_call *call = &event->call; @@ -888,7 +920,7 @@ static int register_synth_event(struct synth_event *event) goto out; } call->flags = TRACE_EVENT_FL_TRACEPOINT; - call->class->reg = trace_event_reg; + call->class->reg = synth_event_reg; call->class->probe = trace_event_raw_event_synth; call->data = event; call->tp = event->tp; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index c35fbaab2a47..4d4808186a0f 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -1317,6 +1317,7 @@ void graph_trace_close(struct trace_iterator *iter) if (data) { free_percpu(data->cpu_data); kfree(data); + iter->private = NULL; } }
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index ba37f768e2f2..6c9db857fe0e 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -231,8 +231,6 @@ static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - else - iter->private = NULL; }
static void irqsoff_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index cc155590018f..5bd781359d38 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -2038,7 +2038,6 @@ static int start_kthread(unsigned int cpu)
if (IS_ERR(kthread)) { pr_err(BANNER "could not start sampling thread\n"); - stop_per_cpu_kthreads(); return -ENOMEM; }
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 0469a04a355f..330aee1c1a49 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -168,8 +168,6 @@ static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - else - iter->private = NULL; }
static void wakeup_trace_close(struct trace_iterator *iter) diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 778b4056700f..17254597accd 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -269,6 +269,15 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) if (ret < 0) goto error;
+ /* + * pipe_resize_ring() does not update nr_accounted for watch_queue + * pipes, because the above vastly overprovisions. Set nr_accounted on + * and max_usage this pipe to the number that was actually charged to + * the user above via account_pipe_buffers. + */ + pipe->max_usage = nr_pages; + pipe->nr_accounted = nr_pages; + ret = -ENOMEM; pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); if (!pages) diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c index c02baa4168e1..055356508d97 100644 --- a/lib/842/842_compress.c +++ b/lib/842/842_compress.c @@ -532,6 +532,8 @@ int sw842_compress(const u8 *in, unsigned int ilen, } if (repeat_count) { ret = add_repeat_template(p, repeat_count); + if (ret) + return ret; repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index 34db0b3aa502..9493a1b28b9e 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -608,7 +608,6 @@ DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
static void overflow_allocation_test(struct kunit *test) { - const char device_name[] = "overflow-test"; struct device *dev; int count = 0;
@@ -618,7 +617,7 @@ static void overflow_allocation_test(struct kunit *test) } while (0)
/* Create dummy device for devm_kmalloc()-family tests. */ - dev = root_device_register(device_name); + dev = root_device_register("overflow-test"); KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), "Cannot register test device\n");
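The watch_queue change a few hunks above pins max_usage/nr_accounted to the page count the user was actually charged. A rough illustration of the mismatch it closes, assuming the ring is sized from the note count rounded up to a power of two while only whole backing pages are charged (constants here are illustrative, not the kernel's):

#include <stdio.h>

#define NOTE_SIZE	128
#define NOTES_PER_PAGE	(4096 / NOTE_SIZE)

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int nr_notes = 100;
	unsigned int charged_pages = (nr_notes + NOTES_PER_PAGE - 1) / NOTES_PER_PAGE;
	unsigned int ring_slots = roundup_pow_of_two(nr_notes);

	printf("pages charged: %u, ring slots allocated: %u\n",
	       charged_pages, ring_slots);	/* 4 vs 128 for this input */
	return 0;
}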
diff --git a/mm/memory.c b/mm/memory.c index 65f1865cb461..d04faa09eaf6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1268,12 +1268,12 @@ int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { pgd_t *src_pgd, *dst_pgd; - unsigned long next; unsigned long addr = src_vma->vm_start; unsigned long end = src_vma->vm_end; struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; struct mmu_notifier_range range; + unsigned long next, pfn; bool is_cow; int ret;
@@ -1284,11 +1284,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
if (unlikely(src_vma->vm_flags & VM_PFNMAP)) { - /* - * We do not free on error cases below as remove_vma - * gets called on error from higher level routine - */ - ret = track_pfn_copy(src_vma); + ret = track_pfn_copy(dst_vma, src_vma, &pfn); if (ret) return ret; } @@ -1325,7 +1321,6 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) continue; if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd, addr, next))) { - untrack_pfn_clear(dst_vma); ret = -ENOMEM; break; } @@ -1335,6 +1330,8 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) raw_write_seqcount_end(&src_mm->write_protect_seq); mmu_notifier_invalidate_range_end(&range); } + if (ret && unlikely(src_vma->vm_flags & VM_PFNMAP)) + untrack_pfn_copy(dst_vma, pfn); return ret; }
@@ -5945,10 +5942,8 @@ void __might_fault(const char *file, int line) if (pagefault_disabled()) return; __might_sleep(file, line); -#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) if (current->mm) might_lock_read(¤t->mm->mmap_lock); -#endif } EXPORT_SYMBOL(__might_fault); #endif diff --git a/net/can/af_can.c b/net/can/af_can.c index c469fc187f0c..cdad0be43e8f 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -287,8 +287,8 @@ int can_send(struct sk_buff *skb, int loop) netif_rx(newskb);
/* update statistics */ - pkg_stats->tx_frames++; - pkg_stats->tx_frames_delta++; + atomic_long_inc(&pkg_stats->tx_frames); + atomic_long_inc(&pkg_stats->tx_frames_delta);
return 0;
@@ -647,8 +647,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) int matches;
/* update statistics */ - pkg_stats->rx_frames++; - pkg_stats->rx_frames_delta++; + atomic_long_inc(&pkg_stats->rx_frames); + atomic_long_inc(&pkg_stats->rx_frames_delta);
/* create non-zero unique skb identifier together with *skb */ while (!(can_skb_prv(skb)->skbcnt)) @@ -669,8 +669,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) consume_skb(skb);
if (matches > 0) { - pkg_stats->matches++; - pkg_stats->matches_delta++; + atomic_long_inc(&pkg_stats->matches); + atomic_long_inc(&pkg_stats->matches_delta); } }
diff --git a/net/can/af_can.h b/net/can/af_can.h index 7c2d9161e224..22f3352c77fe 100644 --- a/net/can/af_can.h +++ b/net/can/af_can.h @@ -66,9 +66,9 @@ struct receiver { struct can_pkg_stats { unsigned long jiffies_init;
- unsigned long rx_frames; - unsigned long tx_frames; - unsigned long matches; + atomic_long_t rx_frames; + atomic_long_t tx_frames; + atomic_long_t matches;
unsigned long total_rx_rate; unsigned long total_tx_rate; @@ -82,9 +82,9 @@ struct can_pkg_stats { unsigned long max_tx_rate; unsigned long max_rx_match_ratio;
- unsigned long rx_frames_delta; - unsigned long tx_frames_delta; - unsigned long matches_delta; + atomic_long_t rx_frames_delta; + atomic_long_t tx_frames_delta; + atomic_long_t matches_delta; };
/* persistent statistics */ diff --git a/net/can/proc.c b/net/can/proc.c index bbce97825f13..25fdf060e30d 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -118,6 +118,13 @@ void can_stat_update(struct timer_list *t) struct can_pkg_stats *pkg_stats = net->can.pkg_stats; unsigned long j = jiffies; /* snapshot */
+ long rx_frames = atomic_long_read(&pkg_stats->rx_frames); + long tx_frames = atomic_long_read(&pkg_stats->tx_frames); + long matches = atomic_long_read(&pkg_stats->matches); + long rx_frames_delta = atomic_long_read(&pkg_stats->rx_frames_delta); + long tx_frames_delta = atomic_long_read(&pkg_stats->tx_frames_delta); + long matches_delta = atomic_long_read(&pkg_stats->matches_delta); + /* restart counting in timer context on user request */ if (user_reset) can_init_stats(net); @@ -127,35 +134,33 @@ void can_stat_update(struct timer_list *t) can_init_stats(net);
/* prevent overflow in calc_rate() */ - if (pkg_stats->rx_frames > (ULONG_MAX / HZ)) + if (rx_frames > (LONG_MAX / HZ)) can_init_stats(net);
/* prevent overflow in calc_rate() */ - if (pkg_stats->tx_frames > (ULONG_MAX / HZ)) + if (tx_frames > (LONG_MAX / HZ)) can_init_stats(net);
/* matches overflow - very improbable */ - if (pkg_stats->matches > (ULONG_MAX / 100)) + if (matches > (LONG_MAX / 100)) can_init_stats(net);
/* calc total values */ - if (pkg_stats->rx_frames) - pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) / - pkg_stats->rx_frames; + if (rx_frames) + pkg_stats->total_rx_match_ratio = (matches * 100) / rx_frames;
pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j, - pkg_stats->tx_frames); + tx_frames); pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j, - pkg_stats->rx_frames); + rx_frames);
/* calc current values */ - if (pkg_stats->rx_frames_delta) + if (rx_frames_delta) pkg_stats->current_rx_match_ratio = - (pkg_stats->matches_delta * 100) / - pkg_stats->rx_frames_delta; + (matches_delta * 100) / rx_frames_delta;
- pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta); - pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta); + pkg_stats->current_tx_rate = calc_rate(0, HZ, tx_frames_delta); + pkg_stats->current_rx_rate = calc_rate(0, HZ, rx_frames_delta);
/* check / update maximum values */ if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate) @@ -168,9 +173,9 @@ void can_stat_update(struct timer_list *t) pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio;
/* clear values for 'current rate' calculation */ - pkg_stats->tx_frames_delta = 0; - pkg_stats->rx_frames_delta = 0; - pkg_stats->matches_delta = 0; + atomic_long_set(&pkg_stats->tx_frames_delta, 0); + atomic_long_set(&pkg_stats->rx_frames_delta, 0); + atomic_long_set(&pkg_stats->matches_delta, 0);
/* restart timer (one second) */ mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ)); @@ -214,9 +219,12 @@ static int can_stats_proc_show(struct seq_file *m, void *v) struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
seq_putc(m, '\n'); - seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames); - seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames); - seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches); + seq_printf(m, " %8ld transmitted frames (TXF)\n", + atomic_long_read(&pkg_stats->tx_frames)); + seq_printf(m, " %8ld received frames (RXF)\n", + atomic_long_read(&pkg_stats->rx_frames)); + seq_printf(m, " %8ld matched frames (RXMF)\n", + atomic_long_read(&pkg_stats->matches));
seq_putc(m, '\n');
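The af_can.c/proc.c changes above switch the CAN statistics to atomic_long_t so the RX/TX paths can bump them concurrently without a lock, while the timer takes a snapshot before doing any arithmetic. A userspace sketch of the same pattern with C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct pkg_stats {
	atomic_long rx_frames;
	atomic_long tx_frames;
	atomic_long matches;
};

static void on_rx(struct pkg_stats *s) { atomic_fetch_add(&s->rx_frames, 1); }
static void on_tx(struct pkg_stats *s) { atomic_fetch_add(&s->tx_frames, 1); }
static void on_match(struct pkg_stats *s) { atomic_fetch_add(&s->matches, 1); }

static void report(struct pkg_stats *s)
{
	/* snapshot once; all later math uses the local copies */
	long rx = atomic_load(&s->rx_frames);
	long tx = atomic_load(&s->tx_frames);
	long matches = atomic_load(&s->matches);

	if (rx)
		printf("tx %ld, rx %ld, match ratio %ld%%\n",
		       tx, rx, (matches * 100) / rx);
}

int main(void)
{
	struct pkg_stats s = { 0 };

	on_tx(&s);
	on_rx(&s);
	on_match(&s);
	report(&s);
	return 0;
}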
diff --git a/net/core/dst.c b/net/core/dst.c index 137b8d1c7220..aad197e761cb 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -167,6 +167,14 @@ static void dst_count_dec(struct dst_entry *dst) void dst_release(struct dst_entry *dst) { if (dst && rcuref_put(&dst->__rcuref)) { +#ifdef CONFIG_DST_CACHE + if (dst->flags & DST_METADATA) { + struct metadata_dst *md_dst = (struct metadata_dst *)dst; + + if (md_dst->type == METADATA_IP_TUNNEL) + dst_cache_reset_now(&md_dst->u.tun_info.dst_cache); + } +#endif dst_count_dec(dst); call_rcu_hurry(&dst->rcu_head, dst_destroy_rcu); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4acde7067519..26c520d1af6e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1011,6 +1011,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, /* IFLA_VF_STATS_TX_DROPPED */ nla_total_size_64bit(sizeof(__u64))); } + if (dev->netdev_ops->ndo_get_vf_guid) + size += num_vfs * 2 * + nla_total_size(sizeof(struct ifla_vf_guid)); return size; } else return 0; diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 80ccd6661aa3..deb08cab4464 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -415,7 +415,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
skb_dst_update_pmtu_no_confirm(skb, mtu);
- if (!reply || skb->pkt_type == PACKET_HOST) + if (!reply) return 0;
if (skb->protocol == htons(ETH_P_IP)) @@ -450,7 +450,7 @@ static const struct nla_policy geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = { [LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, - [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, + [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 }, };
static const struct nla_policy diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b84d18fcd9e2..dc91699ce032 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1412,12 +1412,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb) }
/* fully reclaim rmem/fwd memory allocated for skb */ -static void udp_rmem_release(struct sock *sk, int size, int partial, - bool rx_queue_lock_held) +static void udp_rmem_release(struct sock *sk, unsigned int size, + int partial, bool rx_queue_lock_held) { struct udp_sock *up = udp_sk(sk); struct sk_buff_head *sk_queue; - int amt; + unsigned int amt;
if (likely(partial)) { up->forward_deficit += size; @@ -1437,10 +1437,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial, if (!rx_queue_lock_held) spin_lock(&sk_queue->lock);
- - sk_forward_alloc_add(sk, size); - amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); - sk_forward_alloc_add(sk, -amt); + amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); + sk_forward_alloc_add(sk, size - amt);
if (amt) __sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT); @@ -1630,7 +1628,7 @@ EXPORT_SYMBOL_GPL(skb_consume_udp);
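The udp_rmem_release() change above computes the page-aligned amount to hand back to memory accounting directly from size + sk_forward_alloc, instead of first adding size to sk_forward_alloc and then carving pages back out. The two forms are arithmetically equivalent; the sketch below uses made-up numbers purely to show that:

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	long forward_alloc = 1500;	/* bytes currently reserved to the socket */
	long size = 10000;		/* receive memory being released */
	long partial = 1;		/* non-zero when a partial page may be kept */

	/* old shape: add first, then reclaim whole pages */
	long fa_old = forward_alloc + size;
	long amt_old = (fa_old - partial) & ~(PAGE_SIZE - 1);
	fa_old -= amt_old;

	/* new shape: one computation, no intermediate forward_alloc update */
	long amt_new = (size + forward_alloc - partial) & ~(PAGE_SIZE - 1);
	long fa_new = forward_alloc + (size - amt_new);

	printf("reclaimed %ld vs %ld bytes, leftover %ld vs %ld bytes\n",
	       amt_old, amt_new, fa_old, fa_new);
	return 0;
}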
static struct sk_buff *__first_packet_length(struct sock *sk, struct sk_buff_head *rcvq, - int *total) + unsigned int *total) { struct sk_buff *skb;
@@ -1663,8 +1661,8 @@ static int first_packet_length(struct sock *sk) { struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue; struct sk_buff_head *sk_queue = &sk->sk_receive_queue; + unsigned int total = 0; struct sk_buff *skb; - int total = 0; int res;
spin_lock_bh(&rcvq->lock); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8360939acf85..bb9add46e382 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5750,6 +5750,27 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, } }
+static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb, + struct inet6_dev *idev) +{ + struct nlattr *nla; + + nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); + if (!nla) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); + + nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); + if (!nla) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, u32 ext_filter_mask) { @@ -5771,18 +5792,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
/* XXX - MC not implemented */
- if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS) - return 0; - - nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); - if (!nla) - goto nla_put_failure; - snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); - - nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); - if (!nla) - goto nla_put_failure; - snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); + if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) { + if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0) + goto nla_put_failure; + }
nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr)); if (!nla) diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 1578ed9e97d8..c07e3da08d2a 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -1075,8 +1075,13 @@ static int calipso_sock_getattr(struct sock *sk, struct ipv6_opt_hdr *hop; int opt_len, len, ret_val = -ENOMSG, offset; unsigned char *opt; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return -EAFNOSUPPORT;
+ txopts = txopt_get(pinfo); if (!txopts || !txopts->hopopt) goto done;
@@ -1128,8 +1133,13 @@ static int calipso_sock_setattr(struct sock *sk, { int ret_val; struct ipv6_opt_hdr *old, *new; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return -EAFNOSUPPORT;
+ txopts = txopt_get(pinfo); old = NULL; if (txopts) old = txopts->hopopt; @@ -1156,8 +1166,13 @@ static int calipso_sock_setattr(struct sock *sk, static void calipso_sock_delattr(struct sock *sk) { struct ipv6_opt_hdr *new_hop; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + + if (!pinfo) + return;
+ txopts = txopt_get(pinfo); if (!txopts || !txopts->hopopt) goto done;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3ab5ea55ff8c..2e98531fa51a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -414,12 +414,37 @@ static bool rt6_check_expired(const struct rt6_info *rt) return false; }
+static struct fib6_info * +rt6_multipath_first_sibling_rcu(const struct fib6_info *rt) +{ + struct fib6_info *iter; + struct fib6_node *fn; + + fn = rcu_dereference(rt->fib6_node); + if (!fn) + goto out; + iter = rcu_dereference(fn->leaf); + if (!iter) + goto out; + + while (iter) { + if (iter->fib6_metric == rt->fib6_metric && + rt6_qualify_for_ecmp(iter)) + return iter; + iter = rcu_dereference(iter->fib6_next); + } + +out: + return NULL; +} + void fib6_select_path(const struct net *net, struct fib6_result *res, struct flowi6 *fl6, int oif, bool have_oif_match, const struct sk_buff *skb, int strict) { - struct fib6_info *match = res->f6i; + struct fib6_info *first, *match = res->f6i; struct fib6_info *sibling; + int hash;
if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) goto out; @@ -442,16 +467,25 @@ void fib6_select_path(const struct net *net, struct fib6_result *res, return; }
- if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound)) + first = rt6_multipath_first_sibling_rcu(match); + if (!first) goto out;
- list_for_each_entry_rcu(sibling, &match->fib6_siblings, + hash = fl6->mp_hash; + if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound) && + rt6_score_route(first->fib6_nh, first->fib6_flags, oif, + strict) >= 0) { + match = first; + goto out; + } + + list_for_each_entry_rcu(sibling, &first->fib6_siblings, fib6_siblings) { const struct fib6_nh *nh = sibling->fib6_nh; int nh_upper_bound;
nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound); - if (fl6->mp_hash > nh_upper_bound) + if (hash > nh_upper_bound) continue; if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0) break; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 5d71e8d084c4..64cf5589989b 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -4,7 +4,7 @@ * Copyright 2006-2007 Jiri Benc jbenc@suse.cz * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */
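The fib6_select_path() change above starts the sibling walk from the first route in the leaf that qualifies for ECMP. The selection itself remains plain hash-threshold multipath: each next hop owns a slice of the hash space, bounded by a precomputed upper bound. A sketch with made-up next hops and hash values:

#include <stdio.h>

struct nexthop {
	const char *name;
	int upper_bound;	/* cumulative boundary, like fib_nh_upper_bound */
};

static const struct nexthop *select_path(const struct nexthop *nh, int n, int hash)
{
	for (int i = 0; i < n; i++)
		if (hash <= nh[i].upper_bound)
			return &nh[i];
	return &nh[n - 1];	/* defensive fallback */
}

int main(void)
{
	/* two equal-weight siblings splitting the hash space in half */
	const struct nexthop nhs[] = {
		{ "via fe80::1", 0x3fffffff },
		{ "via fe80::2", 0x7fffffff },
	};

	printf("hash 0x12345678 -> %s\n", select_path(nhs, 2, 0x12345678)->name);
	printf("hash 0x6abcdef0 -> %s\n", select_path(nhs, 2, 0x6abcdef0)->name);
	return 0;
}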
#include <linux/module.h> @@ -1321,9 +1321,13 @@ static int _sta_info_move_state(struct sta_info *sta, sta->sta.addr, new_state);
/* notify the driver before the actual changes so it can - * fail the transition + * fail the transition if the state is increasing. + * The driver is required not to fail when the transition + * is decreasing the state, so first, do all the preparation + * work and only then, notify the driver. */ - if (test_sta_flag(sta, WLAN_STA_INSERTED)) { + if (new_state > sta->sta_state && + test_sta_flag(sta, WLAN_STA_INSERTED)) { int err = drv_sta_state(sta->local, sta->sdata, sta, sta->sta_state, new_state); if (err) @@ -1399,6 +1403,16 @@ static int _sta_info_move_state(struct sta_info *sta, break; }
+ if (new_state < sta->sta_state && + test_sta_flag(sta, WLAN_STA_INSERTED)) { + int err = drv_sta_state(sta->local, sta->sdata, sta, + sta->sta_state, new_state); + + WARN_ONCE(err, + "Driver is not allowed to fail if the sta_state is transitioning down the list: %d\n", + err); + } + sta->sta_state = new_state;
return 0; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 9e9544f81942..18ae39cf4188 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2669,11 +2669,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, err = nft_netdev_register_hooks(ctx->net, &hook.list); if (err < 0) goto err_hooks; + + unregister = true; } }
- unregister = true; - if (nla[NFTA_CHAIN_COUNTERS]) { if (!nft_is_base_chain(chain)) { err = -EOPNOTSUPP; diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 0370f69dce86..2f1012bde1f3 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -308,7 +308,8 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
nft_setelem_expr_foreach(expr, elem_expr, size) { if (expr->ops->gc && - expr->ops->gc(read_pnet(&set->net), expr)) + expr->ops->gc(read_pnet(&set->net), expr) && + set->flags & NFT_SET_EVAL) return true; }
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index f735d79d8be5..d499eb3f4f29 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -333,13 +333,13 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr, static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = { [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 }, [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 }, - [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, + [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 }, };
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr, struct nft_tunnel_opts *opts) { - struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len; + struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len); struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1]; int err, data_len;
@@ -621,7 +621,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb, if (!inner) goto failure; while (opts->len > offset) { - opt = (struct geneve_opt *)opts->u.data + offset; + opt = (struct geneve_opt *)(opts->u.data + offset); if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS, opt->opt_class) || nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE, diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 9445ca97163b..6c5afb4ad67b 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -931,12 +931,6 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, pskb_trim(skb, ovs_mac_header_len(key)); }
- /* Need to set the pkt_type to involve the routing layer. The - * packet movement through the OVS datapath doesn't generally - * use routing, but this is needed for tunnel cases. - */ - skb->pkt_type = PACKET_OUTGOING; - if (likely(!mru || (skb->len <= mru + vport->dev->hard_header_len))) { ovs_vport_send(vport, skb, ovs_key_mac_proto(key)); diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 0c8aa7e686ea..99fb869aee91 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -68,7 +68,7 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, - .len = 128 }, + .len = 127 }, };
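The nft_tunnel changes above add parentheses because a cast binds more tightly than '+': without them the offset is scaled by the size of the casted type rather than being a byte offset. A minimal demonstration, using a simplified stand-in for struct geneve_opt:

#include <stdint.h>
#include <stdio.h>

struct demo_opt {		/* simplified stand-in, 8 bytes */
	uint16_t opt_class;
	uint8_t type;
	uint8_t length;
	uint8_t data[4];
};

int main(void)
{
	uint8_t buf[256];
	int offset = 8;		/* intended byte offset of the next option */

	struct demo_opt *wrong = (struct demo_opt *)buf + offset;	/* 8 * sizeof(*wrong) */
	struct demo_opt *right = (struct demo_opt *)(buf + offset);	/* 8 bytes */

	printf("wrong skips %td bytes, right skips %td bytes\n",
	       (uint8_t *)wrong - buf, (uint8_t *)right - buf);
	return 0;
}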
static const struct nla_policy diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index bcf1b8012b2c..b00e491e8130 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -748,7 +748,7 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, - .len = 128 }, + .len = 127 }, };
static const struct nla_policy diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 5df2dacb7b1a..05aa363a7fee 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -121,8 +121,6 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* Check to update highest and lowest priorities. */ if (skb_queue_empty(lp_qdisc)) { if (q->lowest_prio == q->highest_prio) { - /* The incoming packet is the only packet in queue. */ - BUG_ON(sch->q.qlen != 1); q->lowest_prio = prio; q->highest_prio = prio; } else { @@ -154,7 +152,6 @@ static struct sk_buff *skbprio_dequeue(struct Qdisc *sch) /* Update highest priority field. */ if (skb_queue_empty(hpq)) { if (q->lowest_prio == q->highest_prio) { - BUG_ON(sch->q.qlen); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; } else { diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 622875a6f787..f8f1a49689da 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1485,7 +1485,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr, timeout = vsk->connect_timeout; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) { + /* If the socket is already closing or it is in an error state, there + * is no point in waiting. + */ + while (sk->sk_state != TCP_ESTABLISHED && + sk->sk_state != TCP_CLOSING && sk->sk_err == 0) { if (flags & O_NONBLOCK) { /* If we're not going to block, we schedule a timeout * function to generate a timeout on the connection diff --git a/samples/ftrace/sample-trace-array.c b/samples/ftrace/sample-trace-array.c index 6aba02a31c96..d0ee9001c7b3 100644 --- a/samples/ftrace/sample-trace-array.c +++ b/samples/ftrace/sample-trace-array.c @@ -105,7 +105,7 @@ static int __init sample_trace_array_init(void) * NOTE: This function increments the reference counter * associated with the trace array - "tr". */ - tr = trace_array_get_by_name("sample-instance"); + tr = trace_array_get_by_name("sample-instance", "sched,timer,kprobes");
if (!tr) return -1; diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh index 24086793b0d8..db40237e60ce 100755 --- a/scripts/selinux/install_policy.sh +++ b/scripts/selinux/install_policy.sh @@ -6,27 +6,24 @@ if [ `id -u` -ne 0 ]; then exit 1 fi
-SF=`which setfiles` -if [ $? -eq 1 ]; then +SF=`which setfiles` || { echo "Could not find setfiles" echo "Do you have policycoreutils installed?" exit 1 -fi +}
-CP=`which checkpolicy` -if [ $? -eq 1 ]; then +CP=`which checkpolicy` || { echo "Could not find checkpolicy" echo "Do you have checkpolicy installed?" exit 1 -fi +} VERS=`$CP -V | awk '{print $1}'`
-ENABLED=`which selinuxenabled` -if [ $? -eq 1 ]; then +ENABLED=`which selinuxenabled` || { echo "Could not find selinuxenabled" echo "Do you have libselinux-utils installed?" exit 1 -fi +}
if selinuxenabled; then echo "SELinux is already enabled" diff --git a/security/smack/smack.h b/security/smack/smack.h index 041688e5a77a..5e4a3c3144dd 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -152,6 +152,7 @@ struct smk_net4addr { struct smack_known *smk_label; /* label */ };
+#if IS_ENABLED(CONFIG_IPV6) /* * An entry in the table identifying IPv6 hosts. */ @@ -162,7 +163,9 @@ struct smk_net6addr { int smk_masks; /* mask size */ struct smack_known *smk_label; /* label */ }; +#endif /* CONFIG_IPV6 */
+#ifdef SMACK_IPV6_PORT_LABELING /* * An entry in the table identifying ports. */ @@ -175,6 +178,7 @@ struct smk_port_label { short smk_sock_type; /* Socket type */ short smk_can_reuse; }; +#endif /* SMACK_IPV6_PORT_LABELING */
struct smack_known_list_elem { struct list_head list; @@ -314,7 +318,9 @@ extern struct smack_known smack_known_web; extern struct mutex smack_known_lock; extern struct list_head smack_known_list; extern struct list_head smk_net4addr_list; +#if IS_ENABLED(CONFIG_IPV6) extern struct list_head smk_net6addr_list; +#endif /* CONFIG_IPV6 */
extern struct mutex smack_onlycap_lock; extern struct list_head smack_onlycap_list; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 4625674f0e95..d272cf8160d5 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2476,6 +2476,7 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) return NULL; }
+#if IS_ENABLED(CONFIG_IPV6) /* * smk_ipv6_localhost - Check for local ipv6 host address * @sip: the address @@ -2543,6 +2544,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
return NULL; } +#endif /* CONFIG_IPV6 */
/** * smack_netlbl_add - Set the secattr on a socket @@ -2646,6 +2648,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap) return rc; }
+#if IS_ENABLED(CONFIG_IPV6) /** * smk_ipv6_check - check Smack access * @subject: subject Smack label @@ -2678,6 +2681,7 @@ static int smk_ipv6_check(struct smack_known *subject, rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); return rc; } +#endif /* CONFIG_IPV6 */
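The Smack changes above wrap the IPv6-only tables and helpers in IS_ENABLED(CONFIG_IPV6) so they compile out cleanly when IPv6 is disabled. Below is a cut-down userspace rendition of how an IS_ENABLED()-style macro can evaluate to 0/1 at preprocessing time; the kernel's real version in <linux/kconfig.h> additionally handles =m, so treat this only as an illustration of the trick:

#include <stdio.h>

#define CONFIG_IPV6 1			/* comment out to take the other branch */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	puts("IPv6 host tables compiled in");
#else
	puts("IPv6 host tables compiled out");
#endif
	return 0;
}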
#ifdef SMACK_IPV6_PORT_LABELING /** @@ -3010,7 +3014,9 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return 0; if (addrlen < offsetofend(struct sockaddr, sa_family)) return 0; - if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { + +#if IS_ENABLED(CONFIG_IPV6) + if (sap->sa_family == AF_INET6) { struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; struct smack_known *rsp = NULL;
@@ -3030,6 +3036,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
return rc; } +#endif /* CONFIG_IPV6 */ + if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) return 0; rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b3cd6090f46a..5179061f57b5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -584,6 +584,9 @@ static void alc_shutup_pins(struct hda_codec *codec) { struct alc_spec *spec = codec->spec;
+ if (spec->no_shutup_pins) + return; + switch (codec->core.vendor_id) { case 0x10ec0236: case 0x10ec0256: @@ -599,8 +602,7 @@ static void alc_shutup_pins(struct hda_codec *codec) alc_headset_mic_no_shutup(codec); break; default: - if (!spec->no_shutup_pins) - snd_hda_shutup_pins(codec); + snd_hda_shutup_pins(codec); break; } } @@ -4783,6 +4785,21 @@ static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec, } }
+static void alc295_fixup_hp_mute_led_coefbit11(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mute_led_polarity = 0; + spec->mute_led_coef.idx = 0xb; + spec->mute_led_coef.mask = 3 << 3; + spec->mute_led_coef.on = 1 << 3; + spec->mute_led_coef.off = 1 << 4; + snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set); + } +} + static void alc285_fixup_hp_mute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -7331,6 +7348,7 @@ enum { ALC290_FIXUP_MONO_SPEAKERS_HSJACK, ALC290_FIXUP_SUBWOOFER, ALC290_FIXUP_SUBWOOFER_HSJACK, + ALC295_FIXUP_HP_MUTE_LED_COEFBIT11, ALC269_FIXUP_THINKPAD_ACPI, ALC269_FIXUP_DMIC_THINKPAD_ACPI, ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13, @@ -9068,6 +9086,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC283_FIXUP_INT_MIC, }, + [ALC295_FIXUP_HP_MUTE_LED_COEFBIT11] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc295_fixup_hp_mute_led_coefbit11, + }, [ALC298_FIXUP_SAMSUNG_AMP] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_samsung_amp, @@ -9976,6 +9998,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360), SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11), SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360), SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -10150,12 +10173,15 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), + SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1074, "ASUS G614PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1194, "ASUS UM3406KA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), @@ -10176,6 +10202,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x14f2, "ASUS VivoBook X515JA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1503, "ASUS 
G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2), @@ -10233,7 +10260,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1f63, "ASUS P5405CSA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1fb3, "ASUS ROG Flow Z13 GZ302EA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2), diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c index 5c8bb24909eb..bd73944758c6 100644 --- a/sound/soc/codecs/cs35l41-spi.c +++ b/sound/soc/codecs/cs35l41-spi.c @@ -39,7 +39,9 @@ static int cs35l41_spi_probe(struct spi_device *spi) return -ENOMEM;
spi->max_speed_hz = CS35L41_SPI_MAX_FREQ; - spi_setup(spi); + ret = spi_setup(spi); + if (ret < 0) + return ret;
spi_set_drvdata(spi, cs35l41); cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config); diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c index a39de4a7df00..532d1d6958af 100644 --- a/sound/soc/codecs/rt5665.c +++ b/sound/soc/codecs/rt5665.c @@ -31,9 +31,7 @@ #include "rl6231.h" #include "rt5665.h"
-#define RT5665_NUM_SUPPLIES 3 - -static const char *rt5665_supply_names[RT5665_NUM_SUPPLIES] = { +static const char * const rt5665_supply_names[] = { "AVDD", "MICVDD", "VBAT", @@ -46,7 +44,6 @@ struct rt5665_priv { struct gpio_desc *gpiod_ldo1_en; struct gpio_desc *gpiod_reset; struct snd_soc_jack *hs_jack; - struct regulator_bulk_data supplies[RT5665_NUM_SUPPLIES]; struct delayed_work jack_detect_work; struct delayed_work calibrate_work; struct delayed_work jd_check_work; @@ -4471,8 +4468,6 @@ static void rt5665_remove(struct snd_soc_component *component) struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
regmap_write(rt5665->regmap, RT5665_RESET, 0); - - regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies); }
#ifdef CONFIG_PM @@ -4758,7 +4753,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c) { struct rt5665_platform_data *pdata = dev_get_platdata(&i2c->dev); struct rt5665_priv *rt5665; - int i, ret; + int ret; unsigned int val;
rt5665 = devm_kzalloc(&i2c->dev, sizeof(struct rt5665_priv), @@ -4774,24 +4769,13 @@ static int rt5665_i2c_probe(struct i2c_client *i2c) else rt5665_parse_dt(rt5665, &i2c->dev);
- for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++) - rt5665->supplies[i].supply = rt5665_supply_names[i]; - - ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies), - rt5665->supplies); + ret = devm_regulator_bulk_get_enable(&i2c->dev, ARRAY_SIZE(rt5665_supply_names), + rt5665_supply_names); if (ret != 0) { dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret); return ret; }
- ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies), - rt5665->supplies); - if (ret != 0) { - dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret); - return ret; - } - - rt5665->gpiod_ldo1_en = devm_gpiod_get_optional(&i2c->dev, "realtek,ldo1-en", GPIOD_OUT_HIGH); diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c index f8144bf4c90d..7128bcf3a743 100644 --- a/sound/soc/fsl/imx-card.c +++ b/sound/soc/fsl/imx-card.c @@ -742,6 +742,8 @@ static int imx_card_probe(struct platform_device *pdev) data->dapm_routes[i].sink = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s", i + 1, "Playback"); + if (!data->dapm_routes[i].sink) + return -ENOMEM; data->dapm_routes[i].source = "CPU-Playback"; } } @@ -759,6 +761,8 @@ static int imx_card_probe(struct platform_device *pdev) data->dapm_routes[i].source = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s", i + 1, "Capture"); + if (!data->dapm_routes[i].source) + return -ENOMEM; data->dapm_routes[i].sink = "CPU-Capture"; } } diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c index 6a969874c927..5e0bdbd34a83 100644 --- a/sound/soc/ti/j721e-evm.c +++ b/sound/soc/ti/j721e-evm.c @@ -182,6 +182,8 @@ static int j721e_configure_refclk(struct j721e_priv *priv, clk_id = J721E_CLK_PARENT_48000; else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_44100]) clk_id = J721E_CLK_PARENT_44100; + else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_48000]) + clk_id = J721E_CLK_PARENT_48000; else return ret;
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 736ebceea233..a3a190d13db8 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -1974,7 +1974,7 @@ static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
obj->sym_map[src_sym_idx] = dst_sym_idx;
- if (sym_type == STT_SECTION && dst_sym) { + if (sym_type == STT_SECTION && dst_sec) { dst_sec->sec_sym_idx = dst_sym_idx; dst_sym->st_value = 0; } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 6e59e7f578ff..9102ad5985cc 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -4089,7 +4089,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio * It may also insert a UD2 after calling a __noreturn function. */ prev_insn = prev_insn_same_sec(file, insn); - if (prev_insn->dead_end && + if (prev_insn && prev_insn->dead_end && (insn->type == INSN_BUG || (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && insn->jump_dest->type == INSN_BUG))) @@ -4511,35 +4511,6 @@ static int validate_sls(struct objtool_file *file) return warnings; }
-static bool ignore_noreturn_call(struct instruction *insn) -{ - struct symbol *call_dest = insn_call_dest(insn); - - /* - * FIXME: hack, we need a real noreturn solution - * - * Problem is, exc_double_fault() may or may not return, depending on - * whether CONFIG_X86_ESPFIX64 is set. But objtool has no visibility - * to the kernel config. - * - * Other potential ways to fix it: - * - * - have compiler communicate __noreturn functions somehow - * - remove CONFIG_X86_ESPFIX64 - * - read the .config file - * - add a cmdline option - * - create a generic objtool annotation format (vs a bunch of custom - * formats) and annotate it - */ - if (!strcmp(call_dest->name, "exc_double_fault")) { - /* prevent further unreachable warnings for the caller */ - insn->sym->warned = 1; - return true; - } - - return false; -} - static int validate_reachable_instructions(struct objtool_file *file) { struct instruction *insn, *prev_insn; @@ -4556,7 +4527,7 @@ static int validate_reachable_instructions(struct objtool_file *file) prev_insn = prev_insn_same_sec(file, insn); if (prev_insn && prev_insn->dead_end) { call_dest = insn_call_dest(prev_insn); - if (call_dest && !ignore_noreturn_call(prev_insn)) { + if (call_dest) { WARN_INSN(insn, "%s() is missing a __noreturn annotation", call_dest->name); warnings++; @@ -4579,6 +4550,8 @@ static int disas_funcs(const char *funcs) char *cmd;
cross_compile = getenv("CROSS_COMPILE"); + if (!cross_compile) + cross_compile = "";
objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '" "BEGIN { split(_funcs, funcs); }" diff --git a/tools/perf/bench/syscall.c b/tools/perf/bench/syscall.c index ea4dfc07cbd6..e7dc216f717f 100644 --- a/tools/perf/bench/syscall.c +++ b/tools/perf/bench/syscall.c @@ -22,8 +22,7 @@ #define __NR_fork -1 #endif
-#define LOOPS_DEFAULT 10000000 -static int loops = LOOPS_DEFAULT; +static int loops;
static const struct option options[] = { OPT_INTEGER('l', "loop", &loops, "Specify number of loops"), @@ -80,6 +79,18 @@ static int bench_syscall_common(int argc, const char **argv, int syscall) const char *name = NULL; int i;
+ switch (syscall) { + case __NR_fork: + case __NR_execve: + /* Limit default loop to 10000 times to save time */ + loops = 10000; + break; + default: + loops = 10000000; + break; + } + + /* Options -l and --loops override default above */ argc = parse_options(argc, argv, options, bench_syscall_usage, 0);
gettimeofday(&start, NULL); @@ -94,16 +105,9 @@ static int bench_syscall_common(int argc, const char **argv, int syscall) break; case __NR_fork: test_fork(); - /* Only loop 10000 times to save time */ - if (i == 10000) - loops = 10000; break; case __NR_execve: test_execve(); - /* Only loop 10000 times to save time */ - if (i == 10000) - loops = 10000; - break; default: break; } diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S index 75cf084a927d..577760046772 100644 --- a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S +++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S @@ -26,3 +26,5 @@ skip: mov x0, #0 mov x8, #93 // __NR_exit syscall svc #0 + +.section .note.GNU-stack, "", @progbits diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c index afbd5869f6bf..9848310cee5f 100644 --- a/tools/perf/util/arm-spe.c +++ b/tools/perf/util/arm-spe.c @@ -37,6 +37,8 @@ #include "../../arch/arm64/include/asm/cputype.h" #define MAX_TIMESTAMP (~0ULL)
+#define is_ldst_op(op) (!!((op) & ARM_SPE_OP_LDST)) + struct arm_spe { struct auxtrace auxtrace; struct auxtrace_queues queues; @@ -520,6 +522,10 @@ static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 m union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA }; bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
+ /* Only synthesize data source for LDST operations */ + if (!is_ldst_op(record->op)) + return 0; + if (record->op & ARM_SPE_OP_LD) data_src.mem_op = PERF_MEM_OP_LOAD; else if (record->op & ARM_SPE_OP_ST) @@ -619,7 +625,7 @@ static int arm_spe_sample(struct arm_spe_queue *speq) * When data_src is zero it means the record is not a memory operation, * skip to synthesize memory sample for this case. */ - if (spe->sample_memory && data_src) { + if (spe->sample_memory && is_ldst_op(record->op)) { err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src); if (err) return err; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 1eadb4f7c1b9..f86a1eb4ea36 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1362,19 +1362,18 @@ static int evlist__create_syswide_maps(struct evlist *evlist) */ cpus = perf_cpu_map__new(NULL); if (!cpus) - goto out; + return -ENOMEM;
threads = perf_thread_map__new_dummy(); - if (!threads) - goto out_put; + if (!threads) { + perf_cpu_map__put(cpus); + return -ENOMEM; + }
perf_evlist__set_maps(&evlist->core, cpus, threads); - perf_thread_map__put(threads); -out_put: perf_cpu_map__put(cpus); -out: - return -ENOMEM; + return 0; }
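The evlist__create_syswide_maps() fix above addresses a classic goto-out shape: the old code funnelled the success path through the same fall-through "return -ENOMEM" used by its error labels, so the function reported failure even when it had succeeded. A minimal reproduction of that shape with illustrative names (malloc/free standing in for the map get/put calls):

#include <stdio.h>
#include <stdlib.h>

static int create_buggy(void)
{
	void *cpus = malloc(16), *threads = NULL;

	if (!cpus)
		goto out;
	threads = malloc(16);
	if (!threads)
		goto out_put;
	/* ...hand cpus/threads to the consumer... */
	free(threads);
out_put:
	free(cpus);
out:
	return -12;		/* -ENOMEM is returned on success too: the bug */
}

static int create_fixed(void)
{
	void *cpus = malloc(16);
	void *threads;

	if (!cpus)
		return -12;
	threads = malloc(16);
	if (!threads) {
		free(cpus);
		return -12;
	}
	/* ...hand cpus/threads to the consumer... */
	free(threads);
	free(cpus);
	return 0;		/* success now has its own return */
}

int main(void)
{
	printf("buggy: %d, fixed: %d\n", create_buggy(), create_fixed());
	return 0;
}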
int evlist__open(struct evlist *evlist) diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 27393e432792..2587c4b463fa 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -597,7 +597,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, }; if (pmu_events_table__find_event(pmu->events_table, pmu, name, update_alias, &data) == 0) - pmu->cpu_json_aliases++; + pmu->cpu_common_json_aliases++; } pmu->sysfs_aliases++; break; @@ -1680,9 +1680,10 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu) if (pmu->cpu_aliases_added) nr += pmu->cpu_json_aliases; else if (pmu->events_table) - nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases; + nr += pmu_events_table__num_events(pmu->events_table, pmu) - + pmu->cpu_common_json_aliases; else - assert(pmu->cpu_json_aliases == 0); + assert(pmu->cpu_json_aliases == 0 && pmu->cpu_common_json_aliases == 0);
return pmu->selectable ? nr + 1 : nr; } diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index aca4238f06a6..5a03c361cb04 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -124,6 +124,11 @@ struct perf_pmu { uint32_t cpu_json_aliases; /** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */ uint32_t sys_json_aliases; + /** + * @cpu_common_json_aliases: Number of json events that overlapped with sysfs when + * loading all sysfs events. + */ + uint32_t cpu_common_json_aliases; /** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */ bool sysfs_aliases_loaded; /** diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c index f0577aa7eca8..dda5ba9c73fd 100644 --- a/tools/perf/util/pmus.c +++ b/tools/perf/util/pmus.c @@ -587,11 +587,25 @@ char *perf_pmus__default_pmu_name(void) struct perf_pmu *evsel__find_pmu(const struct evsel *evsel) { struct perf_pmu *pmu = evsel->pmu; + bool legacy_core_type;
- if (!pmu) { - pmu = perf_pmus__find_by_type(evsel->core.attr.type); - ((struct evsel *)evsel)->pmu = pmu; + if (pmu) + return pmu; + + pmu = perf_pmus__find_by_type(evsel->core.attr.type); + legacy_core_type = + evsel->core.attr.type == PERF_TYPE_HARDWARE || + evsel->core.attr.type == PERF_TYPE_HW_CACHE; + if (!pmu && legacy_core_type) { + if (perf_pmus__supports_extended_type()) { + u32 type = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT; + + pmu = perf_pmus__find_by_type(type); + } else { + pmu = perf_pmus__find_core_pmu(); + } } + ((struct evsel *)evsel)->pmu = pmu; return pmu; }
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index b01b0e551056..06a1e09d7349 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -238,7 +238,7 @@ struct pyrf_event { };
#define sample_members \ - sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \ + sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \ sample_member_def(sample_pid, pid, T_INT, "event pid"), \ sample_member_def(sample_tid, tid, T_INT, "event tid"), \ sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ @@ -671,6 +671,11 @@ static PyObject *pyrf_event__new(union perf_event *event) event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) return NULL;
+ // FIXME this better be dynamic or we need to parse everything + // before calling perf_mmap__consume(), including tracepoint fields. + if (sizeof(pevent->event) < event->header.size) + return NULL; + ptype = pyrf_event__type[event->header.type]; pevent = PyObject_New(struct pyrf_event, ptype); if (pevent != NULL) @@ -1170,20 +1175,22 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
evsel = evlist__event2evsel(evlist, event); if (!evsel) { + Py_DECREF(pyevent); Py_INCREF(Py_None); return Py_None; }
pevent->evsel = evsel;
- err = evsel__parse_sample(evsel, event, &pevent->sample); - - /* Consume the even only after we parsed it out. */ perf_mmap__consume(&md->core);
- if (err) + err = evsel__parse_sample(evsel, &pevent->event, &pevent->sample); + if (err) { + Py_DECREF(pyevent); return PyErr_Format(PyExc_OSError, "perf: can't parse sample, err=%d", err); + } + return pyevent; } end: diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 2affa4d45aa2..56b186d30745 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -154,6 +154,7 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type { const struct evsel *cur; int evsel_ctx = evsel_context(evsel); + struct perf_pmu *evsel_pmu = evsel__find_pmu(evsel);
evlist__for_each_entry(evsel->evlist, cur) { struct perf_stat_aggr *aggr; @@ -180,7 +181,7 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type * Except the SW CLOCK events, * ignore if not the PMU we're looking for. */ - if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu)) + if ((type != STAT_NSECS) && (evsel_pmu != evsel__find_pmu(cur))) continue;
aggr = &cur->stats->aggr[aggr_idx]; diff --git a/tools/perf/util/units.c b/tools/perf/util/units.c index 32c39cfe209b..4c6a86e1cb54 100644 --- a/tools/perf/util/units.c +++ b/tools/perf/util/units.c @@ -64,7 +64,7 @@ unsigned long convert_unit(unsigned long value, char *unit)
int unit_number__scnprintf(char *buf, size_t size, u64 n) { - char unit[4] = "BKMG"; + char unit[] = "BKMG"; int i = 0;
while (((n / 1024) > 1) && (i < 3)) { diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index f79815b7e951..fff16cdc93f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -6,6 +6,10 @@ #include <test_progs.h> #include "bloom_filter_map.skel.h"
+#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + static void test_fail_cases(void) { LIBBPF_OPTS(bpf_map_create_opts, opts); @@ -69,6 +73,7 @@ static void test_success_cases(void)
/* Create a map */ opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE; + opts.numa_node = NUMA_NO_NODE; fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case")) return; diff --git a/tools/testing/selftests/bpf/progs/strncmp_bench.c b/tools/testing/selftests/bpf/progs/strncmp_bench.c index 18373a7df76e..f47bf88f8d2a 100644 --- a/tools/testing/selftests/bpf/progs/strncmp_bench.c +++ b/tools/testing/selftests/bpf/progs/strncmp_bench.c @@ -35,7 +35,10 @@ static __always_inline int local_strncmp(const char *s1, unsigned int sz, SEC("tp/syscalls/sys_enter_getpgid") int strncmp_no_helper(void *ctx) { - if (local_strncmp(str, cmp_str_len + 1, target) < 0) + const char *target_str = target; + + barrier_var(target_str); + if (local_strncmp(str, cmp_str_len + 1, target_str) < 0) __sync_add_and_fetch(&hits, 1); return 0; } diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index 6f2f83990441..76d37904172d 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -812,7 +812,7 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run) mremap_size = thpsize / 2; mremap_mem = mmap(NULL, mremap_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (mem == MAP_FAILED) { + if (mremap_mem == MAP_FAILED) { ksft_test_result_fail("mmap() failed\n"); goto munmap; }
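One small but classic C detail from earlier in the patch: the tools/perf/util/units.c change drops the explicit array size from 'char unit[4] = "BKMG"'. With the explicit size of 4 the string literal's terminating NUL is silently discarded, so the array is not a C string; letting the compiler size the array keeps the terminator. A short demonstration:

#include <stdio.h>

int main(void)
{
	char truncated[4] = "BKMG";	/* 4 chars, no '\0' (legal C, easy to misuse) */
	char sized[] = "BKMG";		/* 5 bytes, including '\0' */

	printf("sizeof truncated = %zu, sizeof sized = %zu\n",
	       sizeof(truncated), sizeof(sized));

	/* Indexing one unit character is fine either way... */
	printf("unit[2] = %c\n", truncated[2]);

	/* ...but only the NUL-terminated array is safe to print as a string. */
	printf("%s\n", sized);
	return 0;
}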