I'm announcing the release of the 6.6.54 kernel.
All users of the 6.6 kernel series must upgrade.
The updated 6.6.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.6.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
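For example, one minimal way to grab this release from the tree above (the
v6.6.54 tag name is inferred from the release number, and "linux-stable" is
simply the default clone directory):

	git clone --branch linux-6.6.y \
		git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
	cd linux-stable
	git checkout v6.6.54    # detached checkout of this release's tag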
thanks,
greg k-h
------------
.gitignore | 1 Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 | 2 Documentation/arch/arm64/silicon-errata.rst | 2 Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml | 1 Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml | 19 Documentation/driver-api/ipmi.rst | 2 Documentation/virt/kvm/locking.rst | 33 Makefile | 2 arch/arm/boot/dts/microchip/sam9x60.dtsi | 4 arch/arm/boot/dts/microchip/sama7g5.dtsi | 2 arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts | 2 arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts | 2 arch/arm/mach-ep93xx/clock.c | 2 arch/arm/mach-versatile/platsmp-realview.c | 1 arch/arm/vfp/vfpinstr.h | 48 arch/arm64/Kconfig | 2 arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts | 2 arch/arm64/boot/dts/mediatek/mt8186.dtsi | 12 arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi | 1 arch/arm64/boot/dts/mediatek/mt8195.dtsi | 12 arch/arm64/boot/dts/qcom/sa8775p.dtsi | 2 arch/arm64/boot/dts/renesas/r9a07g043u.dtsi | 4 arch/arm64/boot/dts/renesas/r9a07g044.dtsi | 4 arch/arm64/boot/dts/renesas/r9a07g054.dtsi | 4 arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts | 4 arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts | 2 arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts | 4 arch/arm64/boot/dts/ti/k3-j721e-sk.dts | 4 arch/arm64/include/asm/cputype.h | 2 arch/arm64/include/asm/esr.h | 88 - arch/arm64/include/uapi/asm/sigcontext.h | 6 arch/arm64/kernel/cpu_errata.c | 10 arch/arm64/kvm/hyp/nvhe/ffa.c | 21 arch/m68k/kernel/process.c | 2 arch/powerpc/crypto/Kconfig | 1 arch/powerpc/include/asm/asm-compat.h | 6 arch/powerpc/include/asm/atomic.h | 5 arch/powerpc/include/asm/uaccess.h | 7 arch/powerpc/kernel/head_8xx.S | 6 arch/powerpc/kernel/vdso/gettimeofday.S | 4 arch/powerpc/mm/nohash/8xx.c | 4 arch/riscv/include/asm/kvm_vcpu_pmu.h | 21 arch/riscv/kernel/perf_callchain.c | 2 arch/riscv/kvm/vcpu_sbi.c | 4 arch/x86/coco/tdx/tdx.c | 6 arch/x86/events/intel/pt.c | 15 arch/x86/include/asm/acpi.h | 8 arch/x86/include/asm/hardirq.h | 8 arch/x86/include/asm/idtentry.h | 73 arch/x86/kernel/acpi/boot.c | 11 arch/x86/kernel/cpu/sgx/main.c | 27 arch/x86/kernel/jailhouse.c | 1 arch/x86/kernel/mmconf-fam10h_64.c | 1 arch/x86/kernel/process_64.c | 29 arch/x86/kernel/smpboot.c | 1 arch/x86/kernel/x86_init.c | 1 arch/x86/kvm/lapic.c | 35 arch/x86/mm/tlb.c | 7 arch/x86/pci/fixup.c | 4 arch/x86/xen/mmu_pv.c | 5 arch/x86/xen/p2m.c | 98 + arch/x86/xen/setup.c | 203 ++ arch/x86/xen/xen-ops.h | 6 block/bfq-iosched.c | 81 - block/partitions/core.c | 8 crypto/asymmetric_keys/asymmetric_type.c | 7 crypto/xor.c | 31 drivers/acpi/cppc_acpi.c | 43 drivers/acpi/device_sysfs.c | 5 drivers/acpi/pmic/tps68470_pmic.c | 6 drivers/acpi/resource.c | 6 drivers/ata/libata-eh.c | 8 drivers/ata/libata-scsi.c | 5 drivers/base/core.c | 15 drivers/base/firmware_loader/main.c | 30 drivers/base/module.c | 14 drivers/base/power/domain.c | 2 drivers/block/drbd/drbd_main.c | 6 drivers/block/drbd/drbd_state.c | 2 drivers/block/nbd.c | 13 drivers/block/ublk_drv.c | 62 drivers/bluetooth/btusb.c | 5 drivers/bus/arm-integrator-lm.c | 1 drivers/bus/mhi/host/pci_generic.c | 13 drivers/char/hw_random/bcm2835-rng.c | 4 drivers/char/hw_random/cctrng.c | 1 drivers/char/hw_random/mtk-rng.c | 2 drivers/char/tpm/tpm-dev-common.c | 2 drivers/char/tpm/tpm2-space.c | 3 drivers/clk/at91/sama7g5.c | 5 drivers/clk/imx/clk-composite-7ulp.c | 7 drivers/clk/imx/clk-composite-8m.c | 63 drivers/clk/imx/clk-composite-93.c | 15 drivers/clk/imx/clk-fracn-gppll.c | 4 drivers/clk/imx/clk-imx6ul.c | 4 drivers/clk/imx/clk-imx8mp-audiomix.c | 13 
drivers/clk/imx/clk-imx8mp.c | 4 drivers/clk/imx/clk-imx8qxp.c | 10 drivers/clk/qcom/clk-alpha-pll.c | 52 drivers/clk/qcom/clk-alpha-pll.h | 2 drivers/clk/qcom/dispcc-sm8250.c | 9 drivers/clk/qcom/dispcc-sm8550.c | 14 drivers/clk/qcom/gcc-ipq5332.c | 1 drivers/clk/rockchip/clk-rk3228.c | 2 drivers/clk/rockchip/clk-rk3588.c | 2 drivers/clk/starfive/clk-starfive-jh7110-vout.c | 2 drivers/clk/ti/clk-dra7-atl.c | 1 drivers/clocksource/timer-qcom.c | 7 drivers/cpufreq/ti-cpufreq.c | 10 drivers/cpuidle/cpuidle-riscv-sbi.c | 21 drivers/crypto/caam/caamhash.c | 1 drivers/crypto/ccp/sev-dev.c | 2 drivers/crypto/hisilicon/hpre/hpre_main.c | 54 drivers/crypto/hisilicon/qm.c | 151 + drivers/crypto/hisilicon/sec2/sec_main.c | 16 drivers/crypto/hisilicon/zip/zip_main.c | 23 drivers/cxl/core/pci.c | 8 drivers/edac/igen6_edac.c | 2 drivers/edac/synopsys_edac.c | 85 - drivers/firewire/core-cdev.c | 2 drivers/firmware/arm_scmi/optee.c | 7 drivers/firmware/efi/libstub/tpm.c | 2 drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 4 drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 29 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 9 drivers/gpu/drm/amd/display/dc/dc_dsc.h | 3 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 6 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 5 drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 2 drivers/gpu/drm/bridge/lontium-lt8912b.c | 35 drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2 drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 32 drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 12 drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 2 drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 30 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 2 drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 12 drivers/gpu/drm/radeon/evergreen_cs.c | 62 drivers/gpu/drm/radeon/radeon_atombios.c | 29 drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 2 drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 4 drivers/gpu/drm/stm/drv.c | 4 drivers/gpu/drm/stm/ltdc.c | 2 drivers/gpu/drm/vc4/vc4_hdmi.c | 8 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 13 drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 3 drivers/hid/wacom_wac.c | 13 drivers/hid/wacom_wac.h | 2 drivers/hwmon/max16065.c | 27 drivers/hwmon/ntc_thermistor.c | 1 drivers/hwtracing/coresight/coresight-tmc-etr.c | 2 drivers/i2c/busses/i2c-aspeed.c | 16 drivers/i2c/busses/i2c-isch.c | 3 drivers/iio/adc/ad7606.c | 8 drivers/iio/adc/ad7606_spi.c | 5 drivers/iio/chemical/bme680_core.c | 7 drivers/iio/magnetometer/ak8975.c | 85 - drivers/infiniband/core/cache.c | 4 drivers/infiniband/core/iwcm.c | 2 drivers/infiniband/hw/cxgb4/cm.c | 5 drivers/infiniband/hw/erdma/erdma_verbs.c | 25 drivers/infiniband/hw/hns/hns_roce_hem.c | 22 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 33 drivers/infiniband/hw/hns/hns_roce_qp.c | 16 drivers/infiniband/hw/irdma/verbs.c | 2 drivers/infiniband/hw/mlx5/main.c | 2 drivers/infiniband/hw/mlx5/mr.c | 14 drivers/infiniband/ulp/rtrs/rtrs-clt.c | 9 drivers/infiniband/ulp/rtrs/rtrs-srv.c | 1 drivers/input/keyboard/adp5588-keys.c | 2 drivers/input/serio/i8042-acpipnpio.h | 37 drivers/input/touchscreen/ilitek_ts_i2c.c | 18 drivers/interconnect/icc-clk.c | 3 drivers/iommu/amd/io_pgtable_v2.c | 2 drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 28 drivers/iommu/iommufd/io_pagetable.c | 8 drivers/leds/leds-bd2606mvv.c | 23 drivers/leds/leds-pca995x.c | 78 drivers/md/dm-rq.c | 4 drivers/md/dm.c | 11 drivers/media/dvb-frontends/rtl2830.c | 2 drivers/media/dvb-frontends/rtl2832.c | 2 
drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c | 9 drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c | 9 drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c | 10 drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c | 1 drivers/media/tuners/tuner-i2c.h | 4 drivers/mtd/devices/powernv_flash.c | 3 drivers/mtd/devices/slram.c | 2 drivers/mtd/nand/raw/mtk_nand.c | 36 drivers/net/bareudp.c | 26 drivers/net/bonding/bond_main.c | 6 drivers/net/can/m_can/m_can.c | 14 drivers/net/can/usb/esd_usb.c | 6 drivers/net/ethernet/freescale/enetc/enetc.c | 3 drivers/net/ethernet/realtek/r8169_phy_config.c | 2 drivers/net/ethernet/seeq/ether3.c | 2 drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 3 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 37 drivers/net/usb/usbnet.c | 37 drivers/net/virtio_net.c | 10 drivers/net/wireless/ath/ath12k/mac.c | 5 drivers/net/wireless/ath/ath12k/wmi.c | 1 drivers/net/wireless/ath/ath12k/wmi.h | 3 drivers/net/wireless/ath/ath9k/debug.c | 2 drivers/net/wireless/ath/ath9k/htc_drv_debug.c | 2 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c | 2 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 26 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c | 115 - drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h | 147 + drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 11 drivers/net/wireless/intel/iwlwifi/iwl-config.h | 1 drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 2 drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 36 drivers/net/wireless/mediatek/mt76/mac80211.c | 2 drivers/net/wireless/mediatek/mt76/mt7603/dma.c | 4 drivers/net/wireless/mediatek/mt76/mt7615/init.c | 3 drivers/net/wireless/mediatek/mt76/mt7915/init.c | 2 drivers/net/wireless/mediatek/mt76/mt7915/main.c | 3 drivers/net/wireless/mediatek/mt76/mt7921/init.c | 2 drivers/net/wireless/mediatek/mt76/mt7996/init.c | 65 drivers/net/wireless/mediatek/mt76/mt7996/main.c | 6 drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 23 drivers/net/wireless/mediatek/mt76/mt7996/mcu.h | 4 drivers/net/wireless/microchip/wilc1000/hif.c | 4 drivers/net/wireless/realtek/rtw88/coex.c | 38 drivers/net/wireless/realtek/rtw88/fw.c | 13 drivers/net/wireless/realtek/rtw88/main.c | 7 drivers/net/wireless/realtek/rtw88/rtw8821cu.c | 2 drivers/net/wireless/realtek/rtw88/rtw8822c.c | 10 drivers/ntb/hw/intel/ntb_hw_gen1.c | 2 drivers/ntb/ntb_transport.c | 23 drivers/ntb/test/ntb_perf.c | 2 drivers/nvdimm/namespace_devs.c | 34 drivers/nvme/host/multipath.c | 2 drivers/pci/controller/dwc/pci-dra7xx.c | 3 drivers/pci/controller/dwc/pci-imx6.c | 7 drivers/pci/controller/dwc/pci-keystone.c | 2 drivers/pci/controller/dwc/pcie-kirin.c | 4 drivers/pci/controller/pcie-xilinx-nwl.c | 39 drivers/pci/pci.c | 20 drivers/pci/pci.h | 6 drivers/pci/quirks.c | 31 drivers/perf/alibaba_uncore_drw_pmu.c | 2 drivers/perf/arm-cmn.c | 242 +-- drivers/perf/hisilicon/hisi_pcie_pmu.c | 16 drivers/pinctrl/bcm/pinctrl-ns.c | 8 drivers/pinctrl/berlin/berlin-bg2.c | 8 drivers/pinctrl/berlin/berlin-bg2cd.c | 8 drivers/pinctrl/berlin/berlin-bg2q.c | 8 drivers/pinctrl/berlin/berlin-bg4ct.c | 9 drivers/pinctrl/berlin/pinctrl-as370.c | 9 drivers/pinctrl/mvebu/pinctrl-armada-38x.c | 9 drivers/pinctrl/mvebu/pinctrl-armada-39x.c | 9 drivers/pinctrl/mvebu/pinctrl-armada-ap806.c | 5 drivers/pinctrl/mvebu/pinctrl-armada-cp110.c | 6 drivers/pinctrl/mvebu/pinctrl-armada-xp.c | 9 
drivers/pinctrl/mvebu/pinctrl-dove.c | 48 drivers/pinctrl/mvebu/pinctrl-kirkwood.c | 7 drivers/pinctrl/mvebu/pinctrl-orion.c | 7 drivers/pinctrl/nomadik/pinctrl-abx500.c | 9 drivers/pinctrl/nomadik/pinctrl-nomadik.c | 10 drivers/pinctrl/pinctrl-at91.c | 11 drivers/pinctrl/pinctrl-single.c | 3 drivers/pinctrl/pinctrl-xway.c | 11 drivers/pinctrl/ti/pinctrl-ti-iodelay.c | 113 - drivers/power/supply/axp20x_battery.c | 16 drivers/power/supply/max17042_battery.c | 5 drivers/powercap/intel_rapl_common.c | 2 drivers/pps/clients/pps_parport.c | 14 drivers/regulator/of_regulator.c | 2 drivers/remoteproc/imx_rproc.c | 6 drivers/reset/reset-berlin.c | 3 drivers/reset/reset-k210.c | 3 drivers/scsi/NCR5380.c | 78 drivers/scsi/elx/libefc/efc_nport.c | 2 drivers/scsi/mac_scsi.c | 162 +- drivers/scsi/sd.c | 2 drivers/scsi/smartpqi/smartpqi_init.c | 20 drivers/soc/fsl/qe/tsa.c | 2 drivers/soc/qcom/smd-rpm.c | 35 drivers/soc/versatile/soc-integrator.c | 1 drivers/soc/versatile/soc-realview.c | 20 drivers/spi/atmel-quadspi.c | 15 drivers/spi/spi-bcmbca-hsspi.c | 8 drivers/spi/spi-fsl-lpspi.c | 1 drivers/spi/spi-nxp-fspi.c | 54 drivers/spi/spi-ppc4xx.c | 7 drivers/thunderbolt/switch.c | 331 +++- drivers/thunderbolt/tb.c | 784 +++++++--- drivers/thunderbolt/tb.h | 56 drivers/thunderbolt/tb_regs.h | 9 drivers/thunderbolt/tunnel.c | 217 +- drivers/thunderbolt/tunnel.h | 26 drivers/thunderbolt/usb4.c | 116 + drivers/tty/serial/8250/8250_omap.c | 2 drivers/tty/serial/qcom_geni_serial.c | 31 drivers/tty/serial/rp2.c | 2 drivers/tty/serial/serial_core.c | 14 drivers/ufs/host/ufs-qcom.c | 2 drivers/usb/cdns3/cdnsp-ring.c | 6 drivers/usb/cdns3/host.c | 4 drivers/usb/class/cdc-acm.c | 2 drivers/usb/dwc2/drd.c | 9 drivers/usb/host/xhci-mem.c | 5 drivers/usb/host/xhci-pci.c | 15 drivers/usb/host/xhci-ring.c | 14 drivers/usb/host/xhci.h | 3 drivers/usb/misc/appledisplay.c | 15 drivers/usb/misc/cypress_cy7c63.c | 4 drivers/usb/misc/yurex.c | 24 drivers/vhost/vdpa.c | 16 drivers/video/fbdev/hpfb.c | 1 drivers/watchdog/imx_sc_wdt.c | 24 drivers/xen/swiotlb-xen.c | 10 fs/btrfs/btrfs_inode.h | 47 fs/btrfs/ctree.h | 2 fs/btrfs/extent-tree.c | 4 fs/btrfs/file.c | 34 fs/btrfs/ioctl.c | 4 fs/btrfs/subpage.c | 10 fs/btrfs/tree-checker.c | 2 fs/cachefiles/xattr.c | 34 fs/crypto/fname.c | 8 fs/ecryptfs/crypto.c | 10 fs/erofs/inode.c | 20 fs/ext4/ialloc.c | 14 fs/ext4/inline.c | 35 fs/ext4/mballoc.c | 10 fs/ext4/super.c | 29 fs/f2fs/compress.c | 87 - fs/f2fs/data.c | 14 fs/f2fs/dir.c | 3 fs/f2fs/extent_cache.c | 4 fs/f2fs/f2fs.h | 48 fs/f2fs/file.c | 167 +- fs/f2fs/inode.c | 5 fs/f2fs/namei.c | 69 fs/f2fs/segment.c | 8 fs/f2fs/super.c | 20 fs/f2fs/xattr.c | 14 fs/fcntl.c | 14 fs/inode.c | 4 fs/jfs/jfs_dmap.c | 4 fs/jfs/jfs_imap.c | 2 fs/namei.c | 6 fs/namespace.c | 14 fs/nfs/nfs4state.c | 1 fs/nfsd/filecache.c | 3 fs/nfsd/nfs4idmap.c | 13 fs/nfsd/nfs4recover.c | 8 fs/nilfs2/btree.c | 12 fs/smb/server/vfs.c | 19 include/acpi/cppc_acpi.h | 2 include/linux/bitmap.h | 77 include/linux/bpf.h | 7 include/linux/f2fs_fs.h | 2 include/linux/fs.h | 11 include/linux/mm.h | 4 include/linux/mm_types.h | 31 include/linux/sbitmap.h | 2 include/linux/sched/numa_balancing.h | 10 include/linux/usb/usbnet.h | 15 include/linux/xarray.h | 6 include/net/bluetooth/hci_core.h | 4 include/net/ip.h | 2 include/net/mac80211.h | 7 include/net/tcp.h | 21 include/sound/tas2781.h | 8 include/trace/events/f2fs.h | 3 include/trace/events/sched.h | 52 io_uring/io-wq.c | 25 io_uring/io_uring.c | 4 io_uring/sqpoll.c | 12 kernel/bpf/btf.c | 8 kernel/bpf/helpers.c | 12 
kernel/bpf/syscall.c | 4 kernel/bpf/verifier.c | 57 kernel/kthread.c | 10 kernel/locking/lockdep.c | 48 kernel/module/Makefile | 2 kernel/padata.c | 6 kernel/rcu/tree_nocb.h | 5 kernel/sched/fair.c | 134 + kernel/trace/bpf_trace.c | 15 lib/debugobjects.c | 5 lib/sbitmap.c | 4 lib/test_xarray.c | 93 + lib/xarray.c | 49 lib/xz/xz_crc32.c | 2 lib/xz/xz_private.h | 4 mm/damon/vaddr.c | 2 mm/filemap.c | 50 mm/mmap.c | 4 mm/util.c | 2 net/bluetooth/hci_conn.c | 6 net/bluetooth/hci_sync.c | 5 net/bluetooth/mgmt.c | 13 net/can/bcm.c | 4 net/can/j1939/transport.c | 8 net/core/filter.c | 50 net/core/sock_map.c | 1 net/ipv4/icmp.c | 103 - net/ipv6/Kconfig | 1 net/ipv6/icmp.c | 28 net/ipv6/netfilter/nf_reject_ipv6.c | 14 net/ipv6/route.c | 2 net/ipv6/rpl_iptunnel.c | 12 net/mac80211/iface.c | 17 net/mac80211/offchannel.c | 1 net/mac80211/rate.c | 2 net/mac80211/scan.c | 2 net/mac80211/tx.c | 2 net/netfilter/nf_conntrack_netlink.c | 7 net/netfilter/nf_tables_api.c | 16 net/qrtr/af_qrtr.c | 2 net/tipc/bcast.c | 2 net/wireless/nl80211.c | 3 net/wireless/scan.c | 6 net/wireless/sme.c | 3 samples/bpf/Makefile | 6 security/bpf/hooks.c | 1 security/smack/smackfs.c | 2 sound/pci/hda/cs35l41_hda_spi.c | 1 sound/pci/hda/tas2781_hda_i2c.c | 14 sound/soc/codecs/rt5682.c | 4 sound/soc/codecs/rt5682s.c | 4 sound/soc/codecs/tas2781-comlib.c | 4 sound/soc/codecs/tas2781-fmwlib.c | 1 sound/soc/codecs/tas2781-i2c.c | 58 sound/soc/loongson/loongson_card.c | 4 tools/bpf/runqslower/Makefile | 3 tools/perf/builtin-annotate.c | 2 tools/perf/builtin-inject.c | 1 tools/perf/builtin-mem.c | 1 tools/perf/builtin-report.c | 11 tools/perf/builtin-sched.c | 8 tools/perf/builtin-top.c | 3 tools/perf/ui/browsers/annotate.c | 77 tools/perf/ui/browsers/hists.c | 34 tools/perf/ui/browsers/hists.h | 2 tools/perf/util/annotate.c | 116 - tools/perf/util/annotate.h | 37 tools/perf/util/block-info.c | 10 tools/perf/util/block-info.h | 3 tools/perf/util/hist.h | 25 tools/perf/util/session.c | 3 tools/perf/util/sort.c | 14 tools/perf/util/stat-display.c | 3 tools/perf/util/time-utils.c | 4 tools/perf/util/tool.h | 1 tools/power/cpupower/lib/powercap.c | 8 tools/testing/selftests/arm64/signal/Makefile | 2 tools/testing/selftests/arm64/signal/sve_helpers.c | 56 tools/testing/selftests/arm64/signal/sve_helpers.h | 21 tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c | 46 tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c | 30 tools/testing/selftests/arm64/signal/testcases/ssve_regs.c | 36 tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c | 36 tools/testing/selftests/arm64/signal/testcases/sve_regs.c | 32 tools/testing/selftests/arm64/signal/testcases/za_no_regs.c | 32 tools/testing/selftests/arm64/signal/testcases/za_regs.c | 36 tools/testing/selftests/bpf/Makefile | 30 tools/testing/selftests/bpf/bench.c | 1 tools/testing/selftests/bpf/bench.h | 1 tools/testing/selftests/bpf/map_tests/sk_storage_map.c | 2 tools/testing/selftests/bpf/network_helpers.c | 24 tools/testing/selftests/bpf/network_helpers.h | 4 tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c | 2 tools/testing/selftests/bpf/prog_tests/core_reloc.c | 1 tools/testing/selftests/bpf/prog_tests/decap_sanity.c | 1 tools/testing/selftests/bpf/prog_tests/flow_dissector.c | 3 tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 1 tools/testing/selftests/bpf/prog_tests/lwt_helpers.h | 2 tools/testing/selftests/bpf/prog_tests/lwt_redirect.c | 2 tools/testing/selftests/bpf/prog_tests/lwt_reroute.c | 2 
tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c | 154 + tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c | 1 tools/testing/selftests/bpf/prog_tests/sk_lookup.c | 1 tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 12 tools/testing/selftests/bpf/prog_tests/tcp_rtt.c | 1 tools/testing/selftests/bpf/prog_tests/user_ringbuf.c | 1 tools/testing/selftests/bpf/progs/cg_storage_multi.h | 2 tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c | 1 tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c | 17 tools/testing/selftests/bpf/test_cpp.cpp | 4 tools/testing/selftests/bpf/test_lru_map.c | 3 tools/testing/selftests/bpf/test_progs.c | 18 tools/testing/selftests/bpf/testing_helpers.c | 4 tools/testing/selftests/bpf/unpriv_helpers.c | 1 tools/testing/selftests/bpf/veristat.c | 8 tools/testing/selftests/bpf/xdp_hw_metadata.c | 14 tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc | 2 tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc | 2 virt/kvm/kvm_main.c | 31 499 files changed, 6253 insertions(+), 3268 deletions(-)
Aaron Lu (1): x86/sgx: Fix deadlock in SGX NUMA node search
Adrian Hunter (1): perf/x86/intel/pt: Fix sampling synchronization
Aleksandr Mishin (2): ACPI: PMIC: Remove unneeded check in tps68470_pmic_opregion_probe() drm/msm: Fix incorrect file name output in adreno_request_fw()
Alex Bee (1): drm/rockchip: vop: Allow 4096px width scaling
Alex Deucher (2): drm/amdgpu: properly handle vbios fake edid sizing drm/radeon: properly handle vbios fake edid sizing
Alexander Dahl (3): ARM: dts: microchip: sam9x60: Fix rtc/rtt clocks spi: atmel-quadspi: Avoid overwriting delay register settings spi: atmel-quadspi: Fix wrong register value written to MR
Alexander Shiyan (1): clk: rockchip: rk3588: Fix 32k clock name for pmu_24m_32k_100m_src_p
Alexandra Diupina (1): PCI: kirin: Fix buffer overflow in kirin_pcie_parse_port()
Alexei Starovoitov (1): selftests/bpf: Workaround strict bpf_lsm return value check.
Alexey Gladkov (Intel) (1): x86/tdx: Fix "in-kernel MMIO" check
Anastasia Belova (1): arm64: esr: Define ESR_ELx_EC_* constants as UL
Andre Przywara (1): kselftest/arm64: signal: fix/refactor SVE vector length enumeration
Andrew Davis (3): arm64: dts: ti: k3-j721e-sk: Fix reversed C6x carveout locations arm64: dts: ti: k3-j721e-beagleboneai64: Fix reversed C6x carveout locations hwmon: (max16065) Remove use of i2c_match_id()
Andrew Jones (1): RISC-V: KVM: Fix sbiret init before forwarding to userspace
André Apitzsch (1): iio: magnetometer: ak8975: Fix 'Unexpected device' error
Andy Shevchenko (2): spi: ppc4xx: Avoid returning 0 when failed to parse and map IRQ i2c: isch: Add missed 'else'
AngeloGioacchino Del Regno (1): arm64: dts: mediatek: mt8186: Fix supported-hw mask for GPU OPPs
Ankit Agrawal (1): clocksource/drivers/qcom: Add missing iounmap() on errors in msm_dt_timer_init()
Antoniu Miclaus (1): ABI: testing: fix admv8818 attr description
Ard Biesheuvel (1): efistub/tpm: Use ACPI reclaim memory for event log to avoid corruption
Arend van Spriel (3): wifi: brcmfmac: export firmware interface functions wifi: brcmfmac: introducing fwil query functions wifi: brcmfmac: add linefeed at end of file
Artur Weber (1): power: supply: max17042_battery: Fix SOC threshold calc w/ no current sense
Atish Patra (2): RISC-V: KVM: Allow legacy PMU access from guest RISC-V: KVM: Fix to allow hpmcounter31 from the guest
Avraham Stern (1): wifi: iwlwifi: mvm: increase the time between ranging measurements
Baochen Qiang (1): wifi: ath12k: fix invalid AMPDU factor calculation in ath12k_peer_assoc_h_he()
Benjamin Lin (1): wifi: mt76: mt7996: ensure 4-byte alignment for beacon commands
Biju Das (2): media: platform: rzg2l-cru: rzg2l-csi2: Add missing MODULE_DEVICE_TABLE iio: magnetometer: ak8975: Convert enum->pointer for data in the match tables
Bitterblue Smith (2): wifi: rtw88: Fix USB/SDIO devices not transmitting beacons wifi: rtw88: 8822c: Fix reported RX band width
Bjørn Mork (1): wifi: mt76: mt7915: fix oops on non-dbdc mt7986
Calvin Owens (1): ARM: 9410/1: vfp: Use asm volatile in fmrx/fmxr macros
Chao Yu (12):
      f2fs: atomic: fix to avoid racing w/ GC
      f2fs: reduce expensive checkpoint trigger frequency
      f2fs: fix to avoid racing in between read and OPU dio write
      f2fs: fix to wait page writeback before setting gcing flag
      f2fs: atomic: fix to truncate pagecache before on-disk metadata truncation
      f2fs: support .shutdown in f2fs_sops
      f2fs: fix to avoid use-after-free in f2fs_stop_gc_thread()
      f2fs: compress: do sanity check on cluster when CONFIG_F2FS_CHECK_FS is on
      f2fs: clean up w/ dotdot_name
      f2fs: get rid of online repaire on corrupted directory
      f2fs: fix to don't set SB_RDONLY in f2fs_handle_critical_error()
      f2fs: fix to check atomic_file in f2fs ioctl interfaces
Charles Han (1): mtd: powernv: Add check devm_kasprintf() returned value
Chen Yu (1): kthread: fix task state in kthread worker if being frozen
Chen-Yu Tsai (3): regulator: Return actual error in of_regulator_bulk_get_all() arm64: dts: mediatek: mt8195: Correct clock order for dp_intf* arm64: dts: mediatek: mt8195-cherry: Mark USB 3.0 on xhci1 as disabled
Cheng Xu (1): RDMA/erdma: Return QP state in erdma_query_qp
Chengchang Tang (2): RDMA/hns: Fix spin_unlock_irqrestore() called with IRQs enabled RDMA/hns: Fix 1bit-ECC recovery address in non-4K OS
Chris Morgan (1): power: supply: axp20x_battery: Remove design from min and max voltage
Christian Heusel (1): block: print symbolic error name instead of error code
Christophe JAILLET (4): fbdev: hpfb: Fix an error handling path in hpfb_dio_probe() drm/stm: Fix an error handling path in stm_drm_platform_probe() pinctrl: ti: ti-iodelay: Fix some error handling paths pps: remove usage of the deprecated ida_simple_xx() API
Christophe Leroy (3): powerpc/8xx: Fix initial memory mapping powerpc/8xx: Fix kernel vs user address comparison powerpc/vdso: Inconditionally use CFUNC macro
Chuck Lever (1): fs: Create a generic is_dot_dotdot() utility
Claudiu Beznea (3): ARM: dts: microchip: sama7g5: Fix RTT clock drm/stm: ltdc: check memory returned by devm_kzalloc() clk: at91: sama7g5: Allocate only the needed amount of memory for PLLs
Clément Léger (1): ACPI: CPPC: Fix MASK_VAL() usage
Cristian Marussi (1): firmware: arm_scmi: Fix double free in OPTEE transport
Cupertino Miranda (1): selftests/bpf: Add CFLAGS per source file and runner
D Scott Phillips (1): arm64: errata: Enable the AC03_CPU_38 workaround for ampere1a
Daeho Jeong (1): f2fs: prevent atomic file from being dirtied before commit
Daehwan Jung (1): xhci: Add a quirk for writing ERST in high-low order
Damien Le Moal (1): ata: libata-scsi: Fix ata_msense_control() CDL page reporting
Dan Carpenter (4): powercap: intel_rapl: Fix off by one in get_rpi() scsi: elx: libefc: Fix potential use after free in efc_nport_vport_del() PCI: keystone: Fix if-statement expression in ks_pcie_quirk() ep93xx: clock: Fix off by one in ep93xx_div_recalc_rate()
Daniel Borkmann (4): bpf: Fix bpf_strtol and bpf_strtoul helpers for 32bit bpf: Fix helper writes to read-only maps bpf: Improve check_raw_mode_ok test for MEM_UNINIT-tagged types bpf: Zero former ARG_PTR_TO_{LONG,INT} args in case of error
Danny Tsen (1): crypto: powerpc/p10-aes-gcm - Disable CRYPTO_AES_GCM_P10
Dave Jiang (1): ntb: Force physically contiguous allocation of rx ring buffers
Dave Martin (1): arm64: signal: Fix some under-bracketed UAPI macros
David Gow (1): mm: only enforce minimum stack gap size if it's sensible
David Howells (1): cachefiles: Fix non-taking of sb_writers around set/removexattr
David Lechner (1): clk: ti: dra7-atl: Fix leak of of_nodes
David Sterba (1): btrfs: reorder btrfs_inode to fill gaps
David Virag (1): arm64: dts: exynos: exynos7885-jackpotlte: Correct RAM amount to 4GB
Dmitry Antipov (4): wifi: rtw88: always wait for both firmware loading attempts wifi: cfg80211: fix UBSAN noise in cfg80211_wext_siwscan() wifi: cfg80211: fix two more possible UBSAN-detected off-by-one errors wifi: mac80211: use two-phase skb reclamation in ieee80211_do_stop()
Dmitry Baryshkov (8):
      iommu/arm-smmu-qcom: apply num_context_bank fixes for SDM630 / SDM660
      drm/msm/dsi: correct programming sequence for SM8350 / SM8450
      clk: qcom: dispcc-sm8550: fix several supposed typos
      clk: qcom: dispcc-sm8550: use rcg2_ops for mdss_dptx1_aux_clk_src
      clk: qcom: dispcc-sm8650: Update the GDSC flags
      clk: qcom: dispcc-sm8550: use rcg2_shared_ops for ESC RCGs
      clk: qcom: dispcc-sm8250: use special function for Lucid 5LPE PLL
      Revert "soc: qcom: smd-rpm: Match rpmsg channel instead of compatible"
Dmitry Kandybka (1): wifi: rtw88: remove CPT execution branch never used
Dmitry Vyukov (2): x86/entry: Remove unwanted instrumentation in common_interrupt() module: Fix KCOV-ignored file name
Dragan Simic (2): arm64: dts: rockchip: Raise Pinebook Pro's panel backlight PWM frequency arm64: dts: rockchip: Correct the Pinebook Pro battery design capacity
Duanqiang Wen (1): Revert "net: libwx: fix alloc msix vectors failed"
Eduard Zingerman (1): bpf: correctly handle malformed BPF_CORE_TYPE_ID_LOCAL relos
Emanuele Ghidoli (2): Input: ilitek_ts_i2c - avoid wrong input subsystem sync Input: ilitek_ts_i2c - add report id message validation
Eric Dumazet (4): sock_map: Add a cond_resched() in sock_hash_free() ipv6: avoid possible NULL deref in rt6_uncached_list_flush_dev() netfilter: nf_reject_ipv6: fix nf_reject_ip6_tcphdr_put() icmp: change the order of rate limits
Fabio Porcedda (1): bus: mhi: host: pci_generic: Fix the name for the Telit FE990A
Fangzhi Zuo (2): drm/amd/display: Fix Synaptics Cascaded Panamera DSC Determination drm/amd/display: Skip Recompute DSC Params if no Stream on Link
Fei Shao (1): drm/mediatek: Use spin_lock_irqsave() for CRTC event lock
Felix Fietkau (2): wifi: mt76: mt7603: fix mixed declarations and code wifi: mt76: mt7996: fix uninitialized TLV data
Felix Moessbauer (4): io_uring/io-wq: do not allow pinning outside of cpuset io_uring/io-wq: inherit cpuset of cgroup in io worker io_uring/sqpoll: do not allow pinning outside of cpuset io_uring/sqpoll: do not put cpumask on stack
Filipe Manana (2): btrfs: update comment for struct btrfs_inode::lock btrfs: fix race setting file private on concurrent lseek using same fd
Finn Thain (5):
      m68k: Fix kernel_clone_args.flags in m68k_clone()
      scsi: NCR5380: Check for phase match during PDMA fixup
      scsi: mac_scsi: Revise printk(KERN_DEBUG ...) messages
      scsi: mac_scsi: Refactor polling loop
      scsi: mac_scsi: Disallow bus errors during PDMA send
Florian Fainelli (1): tty: rp2: Fix reset with non forgiving PCIe host bridges
Frank Li (1): PCI: imx6: Fix missing call to phy_power_off() in error handling
Frederic Weisbecker (1): rcu/nocb: Fix RT throttling hrtimer armed from offline CPU
Furong Xu (1): net: stmmac: set PP_FLAG_DMA_SYNC_DEV only if XDP is enabled
Gao Xiang (1): erofs: fix incorrect symlink detection in fast symlink
Gaosheng Cui (2): hwrng: bcm2835 - Add missing clk_disable_unprepare in bcm2835_rng_init hwrng: cctrng - Add missing clk_disable_unprepare in cctrng_resume
Geert Uytterhoeven (1): pmdomain: core: Harden inter-column space in debug summary
Gergo Koteles (1): ASoC: tas2781: remove unused acpi_subysystem_id
Gil Fine (9):
      thunderbolt: Fix debug log when DisplayPort adapter not available for pairing
      thunderbolt: Create multiple DisplayPort tunnels if there are more DP IN/OUT pairs
      thunderbolt: Make is_gen4_link() available to the rest of the driver
      thunderbolt: Change bandwidth reservations to comply USB4 v2
      thunderbolt: Introduce tb_port_path_direction_downstream()
      thunderbolt: Add support for asymmetric link
      thunderbolt: Configure asymmetric link if needed and bandwidth allows
      thunderbolt: Improve DisplayPort tunnel setup process to be more robust
      thunderbolt: Fix minimum allocated USB 3.x and PCIe bandwidth
Gilbert Wu (1): scsi: smartpqi: revert propagate-the-multipath-failure-to-SML-quickly
Golan Ben Ami (1): wifi: iwlwifi: remove AX101, AX201 and AX203 support from LNL
Greg Kroah-Hartman (1): Linux 6.6.54
Guenter Roeck (2): hwmon: (max16065) Fix overflows seen when writing limits hwmon: (max16065) Fix alarm attributes
Guillaume Nault (2): bareudp: Pull inner IP header in bareudp_udp_encap_recv(). bareudp: Pull inner IP header on xmit.
Guillaume Stols (2): iio: adc: ad7606: fix oversampling gpio array iio: adc: ad7606: fix standby gpio state to match the documentation
Guoqing Jiang (2): nfsd: call cache_put if xdr_reserve_space returns NULL hwrng: mtk - Use devm_pm_runtime_enable
Haibo Chen (3): spi: fspi: involve lut_num for struct nxp_fspi_devtype_data dt-bindings: spi: nxp-fspi: add imx8ulp support spi: fspi: add support for imx8ulp
Hannes Reinecke (1): nvme-multipath: system fails to create generic nvme device
Harshit Mogalapalli (1): usb: yurex: Fix inconsistent locking bug in yurex_read()
Heiner Kallweit (1): r8169: disable ALDPS per default for RTL8125
Helge Deller (1): crypto: xor - fix template benchmarking
Herbert Xu (1): crypto: caam - Pad SG length when allocating hash edesc
Herve Codina (1): soc: fsl: cpm1: tsa: Fix tsa_write8()
Hobin Woo (1): ksmbd: make __dir_empty() compatible with POSIX
Howard Hsu (3): wifi: mt76: mt7996: fix HE and EHT beamforming capabilities wifi: mt76: mt7996: fix EHT beamforming capability check wifi: mt76: mt7915: fix rx filter setting for bfee functionality
Ian Rogers (2): perf inject: Fix leader sampling inserting additional samples perf time-utils: Fix 32-bit nsec parsing
Ilpo Järvinen (1): PCI: Wait for Link before restoring Downstream Buses
Jack Wang (1): RDMA/rtrs: Reset hb_missed_cnt after receiving other traffic from peer
Jacky Bai (1): clk: imx: composite-93: keep root clock on when mcore enabled
Jake Hamby (1): can: m_can: enable NAPI before enabling interrupts
Jann Horn (2): firmware_loader: Block path traversal f2fs: Require FMODE_WRITE for atomic write ioctls
Jason Gerecke (2): HID: wacom: Support sequence numbers smaller than 16-bit HID: wacom: Do not warn about dropped packets for first packet
Jason Gunthorpe (2): iommu/amd: Do not set the D bit on AMD v2 table entries iommufd: Protect against overflow of ALIGN() during iova allocation
Jason Wang (1): vhost_vdpa: assign irq bypass producer token correctly
Jason-JH.Lin (1): drm/mediatek: Fix missing configuration flags in mtk_crtc_ddp_config()
Javier Carrasco (3): leds: bd2606mvv: Fix device child node usage in bd2606mvv_probe() leds: pca995x: Use device_for_each_child_node() to access device child nodes leds: pca995x: Fix device child node usage in pca995x_probe()
Jeff Layton (2): nfsd: remove unneeded EEXIST error check in nfsd_do_file_acquire nfsd: fix refcount leak when file is unhashed after being found
Jens Axboe (2): io_uring: check for presence of task_work rather than TIF_NOTIFY_SIGNAL io_uring/sqpoll: retain test for whether the CPU is valid
Jeongjun Park (1): jfs: fix out-of-bounds in dbNextAG() and diAlloc()
Jiangshan Yi (1): samples/bpf: Fix compilation errors with cf-protection option
Jiawei Ye (2): wifi: wilc1000: fix potential RCU dereference issue in wilc_parse_join_bss_param smackfs: Use rcu_assign_pointer() to ensure safe assignment in smk_set_cipso
Jing Zhang (1): drivers/perf: Fix ali_drw_pmu driver interrupt status clearing
Jinjie Ruan (8):
      net: enetc: Use IRQF_NO_AUTOEN flag in request_irq()
      spi: bcmbca-hsspi: Fix missing pm_runtime_disable()
      mtd: rawnand: mtk: Use for_each_child_of_node_scoped()
      riscv: Fix fp alignment bug in perf_callchain_user()
      ntb: intel: Fix the NULL vs IS_ERR() bug for debugfs_create_dir()
      spi: atmel-quadspi: Undo runtime PM changes at driver exit time
      spi: spi-fsl-lpspi: Undo runtime PM changes at driver exit time
      driver core: Fix a potential null-ptr-deref in module_add_driver()
Jiri Slaby (SUSE) (1): serial: don't use uninitialized value in uart_poll_init()
Jiwon Kim (1): bonding: Fix unnecessary warnings and logs from bond_xdp_get_xmit_slave()
Johan Hovold (1): serial: qcom-geni: fix fifo polling timeout
Johannes Berg (1): wifi: iwlwifi: config: label 'gl' devices as discrete
John B. Wyatt IV (1): pm:cpupower: Add missing powercap_set_enabled() stub function
Jonas Blixt (1): watchdog: imx_sc_wdt: Don't disable WDT in suspend
Jonas Karlman (3): arm64: dts: rockchip: Correct vendor prefix for Hardkernel ODROID-M1 drm/rockchip: dw_hdmi: Fix reading EDID when using a forced mode clk: rockchip: Set parent rate for DCLK_VOP clock on RK3228
Jonathan McDowell (1): tpm: Clean up TPM space after command failure
Jose E. Marchesi (3): bpf: Use -Wno-error in certain tests when building with GCC bpf: Disable some `attribute ignored' warnings in GCC bpf: Temporarily define BPF_NO_PRESEVE_ACCESS_INDEX for GCC
Josh Hunt (1): tcp: check skb is non-NULL in tcp_rto_delta_us()
Juergen Gross (9):
      xen: use correct end address of kernel for conflict checking
      xen: introduce generic helper checking for memory map conflicts
      xen: move max_pfn in xen_memory_setup() out of function scope
      xen: add capability to remap non-RAM pages to different PFNs
      xen: tolerate ACPI NVS memory overlapping with Xen allocated memory
      xen/swiotlb: add alignment check for dma buffers
      xen/swiotlb: fix allocated size
      xen: move checks for e820 conflicts further up
      xen: allow mapping ACPI data using a different physical address
Julian Sun (1): vfs: fix race between evice_inodes() and find_inode()&iput()
Junlin Li (2): drivers: media: dvb-frontends/rtl2832: fix an out-of-bounds write error drivers: media: dvb-frontends/rtl2830: fix an out-of-bounds write error
Junxian Huang (4): RDMA/hns: Don't modify rq next block addr in HIP09 QPC RDMA/hns: Fix VF triggering PF reset in abnormal interrupt handler RDMA/hns: Optimize hem allocation performance RDMA/hns: Fix restricted __le16 degrades to integer issue
Justin Iurman (1): net: ipv6: rpl_iptunnel: Fix memory leak in rpl_input
Kairui Song (3): mm/filemap: return early if failed to allocate memory for split lib/xarray: introduce a new helper xas_get_order mm/filemap: optimize filemap folio adding
Kaixin Wang (1): net: seeq: Fix use after free vulnerability in ether3 Driver Due to Race Condition
Kamlesh Gurudasani (1): padata: Honor the caller's alignment in case of chunk_size 0
Kan Liang (1): perf report: Fix --total-cycles --stdio output error
Kees Cook (1): interconnect: icc-clk: Add missed num_nodes initialization
Kemeng Shi (3): ext4: avoid buffer_head leak in ext4_mark_inode_used() ext4: avoid potential buffer_head leak in __ext4_new_inode() ext4: avoid negative min_clusters in find_group_orlov()
Konrad Dybcio (1): iommu/arm-smmu-qcom: Work around SDM845 Adreno SMMU w/ 16K pages
Krzysztof Kozlowski (12):
      ARM: dts: imx7d-zii-rmu2: fix Ethernet PHY pinctrl property
      ARM: versatile: fix OF node leak in CPUs prepare
      reset: berlin: fix OF node leak in probe() error path
      reset: k210: fix OF node leak in probe() error path
      iio: magnetometer: ak8975: drop incorrect AK09116 compatible
      dt-bindings: iio: asahi-kasei,ak8975: drop incorrect AK09116 compatible
      soc: versatile: integrator: fix OF node leak in probe() error path
      bus: integrator-lm: fix OF node leak in probe()
      cpuidle: riscv-sbi: Use scoped device node handling to fix missing of_node_put
      ARM: dts: imx6ul-geam: fix fsl,pins property in tscgrp pinctrl
      soc: versatile: realview: fix memory leak during device remove
      soc: versatile: realview: fix soc_dev leak during device remove
Kuniyuki Iwashima (1): can: bcm: Clear bo->bcm_proc_read after remove_proc_entry().
Lad Prabhakar (3): arm64: dts: renesas: r9a07g043u: Correct GICD and GICR sizes arm64: dts: renesas: r9a07g054: Correct GICD and GICR sizes arm64: dts: renesas: r9a07g044: Correct GICD and GICR sizes
Lasse Collin (1): xz: cleanup CRC32 edits from 2018
Laurent Pinchart (1): Remove *.orig pattern from .gitignore
Lee Jones (1): usb: yurex: Replace snprintf() with the safer scnprintf() variant
Leo Ma (1): drm/amd/display: Add HDMI DSC native YCbCr422 support
Li Lingfeng (2): nfsd: return -EINVAL when namelen is 0 nfs: fix memory leak in error path of nfs4_do_reclaim
Li Zhijian (1): nvdimm: Fix devs leaks in scan_labels()
Liam R. Howlett (1): mm/damon/vaddr: protect vma traversal in __damon_va_thre_regions() with rcu read lock
Linus Torvalds (1): minmax: avoid overly complex min()/max() macro arguments in xen
Linus Walleij (2): ASoC: tas2781-i2c: Drop weird GPIO code ASoC: tas2781-i2c: Get the right GPIO line
Liu Ying (1): drm/bridge: lontium-lt8912b: Validate mode in drm_bridge_funcs::mode_valid()
Luca Stefani (1): btrfs: always update fstrim_range on failure in FITRIM ioctl
Luiz Augusto von Dentz (3): Bluetooth: hci_core: Fix sending MGMT_EV_CONNECT_FAILED Bluetooth: hci_sync: Ignore errors from HCI_OP_REMOTE_NAME_REQ_CANCEL Bluetooth: btusb: Fix not handling ZPL/short-transfer
Ma Ke (8):
      spi: ppc4xx: handle irq_of_parse_and_map() errors
      ASoC: rt5682s: Return devm_of_clk_add_hw_provider to transfer the error
      ASoC: rt5682: Return devm_of_clk_add_hw_provider to transfer the error
      wifi: mt76: mt7921: Check devm_kasprintf() returned value
      wifi: mt76: mt7915: check devm_kasprintf() returned value
      wifi: mt76: mt7996: fix NULL pointer dereference in mt7996_mcu_sta_bfer_he
      wifi: mt76: mt7615: check devm_kasprintf() returned value
      pps: add an error check in parport_attach
Maciej W. Rozycki (4): PCI: Revert to the original speed after PCIe failed link retraining PCI: Clear the LBMS bit after a link retrain PCI: Correct error reporting with PCIe failed link retraining PCI: Use an error code with PCIe failed link retraining
Manish Pandey (1): scsi: ufs: qcom: Update MODE_MAX cfg_bw value
Marc Gonzalez (1): iommu/arm-smmu-qcom: hide last LPASS SMMU context bank from linux
Marc Kleine-Budde (1): can: m_can: m_can_close(): stop clocks after device has been shut down
Mario Limonciello (1): drm/amd/display: Validate backlight caps are sane
Mark Bloch (1): RDMA/mlx5: Obtain upper net device only when needed
Mark Brown (1): kselftest/arm64: Actually test SME vector length changes via sigreturn
Markus Elfring (1): clk: imx: composite-8m: Less function calls in __imx8m_clk_hw_composite() after error detection
Markus Schneider-Pargmann (1): serial: 8250: omap: Cleanup on error in request_irq
Martin Wilck (1): scsi: sd: Fix off-by-one error in sd_read_block_characteristics()
Masami Hiramatsu (Google) (1): selftests/ftrace: Add required dependency for kprobe tests
Mathias Nyman (1): xhci: Set quirky xHC PCI hosts to D3 _after_ stopping and freeing them.
Max Hawking (1): ntb_perf: Fix printk format
Md Haris Iqbal (1): RDMA/rtrs-clt: Reset cid to con_num - 1 to stay in bounds
Mel Gorman (5):
      sched/numa: Document vma_numab_state fields
      sched/numa: Rename vma_numab_state::access_pids[] => ::pids_active[], ::next_pid_reset => ::pids_active_reset
      sched/numa: Trace decisions related to skipping VMAs
      sched/numa: Complete scanning of partial VMAs regardless of PID activity
      sched/numa: Complete scanning of inactive VMAs when there is no alternative
Michael Ellerman (1): powerpc/atomic: Use YZ constraints for DS-form instructions
Michael Guralnik (1): RDMA/mlx5: Limit usage of over-sized mkeys from the MR cache
Michael Trimarchi (1): tty: serial: kgdboc: Fix 8250_* kgdb over serial
Mickaël Salaün (1): fs: Fix file_set_fowner LSM hook inconsistencies
Mika Westerberg (8):
      thunderbolt: Use tb_tunnel_dbg() where possible to make logging more consistent
      thunderbolt: Expose tb_tunnel_xxx() log macros to the rest of the driver
      thunderbolt: Use constants for path weight and priority
      thunderbolt: Use weight constants in tb_usb3_consumed_bandwidth()
      thunderbolt: Introduce tb_for_each_upstream_port_on_path()
      thunderbolt: Introduce tb_switch_depth()
      thunderbolt: Send uevent after asymmetric/symmetric switch
      thunderbolt: Fix NULL pointer dereference in tb_port_update_credits()
Mikhail Lobanov (2): RDMA/cxgb4: Added NULL check for lookup_atid drbd: Add NULL check for net_conf to prevent dereference in state validation
Mikulas Patocka (3): Revert "dm: requeue IO if mapping table not yet available" dm-verity: restart or panic on an I/O error Revert: "dm-verity: restart or panic on an I/O error"
Ming Lei (3): ublk: move zone report data out of request pdu nbd: fix race between timeout and normal completion lib/sbitmap: define swap_lock as raw_spinlock_t
Miquel Raynal (2): mtd: rawnand: mtk: Factorize out the logic cleaning mtk chips mtd: rawnand: mtk: Fix init error path
Mirsad Todorovac (1): mtd: slram: insert break after errors in parsing the map
Namhyung Kim (4): perf mem: Free the allocated sort string, fixing a leak perf annotate: Split branch stack cycles info from 'struct annotation' perf annotate: Move some source code related fields from 'struct annotation' to 'struct annotated_source' perf ui/browser/annotate: Use global annotation_options
Namjae Jeon (2): ksmbd: allow write with FILE_APPEND_DATA ksmbd: handle caseless file creation
Nick Morrow (1): wifi: rtw88: 8821cu: Remove VID/PID 0bda:c82c
Nikita Zhandarovich (4): drm/radeon/evergreen_cs: fix int overflow errors in cs track offsets f2fs: fix several potential integer overflows in file offsets f2fs: prevent possible int overflow in dir_block_index() f2fs: avoid potential int overflow in sanity_check_area_boundary()
Niklas Cassel (1): ata: libata: Clear DID_TIME_OUT for ATA PT commands with sense data
Nishanth Menon (1): cpufreq: ti-cpufreq: Introduce quirks to handle syscon fails appropriately
Nuno Sa (1): Input: adp5588-keys - fix check on return code
Ojaswin Mujoo (1): ext4: check stripe size compatibility on remount as well
Olaf Hering (1): mount: handle OOM on mnt_warn_timestamp_expiry
Oleg Nesterov (1): bpf: Fix use-after-free in bpf_uprobe_multi_link_attach()
Oliver Neukum (5):
      usbnet: fix cyclical race on disconnect with work queue
      USB: appledisplay: close race between probe and completion handler
      USB: misc: cypress_cy7c63: check for short transfer
      USB: class: CDC-ACM: fix race between get_serial and set_serial
      USB: misc: yurex: fix race between read and write
P Praneesh (2): wifi: ath12k: fix BSS chan info request WMI command wifi: ath12k: match WMI BSS chan info structure with firmware definition
Pablo Neira Ayuso (5):
      netfilter: nf_tables: elements with timeout below CONFIG_HZ never expire
      netfilter: nf_tables: reject element expiration with no timeout
      netfilter: nf_tables: reject expiration higher than timeout
      netfilter: nf_tables: remove annotation to access set timeout while holding lock
      netfilter: nf_tables: use rcu chain hook list iterator from netlink dump path
Paolo Bonzini (1): Documentation: KVM: fix warning in "make htmldocs"
Patrisious Haddad (1): IB/core: Fix ib_cache_setup_one error flow cleanup
Pavan Kumar Paluri (1): crypto: ccp - Properly unregister /dev/sev on sev PLATFORM_STATUS failure
Pawel Laszczak (2): usb: cdnsp: Fix incorrect usb_request status usb: xhci: fix loss of data on Cadence xHC
Peng Fan (7):
      clk: imx: composite-8m: Enable gate clk with mcore_booted
      clk: imx: imx8qxp: Register dc0_bypass0_clk before disp clk
      clk: imx: imx8qxp: Parent should be initialized earlier than the clock
      remoteproc: imx_rproc: Correct ddr alias for i.MX8M
      remoteproc: imx_rproc: Initialize workqueue earlier
      pinctrl: ti: iodelay: Use scope based of_node_put() cleanups
      dt-bindings: spi: nxp-fspi: support i.MX93 and i.MX95
Pengfei Li (1): clk: imx: fracn-gppll: fix fractional part of PLL getting lost
Peter Chiu (3): wifi: mt76: mt7996: use hweight16 to get correct tx antenna wifi: mt76: mt7996: fix traffic delay when switching back to working channel wifi: mt76: mt7996: fix wmm set of station interface to 3
Phil Sutter (1): netfilter: nf_tables: Keep deleted flowtable hooks until after RCU
Pieterjan Camerlynck (1): leds: leds-pca995x: Add support for NXP PCA9956B
Ping-Ke Shih (1): wifi: mac80211: don't use rate mask for offchannel TX either
Qingqing Zhou (1): arm64: dts: qcom: sa8775p: Mark APPS and PCIe SMMUs as DMA coherent
Qiu-ji Chen (1): drbd: Fix atomicity violation in drbd_uuid_set_bm()
Qiuxu Zhuo (1): EDAC/igen6: Fix conversion of system address to physical memory address
Qu Wenruo (2): btrfs: tree-checker: fix the wrong output of data backref objectid btrfs: subpage: fix the bitmap dump which can cause bitmap corruption
Raghavendra K T (1): sched/numa: Move up the access pid reset logic
Riyan Dhiman (1): block: fix potential invalid pointer dereference in blk_add_partition
Rob Herring (1): pinctrl: Use device_get_match_data()
Rob Herring (Arm) (1): ASoC: tas2781: Use of_property_read_reg()
Robin Chen (1): drm/amd/display: Round calculated vtotal
Robin Murphy (6):
      perf/arm-cmn: Rework DTC counters (again)
      perf/arm-cmn: Improve debugfs pretty-printing for large configs
      perf/arm-cmn: Refactor node ID handling. Again.
      perf/arm-cmn: Fix CCLA register offset
      perf/arm-cmn: Ensure dtm_idx is big enough
      perf/arm-cmn: Fail DTC counter allocation correctly
Roman Smirnov (2): Revert "media: tuners: fix error return code of hybrid_tuner_request_state()" KEYS: prevent NULL pointer dereference in find_asymmetric_key()
Ryusuke Konishi (3): nilfs2: fix potential null-ptr-deref in nilfs_btree_insert() nilfs2: determine empty node blocks as corrupted nilfs2: fix potential oob read in nilfs_btree_check_delete()
Samasth Norway Ananda (1): x86/PCI: Check pcie_find_root_port() return for NULL
Sean Anderson (5):
      PCI: xilinx-nwl: Fix register misspelling
      PCI: xilinx-nwl: Clean up clock on probe failure/removal
      net: xilinx: axienet: Schedule NAPI in two steps
      net: xilinx: axienet: Fix packet counting
      PCI: xilinx-nwl: Fix off-by-one in INTx IRQ handler
Sean Christopherson (3): KVM: x86: Enforce x2APIC's must-be-zero reserved ICR bits KVM: x86: Move x2APIC ICR helper above kvm_apic_write_nodecode() KVM: Use dedicated mutex to protect kvm_usage_count to avoid deadlock
Sebastien Laveze (1): clk: imx: imx6ul: fix default parent for enet*_ref_sel
Serge Semin (1): EDAC/synopsys: Fix ECC status and IRQ control race condition
Shengjiu Wang (1): clk: imx: clk-audiomix: Correct parent clock for earc_phy and audpll
Sherry Yang (1): drm/msm: fix %s null argument error
Shu Han (1): mm: call the security_mmap_file() LSM hook in remap_file_pages()
Shubhrajyoti Datta (1): EDAC/synopsys: Fix error injection on Zynq UltraScale+
Siddharth Vadapalli (1): PCI: dra7xx: Fix threaded IRQ request for "dra7xx-pcie-main" IRQ
Simon Horman (1): netfilter: ctnetlink: compile ctnetlink_label_size with CONFIG_NF_CONNTRACK_EVENTS
Snehal Koukuntla (1): KVM: arm64: Add memory length checks and remove inline in do_ffa_mem_xfer
Song Liu (1): bpf: lsm: Set bpf_lsm_blob_sizes.lbs_task to 0
Srinivasan Shanmugam (1): drm/amd/display: Add null check for set_output_gamma in dcn30_set_output_transfer_func
Stefan Mätje (1): can: esd_usb: Remove CAN_CTRLMODE_3_SAMPLES for CAN-USB/3-FD
Stefan Wahren (1): drm/vc4: hdmi: Handle error case of pm_runtime_resume_and_get
Su Hui (1): net: tipc: avoid possible garbage value
Suzuki K Poulose (1): coresight: tmc: sg: Do not leak sg_table
Syed Nayyar Waris (1): lib/bitmap: add bitmap_{read,write}()
Takashi Sakamoto (1): firewire: core: correct range of block for case of switch statement
Thadeu Lima de Souza Cascardo (2): ext4: return error on ext4_find_inline_entry ext4: avoid OOB when system.data xattr changes underneath the filesystem
Thomas Weißschuh (2): net: ipv6: select DST_CACHE from IPV6_RPL_LWTUNNEL ACPI: sysfs: validate return type of _STR method
Tianchen Ding (1): sched/fair: Make SCHED_IDLE entity be preempted in strict hierarchy
Toke Høiland-Jørgensen (1): wifi: ath9k: Remove error checks when creating debugfs entries
Tomas Marek (1): usb: dwc2: drd: fix clock gating on USB role switch
Tommy Huang (1): i2c: aspeed: Update the stop sw state when the bus recovery occurs
Tony Ambardar (25):
      selftests/bpf: Fix error linking uprobe_multi on mips
      selftests/bpf: Fix wrong binary in Makefile log output
      tools/runqslower: Fix LDFLAGS and add LDLIBS support
      selftests/bpf: Use pid_t consistently in test_progs.c
      selftests/bpf: Fix compile error from rlim_t in sk_storage_map.c
      selftests/bpf: Fix error compiling bpf_iter_setsockopt.c with musl libc
      selftests/bpf: Drop unneeded error.h includes
      selftests/bpf: Fix missing ARRAY_SIZE() definition in bench.c
      selftests/bpf: Fix missing UINT_MAX definitions in benchmarks
      selftests/bpf: Fix missing BUILD_BUG_ON() declaration
      selftests/bpf: Fix include of <sys/fcntl.h>
      selftests/bpf: Fix compiling parse_tcp_hdr_opt.c with musl-libc
      selftests/bpf: Fix compiling kfree_skb.c with musl-libc
      selftests/bpf: Fix compiling flow_dissector.c with musl-libc
      selftests/bpf: Fix compiling tcp_rtt.c with musl-libc
      selftests/bpf: Fix compiling core_reloc.c with musl-libc
      selftests/bpf: Fix errors compiling lwt_redirect.c with musl libc
      selftests/bpf: Fix errors compiling decap_sanity.c with musl libc
      selftests/bpf: Fix errors compiling cg_storage_multi.h with musl libc
      selftests/bpf: Fix arg parsing in veristat, test_progs
      selftests/bpf: Fix error compiling test_lru_map.c
      selftests/bpf: Fix C++ compile error from missing _Bool type
      selftests/bpf: Fix redefinition errors compiling lwt_reroute.c
      selftests/bpf: Fix compile if backtrace support missing in libc
      selftests/bpf: Fix error compiling tc_redirect.c with musl libc
Tushar Vyavahare (1): selftests/bpf: Implement get_hw_ring_size function to retrieve current and max interface size
Uwe Kleine-König (1): pinctrl: ti: ti-iodelay: Convert to platform remove callback returning void
VanGiang Nguyen (1): padata: use integer wrap around to prevent deadlock on seq_nr overflow
Varadarajan Narayanan (1): clk: qcom: ipq5332: Register gcc_qdss_tsctr_clk_src
Vasileios Amoiridis (1): iio: chemical: bme680: Fix read/write ops to device by adding mutexes
Vitaliy Shevtsov (1): RDMA/irdma: fix error message in irdma_modify_qp_roce()
Vladimir Lypak (4): drm/msm/a5xx: disable preemption in submits by default drm/msm/a5xx: properly clear preemption records on resume drm/msm/a5xx: fix races in preemption evaluation stage drm/msm/a5xx: workaround early ring-buffer emptiness check
Wang Jianzheng (1): pinctrl: mvebu: Fix devinit_dove_pinctrl_probe function
WangYuli (1): drm/amd/amdgpu: Properly tune the size of struct
Weili Qian (3): crypto: hisilicon/hpre - mask cluster timeout error crypto: hisilicon/qm - reset device before enabling it crypto: hisilicon/qm - inject error before stopping queue
Wenbo Li (1): virtio_net: Fix mismatched buf address when unmapping for small packets
Werner Sembach (4): Input: i8042 - add TUXEDO Stellaris 16 Gen5 AMD to i8042 quirk table Input: i8042 - add TUXEDO Stellaris 15 Slim Gen6 AMD to i8042 quirk table Input: i8042 - add another board name for TUXEDO Stellaris Gen5 AMD line ACPI: resource: Add another DMI match for the TongFang GMxXGxx
Wolfram Sang (1): ipmi: docs: don't advertise deprecated sysfs entries
Xin Li (1): x86/idtentry: Incorporate definitions/declarations of the FRED entries
Yanfei Xu (1): cxl/pci: Fix to record only non-zero ranges
Yang Jihong (2): perf sched timehist: Fix missing free of session in perf_sched__timehist() perf sched timehist: Fixed timestamp error when unable to confirm event sched_in time
Yang Yingliang (1): pinctrl: single: fix missing error code in pcs_probe()
Yanteng Si (1): net: stmmac: dwmac-loongson: Init ref and PTP clocks rate
Ye Li (1): clk: imx: composite-7ulp: Check the PCC present bit
Yeongjin Gil (2): f2fs: Create COW inode from parent dentry for atomic write f2fs: compress: don't redirty sparse cluster during {,de}compress
Yicong Yang (3): drivers/perf: hisi_pcie: Record hardware counts correctly drivers/perf: hisi_pcie: Fix TLP headers bandwidth counting perf stat: Display iostat headers correctly
Yonghong Song (4): selftests/bpf: Replace CHECK with ASSERT_* in ns_current_pid_tgid test selftests/bpf: Refactor out some functions in ns_current_pid_tgid test selftests/bpf: Add a cgroup prog bpf_get_ns_current_pid_tgid() test selftests/bpf: Fix flaky selftest lwt_redirect/lwt_reroute
Yosry Ahmed (1): x86/mm: Use IPIs to synchronize LAM enablement
Youssef Samir (1): net: qrtr: Update packets cloning when broadcasting
Yu Kuai (5):
      block, bfq: fix possible UAF for bfqq->bic with merge chain
      block, bfq: choose the last bfqq from merge chain in bfq_setup_cooperator()
      block, bfq: don't break merge chain in bfq_split_bfqq()
      block, bfq: fix uaf for accessing waker_bfqq after splitting
      block, bfq: fix procress reference leakage for bfqq in merge chain
Yuesong Li (1): drivers:drm:exynos_drm_gsc:Fix wrong assignment in gsc_bind()
Yujie Liu (1): sched/numa: Fix the vma scan starving issue
Yunfei Dong (3): media: mediatek: vcodec: Fix H264 multi stateless decoder smatch warning media: mediatek: vcodec: Fix VP8 stateless decoder smatch warning media: mediatek: vcodec: Fix H264 stateless decoder smatch warning
Yuntao Liu (3): ALSA: hda: cs35l41: fix module autoloading hwmon: (ntc_thermistor) fix module autoloading clk: starfive: Use pm_runtime_resume_and_get to fix pm_runtime_get_sync() usage
Zack Rusin (1): drm/vmwgfx: Prevent unmapping active read buffers
Zhang Changzhong (1): can: j1939: use correct function name in comment
Zhen Lei (1): debugobjects: Fix conditions in fill_pool()
Zhiguo Niu (1): lockdep: fix deadlock issue between lockdep and rcu
Zhipeng Wang (1): clk: imx: imx8mp: fix clock tree update of TF-A managed clocks
Zhu Yanjun (1): RDMA/iwcm: Fix WARNING:at_kernel/workqueue.c:#check_flush_dependency
Zijun Hu (1): driver core: Fix error handling in driver API device_rename()
tangbin (1): ASoC: loongson: fix error release
wenglianfa (2): RDMA/hns: Fix Use-After-Free of rsv_qp on HIP08 RDMA/hns: Fix the overflow risk of hem_list_calc_ba_range()
yangerkun (1): ext4: clear EXT4_GROUP_INFO_WAS_TRIMMED_BIT even mount with discard
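The per-author list above and the diffstat can also be regenerated locally
from the stable tree, assuming both the previous v6.6.53 tag and the new
v6.6.54 tag have been fetched:

	git shortlog v6.6.53..v6.6.54
	git diff --stat v6.6.53 v6.6.54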
diff --git a/.gitignore b/.gitignore index 0bbae167bf93..d1a8ab3f98aa 100644 --- a/.gitignore +++ b/.gitignore @@ -135,7 +135,6 @@ GTAGS # id-utils files ID
-*.orig *~ #*#
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 index 31dbb390573f..c431f0a13cf5 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 +++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 @@ -3,7 +3,7 @@ KernelVersion: Contact: linux-iio@vger.kernel.org Description: Reading this returns the valid values that can be written to the - on_altvoltage0_mode attribute: + filter_mode attribute:
- auto -> Adjust bandpass filter to track changes in input clock rate. - manual -> disable/unregister the clock rate notifier / input clock tracking. diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst index 357d6cb98161..3084f5cf5e40 100644 --- a/Documentation/arch/arm64/silicon-errata.rst +++ b/Documentation/arch/arm64/silicon-errata.rst @@ -54,6 +54,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 | +----------------+-----------------+-----------------+-----------------------------+ +| Ampere | AmpereOne AC04 | AC04_CPU_10 | AMPERE_ERRATUM_AC03_CPU_38 | ++----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | +----------------+-----------------+-----------------+-----------------------------+ diff --git a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml index 9790f75fc669..fe5145d3b73c 100644 --- a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml +++ b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml @@ -23,7 +23,6 @@ properties: - ak8963 - ak09911 - ak09912 - - ak09916 deprecated: true
reg: diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml index 7fd591145480..902db92da832 100644 --- a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml +++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml @@ -15,12 +15,19 @@ allOf:
properties: compatible: - enum: - - nxp,imx8dxl-fspi - - nxp,imx8mm-fspi - - nxp,imx8mp-fspi - - nxp,imx8qxp-fspi - - nxp,lx2160a-fspi + oneOf: + - enum: + - nxp,imx8dxl-fspi + - nxp,imx8mm-fspi + - nxp,imx8mp-fspi + - nxp,imx8qxp-fspi + - nxp,imx8ulp-fspi + - nxp,lx2160a-fspi + - items: + - enum: + - nxp,imx93-fspi + - nxp,imx95-fspi + - const: nxp,imx8mm-fspi
reg: items: diff --git a/Documentation/driver-api/ipmi.rst b/Documentation/driver-api/ipmi.rst index e224e47b6b09..dfa021eacd63 100644 --- a/Documentation/driver-api/ipmi.rst +++ b/Documentation/driver-api/ipmi.rst @@ -540,7 +540,7 @@ at module load time (for a module) with:: alerts_broken
The addresses are normal I2C addresses. The adapter is the string -name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name. +name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name. It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring spaces, so if the name is "This is an I2C chip" you can say adapter_name=ThisisanI2cchip. This is because it's hard to pass in diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst index 3a034db5e55f..887d9d2fed49 100644 --- a/Documentation/virt/kvm/locking.rst +++ b/Documentation/virt/kvm/locking.rst @@ -9,7 +9,7 @@ KVM Lock Overview
The acquisition orders for mutexes are as follows:
-- cpus_read_lock() is taken outside kvm_lock +- cpus_read_lock() is taken outside kvm_lock and kvm_usage_lock
- kvm->lock is taken outside vcpu->mutex
@@ -24,6 +24,13 @@ The acquisition orders for mutexes are as follows: are taken on the waiting side when modifying memslots, so MMU notifiers must not take either kvm->slots_lock or kvm->slots_arch_lock.
+cpus_read_lock() vs kvm_lock: + +- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that + being the official ordering, as it is quite easy to unknowingly trigger + cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list, + e.g. avoid complex operations when possible. + For SRCU:
- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections @@ -228,10 +235,17 @@ time it will be set using the Dirty tracking mechanism described above. :Type: mutex :Arch: any :Protects: - vm_list - - kvm_usage_count + +``kvm_usage_lock`` +^^^^^^^^^^^^^^^^^^ + +:Type: mutex +:Arch: any +:Protects: - kvm_usage_count - hardware virtualization enable/disable -:Comment: KVM also disables CPU hotplug via cpus_read_lock() during - enable/disable. +:Comment: Exists because using kvm_lock leads to deadlock (see earlier comment + on cpus_read_lock() vs kvm_lock). Note, KVM also disables CPU hotplug via + cpus_read_lock() when enabling/disabling virtualization.
``kvm->mn_invalidate_lock`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -291,11 +305,12 @@ time it will be set using the Dirty tracking mechanism described above. wakeup.
``vendor_module_lock`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^ :Type: mutex :Arch: x86 :Protects: loading a vendor module (kvm_amd or kvm_intel) -:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is - taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and - many operations need to take cpu_hotplug_lock when loading a vendor module, - e.g. updating static calls. +:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken + in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while + cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many + operations need to take cpu_hotplug_lock when loading a vendor module, e.g. + updating static calls. diff --git a/Makefile b/Makefile index 0158e14f0dd9..1e382bacd8ea 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 6 -SUBLEVEL = 53 +SUBLEVEL = 54 EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth
diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi index 73d570a17269..1705c96f4221 100644 --- a/arch/arm/boot/dts/microchip/sam9x60.dtsi +++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi @@ -1312,7 +1312,7 @@ rtt: rtc@fffffe20 { compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; reg = <0xfffffe20 0x20>; interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; };
pit: timer@fffffe40 { @@ -1338,7 +1338,7 @@ rtc: rtc@fffffea8 { compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc"; reg = <0xfffffea8 0x100>; interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; };
watchdog: watchdog@ffffff80 { diff --git a/arch/arm/boot/dts/microchip/sama7g5.dtsi b/arch/arm/boot/dts/microchip/sama7g5.dtsi index 269e0a3ca269..7a95464bb78d 100644 --- a/arch/arm/boot/dts/microchip/sama7g5.dtsi +++ b/arch/arm/boot/dts/microchip/sama7g5.dtsi @@ -272,7 +272,7 @@ rtt: rtc@e001d020 { compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; reg = <0xe001d020 0x30>; interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; };
clk32k: clock-controller@e001d050 { diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts index 875ae699c5cb..ce9f4c226729 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts @@ -366,7 +366,7 @@ MX6UL_PAD_ENET1_RX_ER__PWM8_OUT 0x110b0 };
pinctrl_tsc: tscgrp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0 MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0 MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0 diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts index 521493342fe9..8f5566027c25 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts +++ b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts @@ -350,7 +350,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x59
&iomuxc_lpsr { pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp { - fsl,phy = < + fsl,pins = < MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08 >; }; diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index 85a496ddc619..e9f72a529b50 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -359,7 +359,7 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw, u32 val = __raw_readl(psc->reg); u8 index = (val & psc->mask) >> psc->shift;
- if (index > psc->num_div) + if (index >= psc->num_div) return 0;
return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]); diff --git a/arch/arm/mach-versatile/platsmp-realview.c b/arch/arm/mach-versatile/platsmp-realview.c index 5d363385c801..059d796b26bc 100644 --- a/arch/arm/mach-versatile/platsmp-realview.c +++ b/arch/arm/mach-versatile/platsmp-realview.c @@ -66,6 +66,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus) return; } map = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(map)) { pr_err("PLATSMP: No syscon regmap\n"); return; diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h index 3c7938fd40aa..32090b0fb250 100644 --- a/arch/arm/vfp/vfpinstr.h +++ b/arch/arm/vfp/vfpinstr.h @@ -64,33 +64,37 @@
#ifdef CONFIG_AS_VFP_VMRS_FPINST
-#define fmrx(_vfp_) ({ \ - u32 __v; \ - asm(".fpu vfpv2\n" \ - "vmrs %0, " #_vfp_ \ - : "=r" (__v) : : "cc"); \ - __v; \ - }) - -#define fmxr(_vfp_,_var_) \ - asm(".fpu vfpv2\n" \ - "vmsr " #_vfp_ ", %0" \ - : : "r" (_var_) : "cc") +#define fmrx(_vfp_) ({ \ + u32 __v; \ + asm volatile (".fpu vfpv2\n" \ + "vmrs %0, " #_vfp_ \ + : "=r" (__v) : : "cc"); \ + __v; \ +}) + +#define fmxr(_vfp_, _var_) ({ \ + asm volatile (".fpu vfpv2\n" \ + "vmsr " #_vfp_ ", %0" \ + : : "r" (_var_) : "cc"); \ +})
#else
#define vfpreg(_vfp_) #_vfp_
-#define fmrx(_vfp_) ({ \ - u32 __v; \ - asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \ - : "=r" (__v) : : "cc"); \ - __v; \ - }) - -#define fmxr(_vfp_,_var_) \ - asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \ - : : "r" (_var_) : "cc") +#define fmrx(_vfp_) ({ \ + u32 __v; \ + asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \ + "cr0, 0 @ fmrx %0, " #_vfp_ \ + : "=r" (__v) : : "cc"); \ + __v; \ +}) + +#define fmxr(_vfp_, _var_) ({ \ + asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \ + "cr0, 0 @ fmxr " #_vfp_ ", %0" \ + : : "r" (_var_) : "cc"); \ +})
#endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9e0c1ac3d13e..5ea7b3319671 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -420,7 +420,7 @@ config AMPERE_ERRATUM_AC03_CPU_38 default y help This option adds an alternative code sequence to work around Ampere - erratum AC03_CPU_38 on AmpereOne. + errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne.
The affected design reports FEAT_HAFDBS as not implemented in ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0 diff --git a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts index 47a389d9ff7d..9d74fa6bfed9 100644 --- a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts +++ b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts @@ -32,7 +32,7 @@ memory@80000000 { device_type = "memory"; reg = <0x0 0x80000000 0x3da00000>, <0x0 0xc0000000 0x40000000>, - <0x8 0x80000000 0x40000000>; + <0x8 0x80000000 0x80000000>; };
gpio-keys { diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi index 84ec6c1aa12b..2c184f9e0fc3 100644 --- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi @@ -731,7 +731,7 @@ opp-850000000 { opp-900000000-3 { opp-hz = /bits/ 64 <900000000>; opp-microvolt = <850000>; - opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; };
opp-900000000-4 { @@ -743,13 +743,13 @@ opp-900000000-4 { opp-900000000-5 { opp-hz = /bits/ 64 <900000000>; opp-microvolt = <825000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; };
opp-950000000-3 { opp-hz = /bits/ 64 <950000000>; opp-microvolt = <900000>; - opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; };
opp-950000000-4 { @@ -761,13 +761,13 @@ opp-950000000-4 { opp-950000000-5 { opp-hz = /bits/ 64 <950000000>; opp-microvolt = <850000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; };
opp-1000000000-3 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <950000>; - opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; };
opp-1000000000-4 { @@ -779,7 +779,7 @@ opp-1000000000-4 { opp-1000000000-5 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <875000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; }; };
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi index b78f408110bf..34e18eb5d7f4 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi @@ -1312,6 +1312,7 @@ &xhci3 { usb2-lpm-disable; vusb33-supply = <&mt6359_vusb_ldo_reg>; vbus-supply = <&usb_vbus>; + mediatek,u3p-dis-msk = <1>; };
#include <arm/cros-ec-keyboard.dtsi> diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index 20e6d90cc411..d21ba00a5bd5 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -2766,10 +2766,10 @@ dp_intf0: dp-intf@1c015000 { compatible = "mediatek,mt8195-dp-intf"; reg = <0 0x1c015000 0 0x1000>; interrupts = <GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH 0>; - clocks = <&vdosys0 CLK_VDO0_DP_INTF0>, - <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>, + clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>, + <&vdosys0 CLK_VDO0_DP_INTF0>, <&apmixedsys CLK_APMIXED_TVDPLL1>; - clock-names = "engine", "pixel", "pll"; + clock-names = "pixel", "engine", "pll"; status = "disabled"; };
@@ -3036,10 +3036,10 @@ dp_intf1: dp-intf@1c113000 { reg = <0 0x1c113000 0 0x1000>; interrupts = <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH 0>; power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>; - clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>, - <&vdosys1 CLK_VDO1_DPINTF>, + clocks = <&vdosys1 CLK_VDO1_DPINTF>, + <&vdosys1 CLK_VDO1_DP_INTF0_MM>, <&apmixedsys CLK_APMIXED_TVDPLL2>; - clock-names = "engine", "pixel", "pll"; + clock-names = "pixel", "engine", "pll"; status = "disabled"; };
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi index 44bea063aedb..f6766fa8df34 100644 --- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi @@ -1951,6 +1951,7 @@ apps_smmu: iommu@15000000 { reg = <0x0 0x15000000 0x0 0x100000>; #iommu-cells = <2>; #global-interrupts = <2>; + dma-coherent;
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>, @@ -2089,6 +2090,7 @@ pcie_smmu: iommu@15200000 { reg = <0x0 0x15200000 0x0 0x80000>; #iommu-cells = <2>; #global-interrupts = <2>; + dma-coherent;
interrupts = <GIC_SPI 920 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 921 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi index 4b72de43b71c..71d51febabc1 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi @@ -145,8 +145,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi index a877738c3048..edc942c84639 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi @@ -997,8 +997,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; };
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi index 3f01b096cfb7..d61f7894e55c 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi @@ -1004,8 +1004,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts index 294eb2de263d..f5e124b235c8 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts @@ -32,12 +32,12 @@ chosen { backlight: edp-backlight { compatible = "pwm-backlight"; power-supply = <&vcc_12v>; - pwms = <&pwm0 0 740740 0>; + pwms = <&pwm0 0 125000 0>; };
bat: battery { compatible = "simple-battery"; - charge-full-design-microamp-hours = <9800000>; + charge-full-design-microamp-hours = <10000000>; voltage-max-design-microvolt = <4350000>; voltage-min-design-microvolt = <3000000>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts index a337f547caf5..6a02db4f073f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts @@ -13,7 +13,7 @@
/ { model = "Hardkernel ODROID-M1"; - compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568"; + compatible = "hardkernel,odroid-m1", "rockchip,rk3568";
aliases { ethernet0 = &gmac0; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts index 2f954729f353..7897323376a5 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts @@ -123,7 +123,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 { no-map; };
- c66_1_dma_memory_region: c66-dma-memory@a6000000 { + c66_0_dma_memory_region: c66-dma-memory@a6000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa6000000 0x00 0x100000>; no-map; @@ -135,7 +135,7 @@ c66_0_memory_region: c66-memory@a6100000 { no-map; };
- c66_0_dma_memory_region: c66-dma-memory@a7000000 { + c66_1_dma_memory_region: c66-dma-memory@a7000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa7000000 0x00 0x100000>; no-map; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts index 42fe8eee9ec8..ccacb65683b5 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts @@ -119,7 +119,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 { no-map; };
- c66_1_dma_memory_region: c66-dma-memory@a6000000 { + c66_0_dma_memory_region: c66-dma-memory@a6000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa6000000 0x00 0x100000>; no-map; @@ -131,7 +131,7 @@ c66_0_memory_region: c66-memory@a6100000 { no-map; };
- c66_0_dma_memory_region: c66-dma-memory@a7000000 { + c66_1_dma_memory_region: c66-dma-memory@a7000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa7000000 0x00 0x100000>; no-map; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 5fd7caea4419..5a7dfeb8e8eb 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -143,6 +143,7 @@ #define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039
#define AMPERE_CPU_PART_AMPERE1 0xAC3 +#define AMPERE_CPU_PART_AMPERE1A 0xAC4
#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
@@ -212,6 +213,7 @@ #define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX) #define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX) #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) +#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A) #define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index ae35939f395b..1cdae1b4f03b 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -10,63 +10,63 @@ #include <asm/memory.h> #include <asm/sysreg.h>
-#define ESR_ELx_EC_UNKNOWN (0x00) -#define ESR_ELx_EC_WFx (0x01) +#define ESR_ELx_EC_UNKNOWN UL(0x00) +#define ESR_ELx_EC_WFx UL(0x01) /* Unallocated EC: 0x02 */ -#define ESR_ELx_EC_CP15_32 (0x03) -#define ESR_ELx_EC_CP15_64 (0x04) -#define ESR_ELx_EC_CP14_MR (0x05) -#define ESR_ELx_EC_CP14_LS (0x06) -#define ESR_ELx_EC_FP_ASIMD (0x07) -#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */ -#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */ +#define ESR_ELx_EC_CP15_32 UL(0x03) +#define ESR_ELx_EC_CP15_64 UL(0x04) +#define ESR_ELx_EC_CP14_MR UL(0x05) +#define ESR_ELx_EC_CP14_LS UL(0x06) +#define ESR_ELx_EC_FP_ASIMD UL(0x07) +#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */ +#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */ /* Unallocated EC: 0x0A - 0x0B */ -#define ESR_ELx_EC_CP14_64 (0x0C) -#define ESR_ELx_EC_BTI (0x0D) -#define ESR_ELx_EC_ILL (0x0E) +#define ESR_ELx_EC_CP14_64 UL(0x0C) +#define ESR_ELx_EC_BTI UL(0x0D) +#define ESR_ELx_EC_ILL UL(0x0E) /* Unallocated EC: 0x0F - 0x10 */ -#define ESR_ELx_EC_SVC32 (0x11) -#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */ -#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */ +#define ESR_ELx_EC_SVC32 UL(0x11) +#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */ +#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */ /* Unallocated EC: 0x14 */ -#define ESR_ELx_EC_SVC64 (0x15) -#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */ -#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ -#define ESR_ELx_EC_SYS64 (0x18) -#define ESR_ELx_EC_SVE (0x19) -#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ +#define ESR_ELx_EC_SVC64 UL(0x15) +#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */ +#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */ +#define ESR_ELx_EC_SYS64 UL(0x18) +#define ESR_ELx_EC_SVE UL(0x19) +#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */ /* Unallocated EC: 0x1B */ -#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */ -#define ESR_ELx_EC_SME (0x1D) +#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */ +#define ESR_ELx_EC_SME UL(0x1D) /* Unallocated EC: 0x1E */ -#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ -#define ESR_ELx_EC_IABT_LOW (0x20) -#define ESR_ELx_EC_IABT_CUR (0x21) -#define ESR_ELx_EC_PC_ALIGN (0x22) +#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */ +#define ESR_ELx_EC_IABT_LOW UL(0x20) +#define ESR_ELx_EC_IABT_CUR UL(0x21) +#define ESR_ELx_EC_PC_ALIGN UL(0x22) /* Unallocated EC: 0x23 */ -#define ESR_ELx_EC_DABT_LOW (0x24) -#define ESR_ELx_EC_DABT_CUR (0x25) -#define ESR_ELx_EC_SP_ALIGN (0x26) -#define ESR_ELx_EC_MOPS (0x27) -#define ESR_ELx_EC_FP_EXC32 (0x28) +#define ESR_ELx_EC_DABT_LOW UL(0x24) +#define ESR_ELx_EC_DABT_CUR UL(0x25) +#define ESR_ELx_EC_SP_ALIGN UL(0x26) +#define ESR_ELx_EC_MOPS UL(0x27) +#define ESR_ELx_EC_FP_EXC32 UL(0x28) /* Unallocated EC: 0x29 - 0x2B */ -#define ESR_ELx_EC_FP_EXC64 (0x2C) +#define ESR_ELx_EC_FP_EXC64 UL(0x2C) /* Unallocated EC: 0x2D - 0x2E */ -#define ESR_ELx_EC_SERROR (0x2F) -#define ESR_ELx_EC_BREAKPT_LOW (0x30) -#define ESR_ELx_EC_BREAKPT_CUR (0x31) -#define ESR_ELx_EC_SOFTSTP_LOW (0x32) -#define ESR_ELx_EC_SOFTSTP_CUR (0x33) -#define ESR_ELx_EC_WATCHPT_LOW (0x34) -#define ESR_ELx_EC_WATCHPT_CUR (0x35) +#define ESR_ELx_EC_SERROR UL(0x2F) +#define ESR_ELx_EC_BREAKPT_LOW UL(0x30) +#define ESR_ELx_EC_BREAKPT_CUR UL(0x31) +#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32) +#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33) +#define ESR_ELx_EC_WATCHPT_LOW UL(0x34) +#define ESR_ELx_EC_WATCHPT_CUR UL(0x35) /* Unallocated EC: 0x36 - 0x37 */ -#define ESR_ELx_EC_BKPT32 (0x38) +#define ESR_ELx_EC_BKPT32 UL(0x38) /* 
Unallocated EC: 0x39 */ -#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */ +#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */ /* Unallocated EC: 0x3B */ -#define ESR_ELx_EC_BRK64 (0x3C) +#define ESR_ELx_EC_BRK64 UL(0x3C) /* Unallocated EC: 0x3D - 0x3F */ -#define ESR_ELx_EC_MAX (0x3F) +#define ESR_ELx_EC_MAX UL(0x3F)
#define ESR_ELx_EC_SHIFT (26) #define ESR_ELx_EC_WIDTH (6) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index f23c1dc3f002..8f003db7a696 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -312,10 +312,10 @@ struct zt_context { ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
-#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) +#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))
#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \ - (SVE_SIG_ZREG_SIZE(vq) * n)) + (SVE_SIG_ZREG_SIZE(vq) * (n)))
#define ZA_SIG_CONTEXT_SIZE(vq) \ (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq)) @@ -326,7 +326,7 @@ struct zt_context {
#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
-#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n) +#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))
#define ZT_SIG_CONTEXT_SIZE(n) \ (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n)) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 57b1d6a68256..f8d94902fbb5 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -472,6 +472,14 @@ static const struct midr_range erratum_spec_ssbs_list[] = { }; #endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38 +static const struct midr_range erratum_ac03_cpu_38_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + MIDR_ALL_VERSIONS(MIDR_AMPERE1A), + {}, +}; +#endif + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -789,7 +797,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "AmpereOne erratum AC03_CPU_38", .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38, - ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1), + ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list), }, #endif { diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 6e4dba9eadef..8d21ab904f1a 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -415,9 +415,9 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res, return; }
-static __always_inline void do_ffa_mem_xfer(const u64 func_id, - struct arm_smccc_res *res, - struct kvm_cpu_context *ctxt) +static void __do_ffa_mem_xfer(const u64 func_id, + struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) { DECLARE_REG(u32, len, ctxt, 1); DECLARE_REG(u32, fraglen, ctxt, 2); @@ -428,9 +428,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, u32 offset, nr_ranges; int ret = 0;
- BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE && - func_id != FFA_FN64_MEM_LEND); - if (addr_mbz || npages_mbz || fraglen > len || fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) { ret = FFA_RET_INVALID_PARAMETERS; @@ -449,6 +446,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, goto out_unlock; }
+ if (len > ffa_desc_buf.len) { + ret = FFA_RET_NO_MEMORY; + goto out_unlock; + } + buf = hyp_buffers.tx; memcpy(buf, host_buffers.tx, fraglen);
@@ -498,6 +500,13 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, goto out_unlock; }
+#define do_ffa_mem_xfer(fid, res, ctxt) \ + do { \ + BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \ + (fid) != FFA_FN64_MEM_LEND); \ + __do_ffa_mem_xfer((fid), (res), (ctxt)); \ + } while (0); + static void do_ffa_mem_reclaim(struct arm_smccc_res *res, struct kvm_cpu_context *ctxt) { diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index e06ce147c0b7..fb87219fc3b4 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -116,7 +116,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs) { /* regs will be equal to current_pt_regs() */ struct kernel_clone_args args = { - .flags = regs->d1 & ~CSIGNAL, + .flags = (u32)(regs->d1) & ~CSIGNAL, .pidfd = (int __user *)regs->d3, .child_tid = (int __user *)regs->d4, .parent_tid = (int __user *)regs->d3, diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 6fc2248ca561..fccf742c55c2 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -96,6 +96,7 @@ config CRYPTO_AES_PPC_SPE
config CRYPTO_AES_GCM_P10 tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)" + depends on BROKEN depends on PPC64 && CPU_LITTLE_ENDIAN && VSX select CRYPTO_LIB_AES select CRYPTO_ALGAPI diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index 2bc53c646ccd..83848b534cb1 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h @@ -39,6 +39,12 @@ #define STDX_BE stringify_in_c(stdbrx) #endif
+#ifdef CONFIG_CC_IS_CLANG +#define DS_FORM_CONSTRAINT "Z<>" +#else +#define DS_FORM_CONSTRAINT "YZ<>" +#endif + #else /* 32-bit */
/* operations for longs and pointers */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 5bf6a4d49268..d1ea554c33ed 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -11,6 +11,7 @@ #include <asm/cmpxchg.h> #include <asm/barrier.h> #include <asm/asm-const.h> +#include <asm/asm-compat.h>
/* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with @@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v) if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter)); else - __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
return t; } @@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i) if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); else - __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); + __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i)); }
#define ATOMIC64_OP(op, asm_op) \ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index ccc91bf9b034..a81bd825087c 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -6,6 +6,7 @@ #include <asm/page.h> #include <asm/extable.h> #include <asm/kup.h> +#include <asm/asm-compat.h>
#ifdef __powerpc64__ /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */ @@ -92,12 +93,6 @@ __pu_failed: \ : label) #endif
-#ifdef CONFIG_CC_IS_CLANG -#define DS_FORM_CONSTRAINT "Z<>" -#else -#define DS_FORM_CONSTRAINT "YZ<>" -#endif - #ifdef __powerpc64__ #ifdef CONFIG_PPC_KERNEL_PREFIXED #define __put_user_asm2_goto(x, ptr, label) \ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 647b0b445e89..0c94db8e2bde 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -41,12 +41,12 @@ #include "head_32.h"
.macro compare_to_kernel_boundary scratch, addr -#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 +#if CONFIG_TASK_SIZE <= 0x80000000 && MODULES_VADDR >= 0x80000000 /* By simply checking Address >= 0x80000000, we know if its a kernel address */ not. \scratch, \addr #else rlwinm \scratch, \addr, 16, 0xfff8 - cmpli cr0, \scratch, PAGE_OFFSET@h + cmpli cr0, \scratch, TASK_SIZE@h #endif .endm
@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */ mfspr r10, SPRN_SRR0 mtspr SPRN_MD_EPN, r10 rlwinm r11, r10, 16, 0xfff8 - cmpli cr1, r11, PAGE_OFFSET@h + cmpli cr1, r11, TASK_SIZE@h mfspr r11, SPRN_M_TWB /* Get level 1 table */ blt+ cr1, 3f
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S index 48fc6658053a..894cb939cd2b 100644 --- a/arch/powerpc/kernel/vdso/gettimeofday.S +++ b/arch/powerpc/kernel/vdso/gettimeofday.S @@ -38,11 +38,7 @@ .else addi r4, r5, VDSO_DATA_OFFSET .endif -#ifdef __powerpc64__ bl CFUNC(DOTSYM(\funct)) -#else - bl \funct -#endif PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) #ifdef __powerpc64__ PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 324501630278..a947dff35d65 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -149,11 +149,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
mmu_mapin_immr();
- mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true); if (debug_pagealloc_enabled_or_kfence()) { top = boundary; } else { - mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true); mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); }
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h index 395518a1664e..a50a1d23523f 100644 --- a/arch/riscv/include/asm/kvm_vcpu_pmu.h +++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -10,6 +10,7 @@ #define __KVM_VCPU_RISCV_PMU_H
#include <linux/perf/riscv_pmu.h> +#include <asm/kvm_vcpu_insn.h> #include <asm/sbi.h>
#ifdef CONFIG_RISCV_PMU_SBI @@ -57,11 +58,11 @@ struct kvm_pmu {
#if defined(CONFIG_32BIT) #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ -{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, #else #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, #endif
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid); @@ -92,8 +93,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); struct kvm_pmu { };
+static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask) +{ + if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) { + *val = 0; + return KVM_INSN_CONTINUE_NEXT_SEPC; + } else { + return KVM_INSN_ILLEGAL_TRAP; + } +} + #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = 0, .count = 0, .func = NULL }, +{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {} static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c index 3348a61de7d9..2932791e9388 100644 --- a/arch/riscv/kernel/perf_callchain.c +++ b/arch/riscv/kernel/perf_callchain.c @@ -62,7 +62,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra); - while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) + while (fp && !(fp & 0x7) && entry->nr < entry->max_stack) fp = user_backtrace(entry, fp, 0); }
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 9cd97091c723..7a7fe40d0930 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -91,8 +91,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) run->riscv_sbi.args[3] = cp->a3; run->riscv_sbi.args[4] = cp->a4; run->riscv_sbi.args[5] = cp->a5; - run->riscv_sbi.ret[0] = cp->a0; - run->riscv_sbi.ret[1] = cp->a1; + run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED; + run->riscv_sbi.ret[1] = 0; }
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu, diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index 006041fbb65f..905ac8a3f716 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -14,6 +14,7 @@ #include <asm/insn.h> #include <asm/insn-eval.h> #include <asm/pgtable.h> +#include <asm/traps.h>
/* MMIO direction */ #define EPT_READ 0 @@ -405,6 +406,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve) return -EINVAL; }
+ if (!fault_in_kernel_space(ve->gla)) { + WARN_ONCE(1, "Access to userspace address is not supported"); + return -EINVAL; + } + /* * Reject EPT violation #VEs that split pages. * diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index cc5c6a326496..4110246aba12 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1602,6 +1602,7 @@ static void pt_event_stop(struct perf_event *event, int mode) * see comment in intel_pt_interrupt(). */ WRITE_ONCE(pt->handle_nmi, 0); + barrier();
pt_config_stop(event);
@@ -1653,11 +1654,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, return 0;
/* - * Here, handle_nmi tells us if the tracing is on + * There is no PT interrupt in this mode, so stop the trace and it will + * remain stopped while the buffer is copied. */ - if (READ_ONCE(pt->handle_nmi)) - pt_config_stop(event); - + pt_config_stop(event); pt_read_offset(buf); pt_update_head(pt);
@@ -1669,11 +1669,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, ret = perf_output_copy_aux(&pt->handle, handle, from, to);
/* - * If the tracing was on when we turned up, restart it. - * Compiler barrier not needed as we couldn't have been - * preempted by anything that touches pt->handle_nmi. + * Here, handle_nmi tells us if the tracing was on. + * If the tracing was on, restart it. */ - if (pt->handle_nmi) + if (READ_ONCE(pt->handle_nmi)) pt_config_start(event);
return ret; diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index f896eed4516c..529c36a98d9e 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -165,6 +165,14 @@ void acpi_generic_reduced_hw_init(void); void x86_default_set_root_pointer(u64 addr); u64 x86_default_get_root_pointer(void);
+#ifdef CONFIG_XEN_PV +/* A Xen PV domain needs a special acpi_os_ioremap() handling. */ +extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, + acpi_size size); +void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size); +#define acpi_os_ioremap acpi_os_ioremap +#endif + #else /* !CONFIG_ACPI */
#define acpi_lapic 0 diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 66837b8c67f1..f2e245741afc 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -63,7 +63,11 @@ extern u64 arch_irq_stat(void); #define local_softirq_pending_ref pcpu_hot.softirq_pending
#if IS_ENABLED(CONFIG_KVM_INTEL) -static inline void kvm_set_cpu_l1tf_flush_l1d(void) +/* + * This function is called from noinstr interrupt contexts + * and must be inlined to not get instrumentation. + */ +static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1); } @@ -78,7 +82,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void) return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d); } #else /* !IS_ENABLED(CONFIG_KVM_INTEL) */ -static inline void kvm_set_cpu_l1tf_flush_l1d(void) { } +static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { } #endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
#endif /* _ASM_X86_HARDIRQ_H */ diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index 13639e57e1f8..10603e185111 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -13,15 +13,18 @@
#include <asm/irq_stack.h>
+typedef void (*idtentry_t)(struct pt_regs *regs); + /** * DECLARE_IDTENTRY - Declare functions for simple IDT entry points * No error code pushed by hardware * @vector: Vector number (ignored for C) * @func: Function name of the entry point * - * Declares three functions: + * Declares four functions: * - The ASM entry point: asm_##func * - The XEN PV trap entry point: xen_##func (maybe unused) + * - The C handler called from the FRED event dispatcher (maybe unused) * - The C handler called from the ASM entry point * * Note: This is the C variant of DECLARE_IDTENTRY(). As the name says it @@ -31,6 +34,7 @@ #define DECLARE_IDTENTRY(vector, func) \ asmlinkage void asm_##func(void); \ asmlinkage void xen_asm_##func(void); \ + void fred_##func(struct pt_regs *regs); \ __visible void func(struct pt_regs *regs)
/** @@ -137,6 +141,17 @@ static __always_inline void __##func(struct pt_regs *regs, \ #define DEFINE_IDTENTRY_RAW(func) \ __visible noinstr void func(struct pt_regs *regs)
+/** + * DEFINE_FREDENTRY_RAW - Emit code for raw FRED entry points + * @func: Function name of the entry point + * + * @func is called from the FRED event dispatcher with interrupts disabled. + * + * See @DEFINE_IDTENTRY_RAW for further details. + */ +#define DEFINE_FREDENTRY_RAW(func) \ +noinstr void fred_##func(struct pt_regs *regs) + /** * DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points * Error code pushed by hardware @@ -197,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \ irqentry_state_t state = irqentry_enter(regs); \ u32 vector = (u32)(u8)error_code; \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ - kvm_set_cpu_l1tf_flush_l1d(); \ run_irq_on_irqstack_cond(__##func, regs, vector); \ instrumentation_end(); \ irqentry_exit(regs, state); \ @@ -233,17 +248,27 @@ static noinline void __##func(struct pt_regs *regs, u32 vector) #define DEFINE_IDTENTRY_SYSVEC(func) \ static void __##func(struct pt_regs *regs); \ \ +static __always_inline void instr_##func(struct pt_regs *regs) \ +{ \ + run_sysvec_on_irqstack_cond(__##func, regs); \ +} \ + \ __visible noinstr void func(struct pt_regs *regs) \ { \ irqentry_state_t state = irqentry_enter(regs); \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ - kvm_set_cpu_l1tf_flush_l1d(); \ - run_sysvec_on_irqstack_cond(__##func, regs); \ + instr_##func (regs); \ instrumentation_end(); \ irqentry_exit(regs, state); \ } \ \ +void fred_##func(struct pt_regs *regs) \ +{ \ + instr_##func (regs); \ +} \ + \ static noinline void __##func(struct pt_regs *regs)
/** @@ -260,19 +285,29 @@ static noinline void __##func(struct pt_regs *regs) #define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \ static __always_inline void __##func(struct pt_regs *regs); \ \ +static __always_inline void instr_##func(struct pt_regs *regs) \ +{ \ + __irq_enter_raw(); \ + __##func (regs); \ + __irq_exit_raw(); \ +} \ + \ __visible noinstr void func(struct pt_regs *regs) \ { \ irqentry_state_t state = irqentry_enter(regs); \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ - __irq_enter_raw(); \ - kvm_set_cpu_l1tf_flush_l1d(); \ - __##func (regs); \ - __irq_exit_raw(); \ + instr_##func (regs); \ instrumentation_end(); \ irqentry_exit(regs, state); \ } \ \ +void fred_##func(struct pt_regs *regs) \ +{ \ + instr_##func (regs); \ +} \ + \ static __always_inline void __##func(struct pt_regs *regs)
/** @@ -410,15 +445,18 @@ __visible noinstr void func(struct pt_regs *regs, \ /* C-Code mapping */ #define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW #define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW +#define DEFINE_FREDENTRY_NMI DEFINE_FREDENTRY_RAW
#ifdef CONFIG_X86_64 #define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST #define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST #define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST +#define DEFINE_FREDENTRY_MCE DEFINE_FREDENTRY_RAW
#define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST #define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST #define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST +#define DEFINE_FREDENTRY_DEBUG DEFINE_FREDENTRY_RAW #endif
#else /* !__ASSEMBLY__ */ @@ -655,23 +693,36 @@ DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi); DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot); DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single); DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function); +#else +# define fred_sysvec_reschedule_ipi NULL +# define fred_sysvec_reboot NULL +# define fred_sysvec_call_function_single NULL +# define fred_sysvec_call_function NULL #endif
#ifdef CONFIG_X86_LOCAL_APIC # ifdef CONFIG_X86_MCE_THRESHOLD DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold); +# else +# define fred_sysvec_threshold NULL # endif
# ifdef CONFIG_X86_MCE_AMD DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR, sysvec_deferred_error); +# else +# define fred_sysvec_deferred_error NULL # endif
# ifdef CONFIG_X86_THERMAL_VECTOR DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR, sysvec_thermal); +# else +# define fred_sysvec_thermal NULL # endif
# ifdef CONFIG_IRQ_WORK DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work); +# else +# define fred_sysvec_irq_work NULL # endif #endif
@@ -679,12 +730,16 @@ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work); DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi); DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi); DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi); +#else +# define fred_sysvec_kvm_posted_intr_ipi NULL +# define fred_sysvec_kvm_posted_intr_wakeup_ipi NULL +# define fred_sysvec_kvm_posted_intr_nested_ipi NULL #endif
#if IS_ENABLED(CONFIG_HYPERV) DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback); DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment); -DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0); +DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0); #endif
#if IS_ENABLED(CONFIG_ACRN_GUEST) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c55c0ef47a18..49c39f5dc1c9 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1901,3 +1901,14 @@ u64 x86_default_get_root_pointer(void) { return boot_params.acpi_rsdp_addr; } + +#ifdef CONFIG_XEN_PV +void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size) +{ + return ioremap_cache(phys, size); +} + +void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) = + x86_acpi_os_ioremap; +EXPORT_SYMBOL_GPL(acpi_os_ioremap); +#endif diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 166692f2d501..c7f8c3200e8d 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -474,24 +474,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void) { struct sgx_epc_page *page; int nid_of_current = numa_node_id(); - int nid = nid_of_current; + int nid_start, nid;
- if (node_isset(nid_of_current, sgx_numa_mask)) { - page = __sgx_alloc_epc_page_from_node(nid_of_current); - if (page) - return page; - } - - /* Fall back to the non-local NUMA nodes: */ - while (true) { - nid = next_node_in(nid, sgx_numa_mask); - if (nid == nid_of_current) - break; + /* + * Try local node first. If it doesn't have an EPC section, + * fall back to the non-local NUMA nodes. + */ + if (node_isset(nid_of_current, sgx_numa_mask)) + nid_start = nid_of_current; + else + nid_start = next_node_in(nid_of_current, sgx_numa_mask);
+ nid = nid_start; + do { page = __sgx_alloc_epc_page_from_node(nid); if (page) return page; - } + + nid = next_node_in(nid, sgx_numa_mask); + } while (nid != nid_start);
return ERR_PTR(-ENOMEM); } diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index 578d16fc040f..5481c7c5db30 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/serial_8250.h> +#include <linux/acpi.h> #include <asm/apic.h> #include <asm/io_apic.h> #include <asm/acpi.h> diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index c94dec6a1834..1f54eedc3015 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c @@ -9,6 +9,7 @@ #include <linux/pci.h> #include <linux/dmi.h> #include <linux/range.h> +#include <linux/acpi.h>
#include <asm/pci-direct.h> #include <linux/sort.h> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 4989095ab769..d595ef7c1de0 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -750,6 +750,27 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
#define LAM_U57_BITS 6
+static void enable_lam_func(void *__mm) +{ + struct mm_struct *mm = __mm; + + if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) { + write_cr3(__read_cr3() | mm->context.lam_cr3_mask); + set_tlbstate_lam_mode(mm); + } +} + +static void mm_enable_lam(struct mm_struct *mm) +{ + /* + * Even though the process must still be single-threaded at this + * point, kernel threads may be using the mm. IPI those kernel + * threads if they exist. + */ + on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true); + set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); +} + static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) { if (!cpu_feature_enabled(X86_FEATURE_LAM)) @@ -766,6 +787,10 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) if (mmap_write_lock_killable(mm)) return -EINTR;
+ /* + * MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from + * being enabled unless the process is single threaded: + */ if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { mmap_write_unlock(mm); return -EBUSY; @@ -782,9 +807,7 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) return -EINVAL; }
- write_cr3(__read_cr3() | mm->context.lam_cr3_mask); - set_tlbstate_lam_mode(mm); - set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); + mm_enable_lam(mm);
mmap_write_unlock(mm);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2a187c0cbd5b..ce77dac9a020 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -60,6 +60,7 @@ #include <linux/stackprotector.h> #include <linux/cpuhotplug.h> #include <linux/mc146818rtc.h> +#include <linux/acpi.h>
#include <asm/acpi.h> #include <asm/cacheinfo.h> diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 3f0718b4a7d2..268627a17cf0 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -8,6 +8,7 @@ #include <linux/ioport.h> #include <linux/export.h> #include <linux/pci.h> +#include <linux/acpi.h>
#include <asm/acpi.h> #include <asm/bios_ebda.h> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 66a2c4c0ae10..1380f3489777 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2443,6 +2443,29 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13)) + +int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) +{ + if (data & X2APIC_ICR_RESERVED_BITS) + return 1; + + /* + * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but + * only AMD requires it to be zero, Intel essentially just ignores the + * bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled, + * the CPU performs the reserved bits checks, i.e. the underlying CPU + * behavior will "win". Arbitrarily clear the BUSY bit, as there is no + * sane way to provide consistent behavior with respect to hardware. + */ + data &= ~APIC_ICR_BUSY; + + kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); + kvm_lapic_set_reg64(apic, APIC_ICR, data); + trace_kvm_apic_write(APIC_ICR, data); + return 0; +} + /* emulate APIC access in a trap manner */ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) { @@ -2460,7 +2483,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) * maybe-unecessary write, and both are in the noise anyways. */ if (apic_x2apic_mode(apic) && offset == APIC_ICR) - kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR)); + WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR))); else kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset)); } @@ -3153,16 +3176,6 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) return 0; }
-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) -{ - data &= ~APIC_ICR_BUSY; - - kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); - kvm_lapic_set_reg64(apic, APIC_ICR, data); - trace_kvm_apic_write(APIC_ICR, data); - return 0; -} - static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data) { u32 low; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 453ea95b667d..2fbae48f0b47 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -497,9 +497,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, { struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm); u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); - unsigned long new_lam = mm_lam_cr3_mask(next); bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy); unsigned cpu = smp_processor_id(); + unsigned long new_lam; u64 next_tlb_gen; bool need_flush; u16 new_asid; @@ -622,9 +622,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); }
- /* - * Start remote flushes and then read tlb_gen. - */ + /* Start receiving IPIs and then read tlb_gen (and LAM below) */ if (next != &init_mm) cpumask_set_cpu(cpu, mm_cpumask(next)); next_tlb_gen = atomic64_read(&next->context.tlb_gen); @@ -636,6 +634,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, barrier(); }
+ new_lam = mm_lam_cr3_mask(next); set_tlbstate_lam_mode(next); if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index b33afb240601..98a9bb92d75c 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev) return;
rp = pcie_find_root_port(dev); - if (!rp->pm_cap) + if (!rp || !rp->pm_cap) return;
rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >> @@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev) u16 pmc;
rp = pcie_find_root_port(dev); - if (!rp->pm_cap) + if (!rp || !rp->pm_cap) return;
pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc); diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 9d4a9311e819..6b201e64d8ab 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -2019,10 +2019,7 @@ void __init xen_reserve_special_pages(void)
void __init xen_pt_check_e820(void) { - if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) { - xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n"); - BUG(); - } + xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table"); }
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 4c2bf989edaf..11b5c042d4fa 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -70,6 +70,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/acpi.h>
#include <asm/cache.h> #include <asm/setup.h> @@ -80,6 +81,7 @@ #include <asm/xen/hypervisor.h> #include <xen/balloon.h> #include <xen/grant_table.h> +#include <xen/hvc-console.h>
#include "multicalls.h" #include "xen-ops.h" @@ -794,6 +796,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, return ret; }
+/* Remapped non-RAM areas */ +#define NR_NONRAM_REMAP 4 +static struct nonram_remap { + phys_addr_t maddr; + phys_addr_t paddr; + size_t size; +} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init; +static unsigned int nr_nonram_remap __ro_after_init; + +/* + * Do the real remapping of non-RAM regions as specified in the + * xen_nonram_remap[] array. + * In case of an error just crash the system. + */ +void __init xen_do_remap_nonram(void) +{ + unsigned int i; + unsigned int remapped = 0; + const struct nonram_remap *remap = xen_nonram_remap; + unsigned long pfn, mfn, end_pfn; + + for (i = 0; i < nr_nonram_remap; i++) { + end_pfn = PFN_UP(remap->paddr + remap->size); + pfn = PFN_DOWN(remap->paddr); + mfn = PFN_DOWN(remap->maddr); + while (pfn < end_pfn) { + if (!set_phys_to_machine(pfn, mfn)) + panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n", + pfn, mfn); + + pfn++; + mfn++; + remapped++; + } + + remap++; + } + + pr_info("Remapped %u non-RAM page(s)\n", remapped); +} + +#ifdef CONFIG_ACPI +/* + * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM + * regions into account. + * Any attempt to map an area crossing a remap boundary will produce a + * WARN() splat. + * phys is related to remap->maddr on input and will be rebased to remap->paddr. + */ +static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys, + acpi_size size) +{ + unsigned int i; + const struct nonram_remap *remap = xen_nonram_remap; + + for (i = 0; i < nr_nonram_remap; i++) { + if (phys + size > remap->maddr && + phys < remap->maddr + remap->size) { + WARN_ON(phys < remap->maddr || + phys + size > remap->maddr + remap->size); + phys += remap->paddr - remap->maddr; + break; + } + } + + return x86_acpi_os_ioremap(phys, size); +} +#endif /* CONFIG_ACPI */ + +/* + * Add a new non-RAM remap entry. + * In case of no free entry found, just crash the system. + */ +void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr, + unsigned long size) +{ + BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK)); + + if (nr_nonram_remap == NR_NONRAM_REMAP) { + xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n"); + BUG(); + } + +#ifdef CONFIG_ACPI + /* Switch to the Xen acpi_os_ioremap() variant. */ + if (nr_nonram_remap == 0) + acpi_os_ioremap = xen_acpi_os_ioremap; +#endif + + xen_nonram_remap[nr_nonram_remap].maddr = maddr; + xen_nonram_remap[nr_nonram_remap].paddr = paddr; + xen_nonram_remap[nr_nonram_remap].size = size; + + nr_nonram_remap++; +} + #ifdef CONFIG_XEN_DEBUG_FS #include <linux/debugfs.h> #include "debugfs.h" diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 380591028cb8..dc822124cacb 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -15,12 +15,12 @@ #include <linux/cpuidle.h> #include <linux/cpufreq.h> #include <linux/memory_hotplug.h> +#include <linux/acpi.h>
#include <asm/elf.h> #include <asm/vdso.h> #include <asm/e820/api.h> #include <asm/setup.h> -#include <asm/acpi.h> #include <asm/numa.h> #include <asm/idtentry.h> #include <asm/xen/hypervisor.h> @@ -47,6 +47,9 @@ bool xen_pv_pci_possible; /* E820 map used during setting up memory. */ static struct e820_table xen_e820_table __initdata;
+/* Number of initially usable memory pages. */ +static unsigned long ini_nr_pages __initdata; + /* * Buffer used to remap identity mapped pages. We only need the virtual space. * The physical page behind this address is remapped as needed to different @@ -213,7 +216,7 @@ static int __init xen_free_mfn(unsigned long mfn) * as a fallback if the remapping fails. */ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, - unsigned long end_pfn, unsigned long nr_pages) + unsigned long end_pfn) { unsigned long pfn, end; int ret; @@ -221,7 +224,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, WARN_ON(start_pfn > end_pfn);
/* Release pages first. */ - end = min(end_pfn, nr_pages); + end = min(end_pfn, ini_nr_pages); for (pfn = start_pfn; pfn < end; pfn++) { unsigned long mfn = pfn_to_mfn(pfn);
@@ -342,15 +345,14 @@ static void __init xen_do_set_identity_and_remap_chunk( * to Xen and not remapped. */ static unsigned long __init xen_set_identity_and_remap_chunk( - unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, - unsigned long remap_pfn) + unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn) { unsigned long pfn; unsigned long i = 0; unsigned long n = end_pfn - start_pfn;
if (remap_pfn == 0) - remap_pfn = nr_pages; + remap_pfn = ini_nr_pages;
while (i < n) { unsigned long cur_pfn = start_pfn + i; @@ -359,19 +361,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk( unsigned long remap_range_size;
/* Do not remap pages beyond the current allocation */ - if (cur_pfn >= nr_pages) { + if (cur_pfn >= ini_nr_pages) { /* Identity map remaining pages */ set_phys_range_identity(cur_pfn, cur_pfn + size); break; } - if (cur_pfn + size > nr_pages) - size = nr_pages - cur_pfn; + if (cur_pfn + size > ini_nr_pages) + size = ini_nr_pages - cur_pfn;
remap_range_size = xen_find_pfn_range(&remap_pfn); if (!remap_range_size) { pr_warn("Unable to find available pfn range, not remapping identity pages\n"); xen_set_identity_and_release_chunk(cur_pfn, - cur_pfn + left, nr_pages); + cur_pfn + left); break; } /* Adjust size to fit in current e820 RAM region */ @@ -398,18 +400,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk( }
static unsigned long __init xen_count_remap_pages( - unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, + unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pages) { - if (start_pfn >= nr_pages) + if (start_pfn >= ini_nr_pages) return remap_pages;
- return remap_pages + min(end_pfn, nr_pages) - start_pfn; + return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn; }
-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, +static unsigned long __init xen_foreach_remap_area( unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn, - unsigned long nr_pages, unsigned long last_val)) + unsigned long last_val)) { phys_addr_t start = 0; unsigned long ret_val = 0; @@ -437,8 +439,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) - ret_val = func(start_pfn, end_pfn, nr_pages, - ret_val); + ret_val = func(start_pfn, end_pfn, ret_val); start = end; } } @@ -495,6 +496,8 @@ void __init xen_remap_memory(void) set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
pr_info("Remapped %ld page(s)\n", remapped); + + xen_do_remap_nonram(); }
static unsigned long __init xen_get_pages_limit(void) @@ -568,7 +571,7 @@ static void __init xen_ignore_unusable(void) } }
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) +static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) { struct e820_entry *entry; unsigned mapcnt; @@ -625,6 +628,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size) return 0; }
+/* + * Swap a non-RAM E820 map entry with RAM above ini_nr_pages. + * Note that the E820 map is modified accordingly, but the P2M map isn't yet. + * The adaption of the P2M must be deferred until page allocation is possible. + */ +static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry) +{ + struct e820_entry *entry; + unsigned int mapcnt; + phys_addr_t mem_end = PFN_PHYS(ini_nr_pages); + phys_addr_t swap_addr, swap_size, entry_end; + + swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr); + swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size); + entry = xen_e820_table.entries; + + for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) { + entry_end = entry->addr + entry->size; + if (entry->type == E820_TYPE_RAM && entry->size >= swap_size && + entry_end - swap_size >= mem_end) { + /* Reduce RAM entry by needed space (whole pages). */ + entry->size -= swap_size; + + /* Add new entry at the end of E820 map. */ + entry = xen_e820_table.entries + + xen_e820_table.nr_entries; + xen_e820_table.nr_entries++; + + /* Fill new entry (keep size and page offset). */ + entry->type = swap_entry->type; + entry->addr = entry_end - swap_size + + swap_addr - swap_entry->addr; + entry->size = swap_entry->size; + + /* Convert old entry to RAM, align to pages. */ + swap_entry->type = E820_TYPE_RAM; + swap_entry->addr = swap_addr; + swap_entry->size = swap_size; + + /* Remember PFN<->MFN relation for P2M update. */ + xen_add_remap_nonram(swap_addr, entry_end - swap_size, + swap_size); + + /* Order E820 table and merge entries. */ + e820__update_table(&xen_e820_table); + + return; + } + + entry++; + } + + xen_raw_console_write("No suitable area found for required E820 entry remapping action\n"); + BUG(); +} + +/* + * Look for non-RAM memory types in a specific guest physical area and move + * those away if possible (ACPI NVS only for now). + */ +static void __init xen_e820_resolve_conflicts(phys_addr_t start, + phys_addr_t size) +{ + struct e820_entry *entry; + unsigned int mapcnt; + phys_addr_t end; + + if (!size) + return; + + end = start + size; + entry = xen_e820_table.entries; + + for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) { + if (entry->addr >= end) + return; + + if (entry->addr + entry->size > start && + entry->type == E820_TYPE_NVS) + xen_e820_swap_entry_with_ram(entry); + + entry++; + } +} + +/* + * Check for an area in physical memory to be usable for non-movable purposes. + * An area is considered to usable if the used E820 map lists it to be RAM or + * some other type which can be moved to higher PFNs while keeping the MFNs. + * In case the area is not usable, crash the system with an error message. + */ +void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size, + const char *component) +{ + xen_e820_resolve_conflicts(start, size); + + if (!xen_is_e820_reserved(start, size)) + return; + + xen_raw_console_write("Xen hypervisor allocated "); + xen_raw_console_write(component); + xen_raw_console_write(" memory conflicts with E820 map\n"); + BUG(); +} + /* * Like memcpy, but with physical addresses for dest and src. */ @@ -684,20 +792,20 @@ static void __init xen_reserve_xen_mfnlist(void) **/ char * __init xen_memory_setup(void) { - unsigned long max_pfn, pfn_s, n_pfns; + unsigned long pfn_s, n_pfns; phys_addr_t mem_end, addr, size, chunk_size; u32 type; int rc; struct xen_memory_map memmap; unsigned long max_pages; unsigned long extra_pages = 0; + unsigned long maxmem_pages; int i; int op;
xen_parse_512gb(); - max_pfn = xen_get_pages_limit(); - max_pfn = min(max_pfn, xen_start_info->nr_pages); - mem_end = PFN_PHYS(max_pfn); + ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages); + mem_end = PFN_PHYS(ini_nr_pages);
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); @@ -747,13 +855,35 @@ char * __init xen_memory_setup(void) /* Make sure the Xen-supplied memory map is well-ordered. */ e820__update_table(&xen_e820_table);
+ /* + * Check whether the kernel itself conflicts with the target E820 map. + * Failing now is better than running into weird problems later due + * to relocating (and even reusing) pages with kernel text or data. + */ + xen_chk_is_e820_usable(__pa_symbol(_text), + __pa_symbol(_end) - __pa_symbol(_text), + "kernel"); + + /* + * Check for a conflict of the xen_start_info memory with the target + * E820 map. + */ + xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info), + "xen_start_info"); + + /* + * Check for a conflict of the hypervisor supplied page tables with + * the target E820 map. + */ + xen_pt_check_e820(); + max_pages = xen_get_max_pages();
/* How many extra pages do we need due to remapping? */ - max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages); + max_pages += xen_foreach_remap_area(xen_count_remap_pages);
- if (max_pages > max_pfn) - extra_pages += max_pages - max_pfn; + if (max_pages > ini_nr_pages) + extra_pages += max_pages - ini_nr_pages;
/* * Clamp the amount of extra memory to a EXTRA_MEM_RATIO @@ -762,8 +892,8 @@ char * __init xen_memory_setup(void) * Make sure we have no memory above max_pages, as this area * isn't handled by the p2m management. */ - extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), - extra_pages, max_pages - max_pfn); + maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM)); + extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages); i = 0; addr = xen_e820_table.entries[0].addr; size = xen_e820_table.entries[0].size; @@ -819,23 +949,6 @@ char * __init xen_memory_setup(void)
e820__update_table(e820_table);
- /* - * Check whether the kernel itself conflicts with the target E820 map. - * Failing now is better than running into weird problems later due - * to relocating (and even reusing) pages with kernel text or data. - */ - if (xen_is_e820_reserved(__pa_symbol(_text), - __pa_symbol(__bss_stop) - __pa_symbol(_text))) { - xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n"); - BUG(); - } - - /* - * Check for a conflict of the hypervisor supplied page tables with - * the target E820 map. - */ - xen_pt_check_e820(); - xen_reserve_xen_mfnlist();
/* Check for a conflict of the initrd with the target E820 map. */ @@ -863,7 +976,7 @@ char * __init xen_memory_setup(void) * Set identity map on non-RAM pages and prepare remapping the * underlying RAM. */ - xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk); + xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages);
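The setup.c changes above relocate a conflicting non-RAM E820 entry (ACPI NVS) to the tail of a sufficiently large RAM entry while preserving its offset within the page, so the deferred P2M update in xen_do_remap_nonram() only has to re-point whole pages. A minimal user-space sketch of that alignment arithmetic follows; PAGE_SZ and the sample addresses are illustrative assumptions, not values taken from the patch.

/*
 * Sketch only: mirrors the PAGE_ALIGN_DOWN/PAGE_ALIGN bookkeeping of
 * xen_e820_swap_entry_with_ram(), outside the kernel.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ            4096ULL
#define PAGE_ALIGN_DOWN(x) ((x) & ~(PAGE_SZ - 1))
#define PAGE_ALIGN(x)      (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

int main(void)
{
	/* Hypothetical, unaligned ACPI NVS entry. */
	uint64_t nvs_addr = 0x7f8f1800ULL, nvs_size = 0x3000ULL;
	/* Hypothetical end of a RAM entry above the initial allocation. */
	uint64_t ram_end = 0x100000000ULL;

	/* Whole pages covering the NVS entry. */
	uint64_t swap_addr = PAGE_ALIGN_DOWN(nvs_addr);
	uint64_t swap_size = PAGE_ALIGN(nvs_addr - swap_addr + nvs_size);

	/* New location: end of the RAM entry, keeping the in-page offset. */
	uint64_t new_addr = ram_end - swap_size + (nvs_addr - swap_addr);

	printf("swap %#llx bytes of pages at %#llx\n",
	       (unsigned long long)swap_size, (unsigned long long)swap_addr);
	printf("NVS entry moves %#llx -> %#llx (page offset kept: %#llx)\n",
	       (unsigned long long)nvs_addr, (unsigned long long)new_addr,
	       (unsigned long long)(new_addr & (PAGE_SZ - 1)));
	return 0;
}

Keeping the page offset is what lets xen_add_remap_nonram() record a pure PFN<->MFN swap for the later P2M fixup.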
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 79cf93f2c92f..a6a21dd05527 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -43,8 +43,12 @@ void xen_mm_unpin_all(void); #ifdef CONFIG_X86_64 void __init xen_relocate_p2m(void); #endif +void __init xen_do_remap_nonram(void); +void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr, + unsigned long size);
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size); +void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size, + const char *component); unsigned long __ref xen_chk_extra_mem(unsigned long pfn); void __init xen_inv_extra_mem(void); void __init xen_remap_memory(void); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 3cce6de464a7..7e0dcded5713 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
/* if a merge has already been setup, then proceed with that first */ - if (bfqq->new_bfqq) - return bfqq->new_bfqq; + new_bfqq = bfqq->new_bfqq; + if (new_bfqq) { + while (new_bfqq->new_bfqq) + new_bfqq = new_bfqq->new_bfqq; + return new_bfqq; + }
/* * Check delayed stable merge for rotational or non-queueing @@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_put_queue(bfqq); }
-static void -bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd, + struct bfq_io_cq *bic, + struct bfq_queue *bfqq) { + struct bfq_queue *new_bfqq = bfqq->new_bfqq; + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", (unsigned long)new_bfqq->pid); /* Save weight raising and idle window of the merged queues */ @@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, bfq_reassign_last_bfqq(bfqq, new_bfqq);
bfq_release_process_ref(bfqd, bfqq); + + return new_bfqq; }
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, @@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, * fulfilled, i.e., bic can be redirected to new_bfqq * and bfqq can be put. */ - bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, - new_bfqq); - /* - * If we get here, bio will be queued into new_queue, - * so use new_bfqq to decide whether bio and rq can be - * merged. - */ - bfqq = new_bfqq; + while (bfqq != new_bfqq) + bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
/* * Change also bqfd->bio_bfqq, as @@ -5699,9 +5701,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq, * state before killing it. */ bfqq->bic = bic; - bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); - - return new_bfqq; + return bfq_merge_bfqqs(bfqd, bic, bfqq); }
/* @@ -6156,6 +6156,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) bool waiting, idle_timer_disabled = false;
if (new_bfqq) { + struct bfq_queue *old_bfqq = bfqq; /* * Release the request's reference to the old bfqq * and make sure one is taken to the shared queue. @@ -6172,18 +6173,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) * new_bfqq. */ if (bic_to_bfqq(RQ_BIC(rq), true, - bfq_actuator_index(bfqd, rq->bio)) == bfqq) - bfq_merge_bfqqs(bfqd, RQ_BIC(rq), - bfqq, new_bfqq); + bfq_actuator_index(bfqd, rq->bio)) == bfqq) { + while (bfqq != new_bfqq) + bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq); + }
- bfq_clear_bfqq_just_created(bfqq); + bfq_clear_bfqq_just_created(old_bfqq); /* * rq is about to be enqueued into new_bfqq, * release rq reference on bfqq */ - bfq_put_queue(bfqq); + bfq_put_queue(old_bfqq); rq->elv.priv[1] = new_bfqq; - bfqq = new_bfqq; }
bfq_update_io_thinktime(bfqd, bfqq); @@ -6721,7 +6722,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) { bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
- if (bfqq_process_refs(bfqq) == 1) { + if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) { bfqq->pid = current->pid; bfq_clear_bfqq_coop(bfqq); bfq_clear_bfqq_split_coop(bfqq); @@ -6819,6 +6820,31 @@ static void bfq_prepare_request(struct request *rq) rq->elv.priv[0] = rq->elv.priv[1] = NULL; }
+static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq) +{ + struct bfq_queue *new_bfqq = bfqq->new_bfqq; + struct bfq_queue *waker_bfqq = bfqq->waker_bfqq; + + if (!waker_bfqq) + return NULL; + + while (new_bfqq) { + if (new_bfqq == waker_bfqq) { + /* + * If waker_bfqq is in the merge chain and the current + * process is its only user, do not use it as the waker. + */ + if (bfqq_process_refs(waker_bfqq) == 1) + return NULL; + break; + } + + new_bfqq = new_bfqq->new_bfqq; + } + + return waker_bfqq; +} + /* * If needed, init rq, allocate bfq data structures associated with * rq, and increment reference counters in the destination bfq_queue * @@ -6880,7 +6906,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) /* If the queue was seeky for too long, break it apart. */ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) && !bic->bfqq_data[a_idx].stably_merged) { - struct bfq_queue *old_bfqq = bfqq; + struct bfq_queue *waker_bfqq = bfq_waker_bfqq(bfqq);
/* Update bic before losing reference to bfqq */ if (bfq_bfqq_in_large_burst(bfqq)) @@ -6900,7 +6926,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) bfqq_already_existing = true;
if (!bfqq_already_existing) { - bfqq->waker_bfqq = old_bfqq->waker_bfqq; + bfqq->waker_bfqq = waker_bfqq; bfqq->tentative_waker_bfqq = NULL;
/* @@ -6910,7 +6936,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) * woken_list of the waker. See * bfq_check_waker for details. */ - if (bfqq->waker_bfqq) + if (waker_bfqq) hlist_add_head(&bfqq->woken_list_node, &bfqq->waker_bfqq->woken_list); } @@ -6932,7 +6958,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) * addition, if the queue has also just been split, we have to * resume its state. */ - if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { + if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq && + bfqq_process_refs(bfqq) == 1) { bfqq->bic = bic; if (split) { /* diff --git a/block/partitions/core.c b/block/partitions/core.c index 962e4b57d64a..fc0ab5d8ab70 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -574,9 +574,11 @@ static bool blk_add_partition(struct gendisk *disk,
part = add_partition(disk, p, from, size, state->parts[p].flags, &state->parts[p].info); - if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) { - printk(KERN_ERR " %s: p%d could not be added: %ld\n", - disk->disk_name, p, -PTR_ERR(part)); + if (IS_ERR(part)) { + if (PTR_ERR(part) != -ENXIO) { + printk(KERN_ERR " %s: p%d could not be added: %pe\n", + disk->disk_name, p, part); + } return true; }
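The partition hunk above keys its error handling off the kernel's error-pointer convention: add_partition() returns either a valid pointer or a negative errno encoded in the pointer itself, and the %pe format specifier prints that errno symbolically instead of the old negated PTR_ERR() value. A small stand-alone sketch of the convention, with MAX_ERRNO and add_partition_stub() as assumed stand-ins:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

/* Stand-in for add_partition(): success or an encoded -ENXIO. */
static void *add_partition_stub(int fail)
{
	static int object;

	return fail ? ERR_PTR(-ENXIO) : (void *)&object;
}

int main(void)
{
	for (int fail = 0; fail <= 1; fail++) {
		void *part = add_partition_stub(fail);

		if (!IS_ERR(part))
			printf("partition added at %p\n", part);
		else if (PTR_ERR(part) != -ENXIO)
			printf("could not be added: %ld\n", PTR_ERR(part));
		else
			/* An unused slot (-ENXIO) is ignored, as in the hunk. */
			printf("empty slot, ignored\n");
	}
	return 0;
}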
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index a5da8ccd353e..43af5fa510c0 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring, char *req, *p; int len;
- WARN_ON(!id_0 && !id_1 && !id_2); - if (id_0) { lookup = id_0->data; len = id_0->len; } else if (id_1) { lookup = id_1->data; len = id_1->len; - } else { + } else if (id_2) { lookup = id_2->data; len = id_2->len; + } else { + WARN_ON(1); + return ERR_PTR(-EINVAL); }
/* Construct an identifier "id:<keyid>". */ diff --git a/crypto/xor.c b/crypto/xor.c index 8e72e5d5db0d..56aa3169e871 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -83,33 +83,30 @@ static void __init do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) { int speed; - int i, j; - ktime_t min, start, diff; + unsigned long reps; + ktime_t min, start, t0;
tmpl->next = template_list; template_list = tmpl;
preempt_disable();
- min = (ktime_t)S64_MAX; - for (i = 0; i < 3; i++) { - start = ktime_get(); - for (j = 0; j < REPS; j++) { - mb(); /* prevent loop optimization */ - tmpl->do_2(BENCH_SIZE, b1, b2); - mb(); - } - diff = ktime_sub(ktime_get(), start); - if (diff < min) - min = diff; - } + reps = 0; + t0 = ktime_get(); + /* delay start until time has advanced */ + while ((start = ktime_get()) == t0) + cpu_relax(); + do { + mb(); /* prevent loop optimization */ + tmpl->do_2(BENCH_SIZE, b1, b2); + mb(); + } while (reps++ < REPS || (t0 = ktime_get()) == start); + min = ktime_sub(t0, start);
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s] - if (!min) - min = 1; - speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); + speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min); tmpl->speed = speed;
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed); diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index d6934ba7a315..28217a995f79 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -167,8 +167,11 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); #define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
/* Shift and apply the mask for CPC reads/writes */ -#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \ +#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) & \ GENMASK(((reg)->bit_width) - 1, 0)) +#define MASK_VAL_WRITE(reg, prev_val, val) \ + ((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) | \ + ((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset))) \
static ssize_t show_feedback_ctrs(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -852,6 +855,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */ cpc_ptr->cpu_id = pr->id; + spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */ ret = acpi_get_psd(cpc_ptr, handle); @@ -1057,7 +1061,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) }
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - *val = MASK_VAL(reg, *val); + *val = MASK_VAL_READ(reg, *val);
return 0; } @@ -1066,9 +1070,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) { int ret_val = 0; int size; + u64 prev_val; void __iomem *vaddr = NULL; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cpc_reg *reg = ®_res->cpc_entry.reg; + struct cpc_desc *cpc_desc;
size = GET_BIT_WIDTH(reg);
@@ -1101,8 +1107,34 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) return acpi_os_write_memory((acpi_physical_address)reg->address, val, size);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - val = MASK_VAL(reg, val); + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + cpc_desc = per_cpu(cpc_desc_ptr, cpu); + if (!cpc_desc) { + pr_debug("No CPC descriptor for CPU:%d\n", cpu); + return -ENODEV; + } + + spin_lock(&cpc_desc->rmw_lock); + switch (size) { + case 8: + prev_val = readb_relaxed(vaddr); + break; + case 16: + prev_val = readw_relaxed(vaddr); + break; + case 32: + prev_val = readl_relaxed(vaddr); + break; + case 64: + prev_val = readq_relaxed(vaddr); + break; + default: + spin_unlock(&cpc_desc->rmw_lock); + return -EFAULT; + } + val = MASK_VAL_WRITE(reg, prev_val, val); + val |= prev_val; + }
switch (size) { case 8: @@ -1129,6 +1161,9 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) break; }
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + spin_unlock(&cpc_desc->rmw_lock); + return ret_val; }
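The CPPC change above splits the old MASK_VAL() into separate read and write forms so that a write replaces only the bit_width bits at bit_offset and leaves the rest of the shared system-memory register intact, with cpc_desc->rmw_lock serializing the read-modify-write. A user-space sketch of just the masking; the register layout (offset 8, width 8) and the names below are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

struct reg_field {
	uint8_t bit_width;
	uint8_t bit_offset;
};

/* Equivalent of MASK_VAL_READ(): extract the field. */
static uint64_t field_read(const struct reg_field *reg, uint64_t raw)
{
	return (raw >> reg->bit_offset) & GENMASK(reg->bit_width - 1, 0);
}

/* Equivalent of MASK_VAL_WRITE(): replace only the field bits. */
static uint64_t field_write(const struct reg_field *reg, uint64_t prev,
			    uint64_t val)
{
	uint64_t field_mask = GENMASK(reg->bit_width - 1, 0);

	return ((val & field_mask) << reg->bit_offset) |
	       (prev & ~(field_mask << reg->bit_offset));
}

int main(void)
{
	struct reg_field reg = { .bit_width = 8, .bit_offset = 8 };
	uint64_t prev = 0xdeadbeefULL;	/* previous register contents */
	uint64_t updated = field_write(&reg, prev, 0x42);

	printf("old field %#llx, new register %#llx, new field %#llx\n",
	       (unsigned long long)field_read(&reg, prev),
	       (unsigned long long)updated,
	       (unsigned long long)field_read(&reg, updated));
	return 0;
}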
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index a34d8578b3da..6ed5e9e56be2 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -544,8 +544,9 @@ int acpi_device_setup_files(struct acpi_device *dev) * If device has _STR, 'description' file is created */ if (acpi_has_method(dev->handle, "_STR")) { - status = acpi_evaluate_object(dev->handle, "_STR", - NULL, &buffer); + status = acpi_evaluate_object_typed(dev->handle, "_STR", + NULL, &buffer, + ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) buffer.pointer = NULL; dev->pnp.str_obj = buffer.pointer; diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c index ebd03e472955..0d1a82eeb4b0 100644 --- a/drivers/acpi/pmic/tps68470_pmic.c +++ b/drivers/acpi/pmic/tps68470_pmic.c @@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev) struct tps68470_pmic_opregion *opregion; acpi_status status;
- if (!dev || !tps68470_regmap) { - dev_warn(dev, "dev or regmap is NULL\n"); - return -EINVAL; - } + if (!tps68470_regmap) + return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
if (!handle) { dev_warn(dev, "acpi handle is NULL\n"); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 15f9d3d9c1cb..3a13b22c8d9a 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -508,6 +508,12 @@ static const struct dmi_system_id maingear_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), }, }, + { + /* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), + }, + }, { /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */ .matches = { diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 1168e29cae86..cd8745737545 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -618,6 +618,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { struct ata_queued_cmd *qc;
+ /* + * If the scmd was added to EH, via ata_qc_schedule_eh() -> + * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will + * have set DID_TIME_OUT (since libata does not have an abort + * handler). Thus, to clear DID_TIME_OUT, clear the host byte. + */ + set_host_byte(scmd, DID_OK); + ata_qc_for_each_raw(ap, qc, i) { if (qc->flags & ATA_QCFLAG_ACTIVE && qc->scsicmd == scmd) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index c91f8746289f..5377d094bf75 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1725,9 +1725,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); } else if (is_error && !have_sense) { ata_gen_ata_sense(qc); - } else { - /* Keep the SCSI ML and status byte, clear host byte. */ - cmd->result &= 0x0000ffff; }
ata_qc_done(qc); @@ -2393,7 +2390,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf, case ALL_SUB_MPAGES: n = ata_msense_control_spg0(dev, buf, changeable); n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); - n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); + n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE); n += ata_msense_control_ata_feature(dev, buf + n); return n; default: diff --git a/drivers/base/core.c b/drivers/base/core.c index cb323700e952..60a0a4630a5b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -4485,9 +4485,11 @@ EXPORT_SYMBOL_GPL(device_destroy); */ int device_rename(struct device *dev, const char *new_name) { + struct subsys_private *sp = NULL; struct kobject *kobj = &dev->kobj; char *old_device_name = NULL; int error; + bool is_link_renamed = false;
dev = get_device(dev); if (!dev) @@ -4502,7 +4504,7 @@ int device_rename(struct device *dev, const char *new_name) }
if (dev->class) { - struct subsys_private *sp = class_to_subsys(dev->class); + sp = class_to_subsys(dev->class);
if (!sp) { error = -EINVAL; @@ -4511,16 +4513,19 @@ int device_rename(struct device *dev, const char *new_name)
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name, new_name, kobject_namespace(kobj)); - subsys_put(sp); if (error) goto out; + + is_link_renamed = true; }
error = kobject_rename(kobj, new_name); - if (error) - goto out; - out: + if (error && is_link_renamed) + sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name, + old_device_name, kobject_namespace(kobj)); + subsys_put(sp); + put_device(dev);
kfree(old_device_name); diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index b58c42f1b1ce..0b18c6b46e65 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -844,6 +844,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, {} #endif
+/* + * Reject firmware file names with ".." path components. + * There are drivers that construct firmware file names from device-supplied + * strings, and we don't want some device to be able to tell us "I would like to + * be sent my firmware from ../../../etc/shadow, please". + * + * Search for ".." surrounded by either '/' or start/end of string. + * + * This intentionally only looks at the firmware name, not at the firmware base + * directory or at symlink contents. + */ +static bool name_contains_dotdot(const char *name) +{ + size_t name_len = strlen(name); + + return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 || + strstr(name, "/../") != NULL || + (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -864,6 +884,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name, goto out; }
+ if (name_contains_dotdot(name)) { + dev_warn(device, + "Firmware load for '%s' refused, path contains '..' component\n", + name); + ret = -EINVAL; + goto out; + } + ret = _request_firmware_prepare(&fw, name, device, buf, size, offset, opt_flags); if (ret <= 0) /* error or already assigned */ @@ -941,6 +969,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, * @name will be used as $FIRMWARE in the uevent environment and * should be distinctive enough not to be confused with any other * firmware image for this or any other device. + * It must not contain any ".." path components - "foo/bar..bin" is + * allowed, but "foo/../bar.bin" is not. * * Caller must hold the reference count of @device. * diff --git a/drivers/base/module.c b/drivers/base/module.c index b0b79b9c189d..0d5c5da367f7 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -66,27 +66,31 @@ int module_add_driver(struct module *mod, struct device_driver *drv) driver_name = make_driver_name(drv); if (!driver_name) { ret = -ENOMEM; - goto out; + goto out_remove_kobj; }
module_create_drivers_dir(mk); if (!mk->drivers_dir) { ret = -EINVAL; - goto out; + goto out_free_driver_name; }
ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name); if (ret) - goto out; + goto out_remove_drivers_dir;
kfree(driver_name);
return 0; -out: - sysfs_remove_link(&drv->p->kobj, "module"); + +out_remove_drivers_dir: sysfs_remove_link(mk->drivers_dir, driver_name); + +out_free_driver_name: kfree(driver_name);
+out_remove_kobj: + sysfs_remove_link(&drv->p->kobj, "module"); return ret; }
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 84443b6bd882..582564f8dde6 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -3135,7 +3135,7 @@ static int genpd_summary_one(struct seq_file *s, else snprintf(state, sizeof(state), "%s", status_lookup[genpd->status]); - seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); + seq_printf(s, "%-30s %-49s %u", genpd->name, state, genpd->performance_state);
/* * Modifications on the list require holding locks on both diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 6bc86106c7b2..102cc3034412 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3392,10 +3392,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local) void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local) { unsigned long flags; - if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) + spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); + if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) { + spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); return; + }
- spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); if (val == 0) { drbd_uuid_move_history(device); device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 287a8d1d3f70..87cf5883078f 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) ns.disk == D_OUTDATED) rv = SS_CONNECTED_OUTDATES;
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && + else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && (nc->verify_alg[0] == 0)) rv = SS_NO_VERIFY_ALG;
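The drbd_uuid_set_bm() change further up moves the early-return check under md.uuid_lock so that reading UI_BITMAP and updating it form one critical section; checking first and locking afterwards leaves a window in which another writer can change the UUIDs. A minimal sketch of the corrected pattern, using a pthread mutex as a stand-in for the kernel spinlock (bm_uuid and set_bm_uuid() are made-up names):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t uuid_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t bm_uuid;

static void set_bm_uuid(uint64_t val)
{
	pthread_mutex_lock(&uuid_lock);

	/* The check and the update are atomic w.r.t. other writers. */
	if (bm_uuid == 0 && val == 0) {
		pthread_mutex_unlock(&uuid_lock);
		return;
	}

	bm_uuid = val;
	pthread_mutex_unlock(&uuid_lock);
}

int main(void)
{
	set_bm_uuid(0);		/* no-op: old and new value are both zero */
	set_bm_uuid(0x1234);
	printf("bm_uuid = %#llx\n", (unsigned long long)bm_uuid);
	return 0;
}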
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 1089dc646b80..96b349148e57 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -181,6 +181,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd) { struct request *req = blk_mq_rq_from_pdu(cmd);
+ lockdep_assert_held(&cmd->lock); + + /* + * Clear INFLIGHT flag so that this cmd won't be completed in + * normal completion path + * + * INFLIGHT flag will be set when the cmd is queued to nbd next + * time. + */ + __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) blk_mq_requeue_request(req, true); } @@ -461,8 +472,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) nbd_mark_nsock_dead(nbd, nsock, 1); mutex_unlock(&nsock->tx_lock); } - mutex_unlock(&cmd->lock); nbd_requeue_cmd(cmd); + mutex_unlock(&cmd->lock); nbd_config_put(nbd); return BLK_EH_DONE; } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index bf7f68e90953..9cafbce1faf3 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -68,9 +68,6 @@ struct ublk_rq_data { struct llist_node node;
struct kref ref; - __u64 sector; - __u32 operation; - __u32 nr_zones; };
struct ublk_uring_cmd_pdu { @@ -215,6 +212,33 @@ static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
#ifdef CONFIG_BLK_DEV_ZONED
+struct ublk_zoned_report_desc { + __u64 sector; + __u32 operation; + __u32 nr_zones; +}; + +static DEFINE_XARRAY(ublk_zoned_report_descs); + +static int ublk_zoned_insert_report_desc(const struct request *req, + struct ublk_zoned_report_desc *desc) +{ + return xa_insert(&ublk_zoned_report_descs, (unsigned long)req, + desc, GFP_KERNEL); +} + +static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc( + const struct request *req) +{ + return xa_erase(&ublk_zoned_report_descs, (unsigned long)req); +} + +static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc( + const struct request *req) +{ + return xa_load(&ublk_zoned_report_descs, (unsigned long)req); +} + static int ublk_get_nr_zones(const struct ublk_device *ub) { const struct ublk_param_basic *p = &ub->params.basic; @@ -321,7 +345,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, unsigned int zones_in_request = min_t(unsigned int, remaining_zones, max_zones_per_request); struct request *req; - struct ublk_rq_data *pdu; + struct ublk_zoned_report_desc desc; blk_status_t status;
memset(buffer, 0, buffer_length); @@ -332,20 +356,23 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, goto out; }
- pdu = blk_mq_rq_to_pdu(req); - pdu->operation = UBLK_IO_OP_REPORT_ZONES; - pdu->sector = sector; - pdu->nr_zones = zones_in_request; + desc.operation = UBLK_IO_OP_REPORT_ZONES; + desc.sector = sector; + desc.nr_zones = zones_in_request; + ret = ublk_zoned_insert_report_desc(req, &desc); + if (ret) + goto free_req;
ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length, GFP_KERNEL); - if (ret) { - blk_mq_free_request(req); - goto out; - } + if (ret) + goto erase_desc;
status = blk_execute_rq(req, 0); ret = blk_status_to_errno(status); +erase_desc: + ublk_zoned_erase_report_desc(req); +free_req: blk_mq_free_request(req); if (ret) goto out; @@ -379,7 +406,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, { struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); struct ublk_io *io = &ubq->ios[req->tag]; - struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req); + struct ublk_zoned_report_desc *desc; u32 ublk_op;
switch (req_op(req)) { @@ -402,12 +429,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, ublk_op = UBLK_IO_OP_ZONE_RESET_ALL; break; case REQ_OP_DRV_IN: - ublk_op = pdu->operation; + desc = ublk_zoned_get_report_desc(req); + if (!desc) + return BLK_STS_IOERR; + ublk_op = desc->operation; switch (ublk_op) { case UBLK_IO_OP_REPORT_ZONES: iod->op_flags = ublk_op | ublk_req_build_flags(req); - iod->nr_zones = pdu->nr_zones; - iod->start_sector = pdu->sector; + iod->nr_zones = desc->nr_zones; + iod->start_sector = desc->sector; return BLK_STS_OK; default: return BLK_STS_IOERR; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c495fceda20a..0a58106207b0 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1352,7 +1352,10 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) if (!urb) return -ENOMEM;
- size = le16_to_cpu(data->intr_ep->wMaxPacketSize); + /* Use maximum HCI Event size so the USB stack handles + * ZPL/short-transfer automatically. + */ + size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags); if (!buf) { diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c index b715c8ab36e8..a65c79b08804 100644 --- a/drivers/bus/arm-integrator-lm.c +++ b/drivers/bus/arm-integrator-lm.c @@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev) return -ENODEV; } map = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(map)) { dev_err(dev, "could not find Integrator/AP system controller\n"); diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 08f3f039dbdd..154841916f56 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -578,6 +578,15 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = { .mru_default = 32768, };
+static const struct mhi_pci_dev_info mhi_telit_fe990a_info = { + .name = "telit-fe990a", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + /* Keep the list sorted based on the PID. New VID should be added as the last entry */ static const struct pci_device_id mhi_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), @@ -595,9 +604,9 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* Telit FN990 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, - /* Telit FE990 */ + /* Telit FE990A */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015), - .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, + .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */ diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index 4c08efe7f375..57a80ec93bad 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng) return ret;
ret = reset_control_reset(priv->reset); - if (ret) + if (ret) { + clk_disable_unprepare(priv->clk); return ret; + }
if (priv->mask_interrupts) { /* mask the interrupt */ diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 1abbff04a015..a55f5f2d35df 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -624,6 +624,7 @@ static int __maybe_unused cctrng_resume(struct device *dev) /* wait for Cryptocell reset completion */ if (!cctrng_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); + clk_disable_unprepare(drvdata->clk); return -EBUSY; }
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index aa993753ab12..1e3048f2bb38 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 30b4c288c1bb..c3fbbf4d3db7 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -47,6 +47,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (!ret) ret = tpm2_commit_space(chip, space, buf, &len); + else + tpm2_flush_space(chip);
out_rc: return ret ? ret : len; diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 363afdd4d1d3..d4d1007fe811 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -166,6 +166,9 @@ void tpm2_flush_space(struct tpm_chip *chip) struct tpm_space *space = &chip->work_space; int i;
+ if (!space) + return; + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) tpm2_flush_context(chip, space->context_tbl[i]); diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c index 91b5c6f14819..4e9594714b14 100644 --- a/drivers/clk/at91/sama7g5.c +++ b/drivers/clk/at91/sama7g5.c @@ -66,6 +66,7 @@ enum pll_component_id { PLL_COMPID_FRAC, PLL_COMPID_DIV0, PLL_COMPID_DIV1, + PLL_COMPID_MAX, };
/* @@ -165,7 +166,7 @@ static struct sama7g5_pll { u8 t; u8 eid; u8 safe_div; -} sama7g5_plls[][PLL_ID_MAX] = { +} sama7g5_plls[][PLL_COMPID_MAX] = { [PLL_ID_CPU] = { [PLL_COMPID_FRAC] = { .n = "cpupll_fracck", @@ -1038,7 +1039,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np) sama7g5_pmc->chws[PMC_MAIN] = hw;
for (i = 0; i < PLL_ID_MAX; i++) { - for (j = 0; j < 3; j++) { + for (j = 0; j < PLL_COMPID_MAX; j++) { struct clk_hw *parent_hw;
if (!sama7g5_plls[i][j].n) diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c index e208ddc51133..db7f40b07d1a 100644 --- a/drivers/clk/imx/clk-composite-7ulp.c +++ b/drivers/clk/imx/clk-composite-7ulp.c @@ -14,6 +14,7 @@ #include "../clk-fractional-divider.h" #include "clk.h"
+#define PCG_PR_MASK BIT(31) #define PCG_PCS_SHIFT 24 #define PCG_PCS_MASK 0x7 #define PCG_CGC_SHIFT 30 @@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name, struct clk_hw *hw; u32 val;
+ val = readl(reg); + if (!(val & PCG_PR_MASK)) { + pr_info("PCC PR is 0 for clk:%s, bypass\n", name); + return 0; + } + if (mux_present) { mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 27a08c50ac1d..ac5e9d60acb8 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = { .determine_rate = imx8m_clk_composite_mux_determine_rate, };
+static int imx8m_clk_composite_gate_enable(struct clk_hw *hw) +{ + struct clk_gate *gate = to_clk_gate(hw); + unsigned long flags; + u32 val; + + spin_lock_irqsave(gate->lock, flags); + + val = readl(gate->reg); + val |= BIT(gate->bit_idx); + writel(val, gate->reg); + + spin_unlock_irqrestore(gate->lock, flags); + + return 0; +} + +static void imx8m_clk_composite_gate_disable(struct clk_hw *hw) +{ + /* composite clk requires the disable hook */ +} + +static const struct clk_ops imx8m_clk_composite_gate_ops = { + .enable = imx8m_clk_composite_gate_enable, + .disable = imx8m_clk_composite_gate_disable, + .is_enabled = clk_gate_is_enabled, +}; + struct clk_hw *__imx8m_clk_hw_composite(const char *name, const char * const *parent_names, int num_parents, void __iomem *reg, @@ -217,10 +245,11 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, struct clk_mux *mux = NULL; const struct clk_ops *divider_ops; const struct clk_ops *mux_ops; + const struct clk_ops *gate_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) - goto fail; + return ERR_CAST(hw);
mux_hw = &mux->hw; mux->reg = reg; @@ -230,7 +259,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name,
div = kzalloc(sizeof(*div), GFP_KERNEL); if (!div) - goto fail; + goto free_mux;
div_hw = &div->hw; div->reg = reg; @@ -257,28 +286,32 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, div->flags = CLK_DIVIDER_ROUND_CLOSEST;
/* skip registering the gate ops if M4 is enabled */ - if (!mcore_booted) { - gate = kzalloc(sizeof(*gate), GFP_KERNEL); - if (!gate) - goto fail; - - gate_hw = &gate->hw; - gate->reg = reg; - gate->bit_idx = PCG_CGC_SHIFT; - gate->lock = &imx_ccm_lock; - } + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + goto free_div; + + gate_hw = &gate->hw; + gate->reg = reg; + gate->bit_idx = PCG_CGC_SHIFT; + gate->lock = &imx_ccm_lock; + if (!mcore_booted) + gate_ops = &clk_gate_ops; + else + gate_ops = &imx8m_clk_composite_gate_ops;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, mux_ops, div_hw, - divider_ops, gate_hw, &clk_gate_ops, flags); + divider_ops, gate_hw, gate_ops, flags); if (IS_ERR(hw)) - goto fail; + goto free_gate;
return hw;
-fail: +free_gate: kfree(gate); +free_div: kfree(div); +free_mux: kfree(mux); return ERR_CAST(hw); } diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c index 81164bdcd6cc..6c6c5a30f328 100644 --- a/drivers/clk/imx/clk-composite-93.c +++ b/drivers/clk/imx/clk-composite-93.c @@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
static void imx93_clk_composite_gate_disable(struct clk_hw *hw) { + /* + * Skip disabling the root clock gate if the mcore is enabled. + * The root clock may be used by the mcore. + */ + if (mcore_booted) + return; + imx93_clk_composite_gate_endisable(hw, 0); }
@@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, &clk_mux_ro_ops, div_hw, &clk_divider_ro_ops, NULL, NULL, flags); - } else if (!mcore_booted) { + } else { gate = kzalloc(sizeof(*gate), GFP_KERNEL); if (!gate) goto fail; @@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p &imx93_clk_composite_divider_ops, gate_hw, &imx93_clk_composite_gate_ops, flags | CLK_SET_RATE_NO_REPARENT); - } else { - hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, - mux_hw, &imx93_clk_composite_mux_ops, div_hw, - &imx93_clk_composite_divider_ops, NULL, - &imx93_clk_composite_gate_ops, - flags | CLK_SET_RATE_NO_REPARENT); }
if (IS_ERR(hw)) diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c index 44462ab50e51..1becba2b62d0 100644 --- a/drivers/clk/imx/clk-fracn-gppll.c +++ b/drivers/clk/imx/clk-fracn-gppll.c @@ -291,6 +291,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw) if (val & POWERUP_MASK) return 0;
+ if (pll->flags & CLK_FRACN_GPPLL_FRACN) + writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR), + pll->base + PLL_NUMERATOR); + val |= CLKMUX_BYPASS; writel_relaxed(val, pll->base + PLL_CTRL);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index f9394e94f69d..05c7a82b751f 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);
- clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk); - clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk); + clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk); + clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk);
imx_register_uart_clocks(); } diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c index 55ed211a5e0b..ab2a028b3027 100644 --- a/drivers/clk/imx/clk-imx8mp-audiomix.c +++ b/drivers/clk/imx/clk-imx8mp-audiomix.c @@ -146,6 +146,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = { PDM_SEL, 2, 0 \ }
+#define CLK_GATE_PARENT(gname, cname, pname) \ + { \ + gname"_cg", \ + IMX8MP_CLK_AUDIOMIX_##cname, \ + { .fw_name = pname, .name = pname }, NULL, 1, \ + CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \ + 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \ + } + struct clk_imx8mp_audiomix_sel { const char *name; int clkid; @@ -163,14 +172,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = { CLK_GATE("earc", EARC_IPG), CLK_GATE("ocrama", OCRAMA_IPG), CLK_GATE("aud2htx", AUD2HTX_IPG), - CLK_GATE("earc_phy", EARC_PHY), + CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"), CLK_GATE("sdma2", SDMA2_ROOT), CLK_GATE("sdma3", SDMA3_ROOT), CLK_GATE("spba2", SPBA2_ROOT), CLK_GATE("dsp", DSP_ROOT), CLK_GATE("dspdbg", DSPDBG_ROOT), CLK_GATE("edma", EDMA_ROOT), - CLK_GATE("audpll", AUDPLL_ROOT), + CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"), CLK_GATE("mu2", MU2_ROOT), CLK_GATE("mu3", MU3_ROOT), CLK_PDM, diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 670aa2bab301..e561ff7b135f 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -551,8 +551,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
- hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); - hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); + hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); + hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100); hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180); hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200); diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c index 245761e01897..6d458995f388 100644 --- a/drivers/clk/imx/clk-imx8qxp.c +++ b/drivers/clk/imx/clk-imx8qxp.c @@ -165,8 +165,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER); imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL); imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER); - imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0); imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
/* Audio SS */ imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL); @@ -199,18 +199,18 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
/* Display controller SS */ - imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0); - imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0); + imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0); - imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0); + imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS);
/* MIPI-LVDS SS */ diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 1701cce74df7..7464cf64803f 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -1757,6 +1757,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = { }; EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
+/** + * clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll + * + * @pll: clk alpha pll + * @regmap: register map + * @config: configuration to apply for pll + */ +void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config) +{ + /* + * If the bootloader left the PLL enabled it's likely that there are + * RCGs that will lock up if we disable the PLL below. + */ + if (trion_pll_is_enabled(pll, regmap)) { + pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n"); + return; + } + + clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l); + regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL); + clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), + config->config_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), + config->config_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), + config->config_ctl_hi1_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), + config->user_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), + config->user_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll), + config->user_ctl_hi1_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), + config->test_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), + config->test_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), + config->test_ctl_hi1_val); + + /* Disable PLL output */ + regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0); + + /* Set operation mode to OFF */ + regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); + + /* Place the PLL in STANDBY mode */ + regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); +} +EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure); + static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw) { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index 903fbab9b58e..3fd0ef41c72c 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -198,6 +198,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); +void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c index e17bb8b543b5..9a9e0852c91f 100644 --- a/drivers/clk/qcom/dispcc-sm8250.c +++ b/drivers/clk/qcom/dispcc-sm8250.c @@ -1359,8 +1359,13 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev) disp_cc_sm8250_clocks[DISP_CC_MDSS_EDP_GTC_CLK_SRC] = NULL; }
- clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); - clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) { + clk_lucid_5lpe_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); + clk_lucid_5lpe_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + } else { + clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); + clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); + }
/* Enable clock gating for MDP clocks */ regmap_update_bits(regmap, 0x8000, 0x10, 0x10); diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c index b9edeb2a221d..95b4c0548f50 100644 --- a/drivers/clk/qcom/dispcc-sm8550.c +++ b/drivers/clk/qcom/dispcc-sm8550.c @@ -196,7 +196,7 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = { static const struct parent_map disp_cc_parent_map_4[] = { { P_BI_TCXO, 0 }, { P_DP0_PHY_PLL_LINK_CLK, 1 }, - { P_DP1_PHY_PLL_VCO_DIV_CLK, 2 }, + { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, @@ -213,7 +213,7 @@ static const struct clk_parent_data disp_cc_parent_data_4[] = {
static const struct parent_map disp_cc_parent_map_5[] = { { P_BI_TCXO, 0 }, - { P_DSI0_PHY_PLL_OUT_BYTECLK, 4 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, };
@@ -400,7 +400,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = { .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, + .ops = &clk_rcg2_ops, }, };
@@ -562,7 +562,7 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { .parent_data = disp_cc_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -577,7 +577,7 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = { .parent_data = disp_cc_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -1611,7 +1611,7 @@ static struct gdsc mdss_gdsc = { .name = "mdss_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, };
static struct gdsc mdss_int2_gdsc = { @@ -1620,7 +1620,7 @@ static struct gdsc mdss_int2_gdsc = { .name = "mdss_int2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, };
static struct clk_regmap *disp_cc_sm8550_clocks[] = { diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c index f98591148a97..6a4877d88829 100644 --- a/drivers/clk/qcom/gcc-ipq5332.c +++ b/drivers/clk/qcom/gcc-ipq5332.c @@ -3388,6 +3388,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = { [GCC_QDSS_DAP_DIV_CLK_SRC] = &gcc_qdss_dap_div_clk_src.clkr, [GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr, [GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr, + [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr, [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr, [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr, [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr, diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index a24a35553e13..7343d2d7676b 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -409,7 +409,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(29), 0, 3, DFLAGS), DIV(0, "sclk_vop_pre", "sclk_vop_src", 0, RK2928_CLKSEL_CON(27), 8, 8, DFLAGS), - MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0, + MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2), diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c index 6994165e0395..d8ffcaefa480 100644 --- a/drivers/clk/rockchip/clk-rk3588.c +++ b/drivers/clk/rockchip/clk-rk3588.c @@ -526,7 +526,7 @@ PNAME(pmu_200m_100m_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src" }; PNAME(pmu_300m_24m_p) = { "clk_300m_src", "xin24m" }; PNAME(pmu_400m_24m_p) = { "clk_400m_src", "xin24m" }; PNAME(pmu_100m_50m_24m_src_p) = { "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" }; -PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "32k", "clk_pmu1_100m_src" }; +PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "xin32k", "clk_pmu1_100m_src" }; PNAME(hclk_pmu1_root_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" }; PNAME(hclk_pmu_cm0_root_p) = { "clk_pmu1_400m_src", "clk_pmu1_200m_src", "clk_pmu1_100m_src", "xin24m" }; PNAME(mclk_pdm0_p) = { "clk_pmu1_300m_src", "clk_pmu1_200m_src" }; diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c index 10cc1ec43925..36340ca42cc7 100644 --- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c +++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c @@ -145,7 +145,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
/* enable power domain and clocks */ pm_runtime_enable(priv->dev); - ret = pm_runtime_get_sync(priv->dev); + ret = pm_runtime_resume_and_get(priv->dev); if (ret < 0) return dev_err_probe(priv->dev, ret, "failed to turn on power\n");
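The StarFive vout change swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(). The older helper bumps the device usage count even when the resume fails, so an error path that simply returns leaks a reference; the newer helper drops the count internally on failure. A minimal sketch of the two call patterns, with demo_probe_*() and their device argument standing in for real driver code:

	#include <linux/pm_runtime.h>

	static int demo_probe_old(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			/* usage count was still bumped; caller must drop it */
			pm_runtime_put_noidle(dev);
			return ret;
		}
		return 0;
	}

	static int demo_probe_new(struct device *dev)
	{
		/* drops the usage count internally on failure */
		int ret = pm_runtime_resume_and_get(dev);

		if (ret < 0)
			return ret;
		return 0;
	}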
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index d964e3affd42..0eab7f3e2eab 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -240,6 +240,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) }
clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); if (IS_ERR(clk)) { pr_err("%s: failed to get atl clock %d from provider\n", __func__, i); diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c index b4afe3a67583..eac4c95c6127 100644 --- a/drivers/clocksource/timer-qcom.c +++ b/drivers/clocksource/timer-qcom.c @@ -233,6 +233,7 @@ static int __init msm_dt_timer_init(struct device_node *np) }
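The clk-dra7-atl fix addresses a common OF reference-count leak: of_parse_phandle_with_args() returns clkspec.np with an elevated refcount, and the caller must drop it with of_node_put() once the lookup is done, whether or not the clock lookup itself succeeds. A small sketch of the idiom; the "clocks"/"#clock-cells" names below are the usual convention, used here only for illustration:

	#include <linux/of.h>
	#include <linux/clk.h>
	#include <linux/err.h>

	static struct clk *demo_get_clk(struct device_node *np, int index)
	{
		struct of_phandle_args clkspec;
		struct clk *clk;
		int ret;

		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
						 index, &clkspec);
		if (ret)
			return ERR_PTR(ret);

		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);	/* drop the reference taken by the parser */

		return clk;
	}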
if (of_property_read_u32(np, "clock-frequency", &freq)) { + iounmap(cpu0_base); pr_err("Unknown frequency\n"); return -EINVAL; } @@ -243,7 +244,11 @@ static int __init msm_dt_timer_init(struct device_node *np) freq /= 4; writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
- return msm_timer_init(freq, 32, irq, !!percpu_offset); + ret = msm_timer_init(freq, 32, irq, !!percpu_offset); + if (ret) + iounmap(cpu0_base); + + return ret; } TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index d88ee87b1cd6..cb5d1c8fefeb 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -61,6 +61,9 @@ struct ti_cpufreq_soc_data { unsigned long efuse_shift; unsigned long rev_offset; bool multi_regulator; +/* Backward compatibility hack: Might have missing syscon */ +#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1 + u8 quirks; };
struct ti_cpufreq_data { @@ -182,6 +185,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = { .efuse_mask = BIT(3), .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, };
/* @@ -209,6 +213,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = { .efuse_mask = BIT(9), .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = true, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, };
/* @@ -223,6 +228,7 @@ static struct ti_cpufreq_soc_data am3517_soc_data = { .efuse_mask = 0, .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, };
static struct ti_cpufreq_soc_data am625_soc_data = { @@ -250,7 +256,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset, &efuse); - if (ret == -EIO) { + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + opp_data->soc_data->efuse_offset, 4); @@ -291,7 +297,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset, &revision); - if (ret == -EIO) { + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + opp_data->soc_data->rev_offset, 4); diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index e8094fc92491..c0fe92409175 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -8,6 +8,7 @@
#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+#include <linux/cleanup.h> #include <linux/cpuhotplug.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> @@ -267,19 +268,16 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, { struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu); struct device_node *state_node; - struct device_node *cpu_node; u32 *states; int i, ret;
- cpu_node = of_cpu_device_node_get(cpu); + struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu); if (!cpu_node) return -ENODEV;
states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL); - if (!states) { - ret = -ENOMEM; - goto fail; - } + if (!states) + return -ENOMEM;
/* Parse SBI specific details from state DT nodes */ for (i = 1; i < state_count; i++) { @@ -295,10 +293,8 @@ static int sbi_cpuidle_dt_init_states(struct device *dev,
pr_debug("sbi-state %#x index %d\n", states[i], i); } - if (i != state_count) { - ret = -ENODEV; - goto fail; - } + if (i != state_count) + return -ENODEV;
/* Initialize optional data, used for the hierarchical topology. */ ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu); @@ -308,10 +304,7 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, /* Store states in the per-cpu struct. */ data->states = states;
-fail: - of_node_put(cpu_node); - - return ret; + return 0; }
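The cpuidle-riscv-sbi rework above is a conversion to the scope-based cleanup helpers from <linux/cleanup.h>: tagging the pointer with __free(device_node) makes of_node_put() run automatically when the variable goes out of scope, so the early returns no longer need a common fail label. A compressed sketch of the pattern, assuming a made-up property check in place of the real parsing body:

	#include <linux/cleanup.h>
	#include <linux/of.h>
	#include <linux/errno.h>

	static int demo_init_cpu(int cpu)
	{
		/* of_node_put() runs automatically on every return path */
		struct device_node *np __free(device_node) = of_get_cpu_node(cpu, NULL);

		if (!np)
			return -ENODEV;

		if (!of_property_present(np, "enable-method"))	/* stand-in check */
			return -ENODEV;

		return 0;
	}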
static void sbi_cpuidle_deinit_cpu(int cpu) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 290c8500c247..65785dc5b73b 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -708,6 +708,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, GFP_KERNEL : GFP_ATOMIC; struct ahash_edesc *edesc;
+ sg_num = pad_sg_nents(sg_num); edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); if (!edesc) return NULL; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 17fb01853dbf..07e6f782b622 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1367,6 +1367,8 @@ void sev_pci_init(void) return;
err: + sev_dev_destroy(psp_master); + psp_master->sev_data = NULL; }
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index b97ce0ee7140..3463f5ee83c0 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -13,9 +13,7 @@ #include <linux/uacce.h> #include "hpre.h"
-#define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) -#define HPRE_COMM_CNT_CLR_CE 0x0 #define HPRE_CTRL_CNT_CLR_CE 0x301000 #define HPRE_FSM_MAX_CNT 0x301008 #define HPRE_VFG_AXQOS 0x30100c @@ -42,7 +40,6 @@ #define HPRE_HAC_INT_SET 0x301500 #define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 -#define HPRE_CORE_INT_DISABLE GENMASK(21, 0) #define HPRE_RDCHN_INI_ST 0x301a00 #define HPRE_CLSTR_BASE 0x302000 #define HPRE_CORE_EN_OFFSET 0x04 @@ -66,7 +63,6 @@ #define HPRE_CLSTR_ADDR_INTRVL 0x1000 #define HPRE_CLUSTER_INQURY 0x100 #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 -#define HPRE_TIMEOUT_ABNML_BIT 6 #define HPRE_PASID_EN_BIT 9 #define HPRE_REG_RD_INTVRL_US 10 #define HPRE_REG_RD_TMOUT_US 1000 @@ -202,9 +198,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, - {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE}, - {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, - {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, + {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E}, + {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E}, + {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E}, {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, @@ -357,6 +353,8 @@ static struct dfx_diff_registers hpre_diff_regs[] = { }, };
+static const struct hisi_qm_err_ini hpre_err_ini; + bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; @@ -653,11 +651,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE); writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
- /* HPRE need more time, we close this interrupt */ - val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); - val |= BIT(HPRE_TIMEOUT_ABNML_BIT); - writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); - if (qm->ver >= QM_HW_V3) writel(HPRE_RSA_ENB | HPRE_ECC_ENB, qm->io_base + HPRE_TYPES_ENB); @@ -666,9 +659,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); writel(0x0, qm->io_base + HPRE_BD_ENDIAN); - writel(0x0, qm->io_base + HPRE_INT_MASK); writel(0x0, qm->io_base + HPRE_POISON_BYPASS); - writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE); writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); @@ -758,7 +749,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm)
static void hpre_hw_error_enable(struct hisi_qm *qm) { - u32 ce, nfe; + u32 ce, nfe, err_en;
ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); @@ -775,7 +766,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm) hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */ - writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); + err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE; + writel(~err_en, qm->io_base + HPRE_INT_MASK); }
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) @@ -1161,6 +1153,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; + qm->err_ini = &hpre_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } @@ -1350,8 +1343,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
hpre_open_sva_prefetch(qm);
- qm->err_ini = &hpre_err_ini; - qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) @@ -1380,6 +1371,18 @@ static int hpre_probe_init(struct hpre *hpre) return 0; }
+static void hpre_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hpre_cnt_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + hpre_show_last_regs_uninit(qm); + hpre_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); +} + static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_qm *qm; @@ -1405,7 +1408,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm); if (ret) - goto err_with_err_init; + goto err_with_probe_init;
ret = hpre_debugfs_init(qm); if (ret) @@ -1442,9 +1445,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL);
-err_with_err_init: - hpre_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); +err_with_probe_init: + hpre_probe_uninit(qm);
err_with_qm_init: hisi_qm_uninit(qm); @@ -1465,13 +1467,7 @@ static void hpre_remove(struct pci_dev *pdev) hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL);
- if (qm->fun_type == QM_HW_PF) { - hpre_cnt_regs_clear(qm); - qm->debug.curr_qm_qp_num = 0; - hpre_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); - } - + hpre_probe_uninit(qm); hisi_qm_uninit(qm); }
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 562df5c77c63..1b00edbbfe26 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -455,6 +455,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { };
static void qm_irqs_unregister(struct hisi_qm *qm); +static int qm_reset_device(struct hisi_qm *qm);
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) { @@ -4105,6 +4106,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set) return -ETIMEDOUT; }
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +{ + u32 nfe_enb = 0; + + /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ + if (qm->ver >= QM_HW_V3) + return; + + if (!qm->err_status.is_dev_ecc_mbit && + qm->err_status.is_qm_ecc_mbit && + qm->err_ini->close_axi_master_ooo) { + qm->err_ini->close_axi_master_ooo(qm); + } else if (qm->err_status.is_dev_ecc_mbit && + !qm->err_status.is_qm_ecc_mbit && + !qm->err_ini->close_axi_master_ooo) { + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + } +} + static int qm_vf_reset_prepare(struct hisi_qm *qm, enum qm_stop_reason stop_reason) { @@ -4169,6 +4192,8 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return ret; }
+ qm_dev_ecc_mbit_handle(qm); + /* PF obtains the information of VF by querying the register. */ qm_cmd_uninit(qm);
@@ -4199,33 +4224,26 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return 0; }
-static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +static int qm_master_ooo_check(struct hisi_qm *qm) { - u32 nfe_enb = 0; + u32 val; + int ret;
- /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ - if (qm->ver >= QM_HW_V3) - return; + /* Check the ooo register of the device before resetting the device. */ + writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, + val, (val == ACC_MASTER_TRANS_RETURN_RW), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) + pci_warn(qm->pdev, "Bus lock! Please reset system.\n");
- if (!qm->err_status.is_dev_ecc_mbit && - qm->err_status.is_qm_ecc_mbit && - qm->err_ini->close_axi_master_ooo) { - qm->err_ini->close_axi_master_ooo(qm); - } else if (qm->err_status.is_dev_ecc_mbit && - !qm->err_status.is_qm_ecc_mbit && - !qm->err_ini->close_axi_master_ooo) { - nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); - writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, - qm->io_base + QM_RAS_NFE_ENABLE); - writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); - } + return ret; }
-static int qm_soft_reset(struct hisi_qm *qm) +static int qm_soft_reset_prepare(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val;
/* Ensure all doorbells and mailboxes received by QM */ ret = qm_check_req_recv(qm); @@ -4246,30 +4264,23 @@ static int qm_soft_reset(struct hisi_qm *qm) return ret; }
- qm_dev_ecc_mbit_handle(qm); - - /* OOO register set and check */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - /* If bus lock, reset chip */ - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - }
if (qm->err_ini->close_sva_prefetch) qm->err_ini->close_sva_prefetch(qm);
ret = qm_set_pf_mse(qm, false); - if (ret) { + if (ret) pci_err(pdev, "Fails to disable pf MSE bit.\n"); - return ret; - } + + return ret; +} + +static int qm_reset_device(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev;
/* The reset related sub-control registers are not in PCI BAR */ if (ACPI_HANDLE(&pdev->dev)) { @@ -4288,12 +4299,23 @@ static int qm_soft_reset(struct hisi_qm *qm) pci_err(pdev, "Reset step %llu failed!\n", value); return -EIO; } - } else { - pci_err(pdev, "No reset method!\n"); - return -EINVAL; + + return 0; }
- return 0; + pci_err(pdev, "No reset method!\n"); + return -EINVAL; +} + +static int qm_soft_reset(struct hisi_qm *qm) +{ + int ret; + + ret = qm_soft_reset_prepare(qm); + if (ret) + return ret; + + return qm_reset_device(qm); }
static int qm_vf_reset_done(struct hisi_qm *qm) @@ -5261,6 +5283,35 @@ static int qm_get_pci_res(struct hisi_qm *qm) return ret; }
+static int qm_clear_device(struct hisi_qm *qm) +{ + acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + /* Device does not support reset, return */ + if (!qm->err_ini->err_info_init) + return 0; + qm->err_ini->err_info_init(qm); + + if (!handle) + return 0; + + /* No reset method, return */ + if (!acpi_has_method(handle, qm->err_info.acpi_rst)) + return 0; + + ret = qm_master_ooo_check(qm); + if (ret) { + writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + return ret; + } + + return qm_reset_device(qm); +} + static int hisi_qm_pci_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5290,8 +5341,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) goto err_get_pci_res; }
+ ret = qm_clear_device(qm); + if (ret) + goto err_free_vectors; + return 0;
+err_free_vectors: + pci_free_irq_vectors(pdev); err_get_pci_res: qm_put_pci_res(qm); err_disable_pcidev: @@ -5557,7 +5614,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val;
ret = qm->ops->set_msi(qm, false); if (ret) { @@ -5565,18 +5621,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) return ret; }
- /* shutdown OOO register */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - }
ret = qm_set_pf_mse(qm, false); if (ret) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index bf02a6b2eed4..cf7b6a37e7df 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1061,9 +1061,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret;
- qm->err_ini = &sec_err_ini; - qm->err_ini->err_info_init(qm); - ret = sec_set_user_domain_and_cache(qm); if (ret) return ret; @@ -1118,6 +1115,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &sec_devices; + qm->err_ini = &sec_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1182,6 +1180,12 @@ static int sec_probe_init(struct sec_dev *sec)
static void sec_probe_uninit(struct hisi_qm *qm) { + if (qm->fun_type == QM_HW_VF) + return; + + sec_debug_regs_clear(qm); + sec_show_last_regs_uninit(qm); + sec_close_sva_prefetch(qm); hisi_qm_dev_err_uninit(qm); }
@@ -1274,7 +1278,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: - sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); err_qm_uninit: sec_qm_uninit(qm); @@ -1296,11 +1299,6 @@ static void sec_remove(struct pci_dev *pdev) sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL); - - if (qm->fun_type == QM_HW_PF) - sec_debug_regs_clear(qm); - sec_show_last_regs_uninit(qm); - sec_probe_uninit(qm);
sec_qm_uninit(qm); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index cd7ecb2180bf..9d47b3675da7 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1150,8 +1150,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl; ctrl->hisi_zip = hisi_zip; - qm->err_ini = &hisi_zip_err_ini; - qm->err_ini->err_info_init(qm);
ret = hisi_zip_set_user_domain_and_cache(qm); if (ret) @@ -1212,6 +1210,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &zip_devices; + qm->err_ini = &hisi_zip_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1278,6 +1277,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) return 0; }
+static void hisi_zip_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hisi_zip_show_last_regs_uninit(qm); + hisi_zip_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); +} + static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_zip *hisi_zip; @@ -1304,7 +1313,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm); if (ret) - goto err_dev_err_uninit; + goto err_probe_uninit;
ret = hisi_zip_debugfs_init(qm); if (ret) @@ -1341,9 +1350,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL);
-err_dev_err_uninit: - hisi_zip_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); +err_probe_uninit: + hisi_zip_probe_uninit(qm);
err_qm_uninit: hisi_zip_qm_uninit(qm); @@ -1364,8 +1372,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); - hisi_zip_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); + hisi_zip_probe_uninit(qm); hisi_zip_qm_uninit(qm); }
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index c963cd9e88d1..6edfd0546673 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -388,10 +388,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK; if (!size) { - info->dvsec_range[i] = (struct range) { - .start = 0, - .end = CXL_RESOURCE_NONE, - }; continue; }
@@ -409,12 +405,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
- info->dvsec_range[i] = (struct range) { + info->dvsec_range[ranges++] = (struct range) { .start = base, .end = base + size - 1 }; - - ranges++; }
info->ranges = ranges; diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index a2984e9bed3a..a0edb61a5a01 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -245,7 +245,7 @@ static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc) if (igen6_tom <= _4GB) return eaddr + igen6_tolud - _4GB;
- if (eaddr < _4GB) + if (eaddr >= igen6_tom) return eaddr + igen6_tolud - igen6_tom;
return eaddr; diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index c4fc64cbecd0..6ddc90d7ba7c 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -9,6 +9,8 @@ #include <linux/edac.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/sizes.h> #include <linux/interrupt.h> #include <linux/of.h>
@@ -299,6 +301,7 @@ struct synps_ecc_status { /** * struct synps_edac_priv - DDR memory controller private instance data. * @baseaddr: Base address of the DDR controller. + * @reglock: Concurrent CSRs access lock. * @message: Buffer for framing the event specific info. * @stat: ECC status information. * @p_data: Platform data. @@ -313,6 +316,7 @@ struct synps_ecc_status { */ struct synps_edac_priv { void __iomem *baseaddr; + spinlock_t reglock; char message[SYNPS_EDAC_MSG_SIZE]; struct synps_ecc_status stat; const struct synps_platform_data *p_data; @@ -334,6 +338,7 @@ struct synps_edac_priv { * @get_mtype: Get mtype. * @get_dtype: Get dtype. * @get_ecc_state: Get ECC state. + * @get_mem_info: Get EDAC memory info * @quirks: To differentiate IPs. */ struct synps_platform_data { @@ -341,6 +346,9 @@ struct synps_platform_data { enum mem_type (*get_mtype)(const void __iomem *base); enum dev_type (*get_dtype)(const void __iomem *base); bool (*get_ecc_state)(void __iomem *base); +#ifdef CONFIG_EDAC_DEBUG + u64 (*get_mem_info)(struct synps_edac_priv *priv); +#endif int quirks; };
@@ -399,6 +407,25 @@ static int zynq_get_error_info(struct synps_edac_priv *priv) return 0; }
+#ifdef CONFIG_EDAC_DEBUG +/** + * zynqmp_get_mem_info - Get the current memory info. + * @priv: DDR memory controller private instance data. + * + * Return: host interface address. + */ +static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv) +{ + u64 hif_addr = 0, linear_addr; + + linear_addr = priv->poison_addr; + if (linear_addr >= SZ_32G) + linear_addr = linear_addr - SZ_32G + SZ_2G; + hif_addr = linear_addr >> 3; + return hif_addr; +} +#endif + /** * zynqmp_get_error_info - Get the current ECC error info. * @priv: DDR memory controller private instance data. @@ -408,7 +435,8 @@ static int zynq_get_error_info(struct synps_edac_priv *priv) static int zynqmp_get_error_info(struct synps_edac_priv *priv) { struct synps_ecc_status *p; - u32 regval, clearval = 0; + u32 regval, clearval; + unsigned long flags; void __iomem *base;
base = priv->baseaddr; @@ -452,10 +480,14 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv) p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK); p->ueinfo.data = readl(base + ECC_UESYND0_OFST); out: - clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT; - clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; + spin_lock_irqsave(&priv->reglock, flags); + + clearval = readl(base + ECC_CLR_OFST) | + ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT | + ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; writel(clearval, base + ECC_CLR_OFST); - writel(0x0, base + ECC_CLR_OFST); + + spin_unlock_irqrestore(&priv->reglock, flags);
return 0; } @@ -515,24 +547,41 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
static void enable_intr(struct synps_edac_priv *priv) { + unsigned long flags; + /* Enable UE/CE Interrupts */ - if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) - writel(DDR_UE_MASK | DDR_CE_MASK, - priv->baseaddr + ECC_CLR_OFST); - else + if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) { writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+ return; + } + + spin_lock_irqsave(&priv->reglock, flags); + + writel(DDR_UE_MASK | DDR_CE_MASK, + priv->baseaddr + ECC_CLR_OFST); + + spin_unlock_irqrestore(&priv->reglock, flags); }
static void disable_intr(struct synps_edac_priv *priv) { + unsigned long flags; + /* Disable UE/CE Interrupts */ - if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) - writel(0x0, priv->baseaddr + ECC_CLR_OFST); - else + if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) { writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, priv->baseaddr + DDR_QOS_IRQ_DB_OFST); + + return; + } + + spin_lock_irqsave(&priv->reglock, flags); + + writel(0, priv->baseaddr + ECC_CLR_OFST); + + spin_unlock_irqrestore(&priv->reglock, flags); }
/** @@ -576,8 +625,6 @@ static irqreturn_t intr_handler(int irq, void *dev_id) /* v3.0 of the controller does not have this register */ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); - else - enable_intr(priv);
return IRQ_HANDLED; } @@ -899,6 +946,9 @@ static const struct synps_platform_data zynqmp_edac_def = { .get_mtype = zynqmp_get_mtype, .get_dtype = zynqmp_get_dtype, .get_ecc_state = zynqmp_get_ecc_state, +#ifdef CONFIG_EDAC_DEBUG + .get_mem_info = zynqmp_get_mem_info, +#endif .quirks = (DDR_ECC_INTR_SUPPORT #ifdef CONFIG_EDAC_DEBUG | DDR_ECC_DATA_POISON_SUPPORT @@ -952,10 +1002,16 @@ MODULE_DEVICE_TABLE(of, synps_edac_match); static void ddr_poison_setup(struct synps_edac_priv *priv) { int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval; + const struct synps_platform_data *p_data; int index; ulong hif_addr = 0;
- hif_addr = priv->poison_addr >> 3; + p_data = priv->p_data; + + if (p_data->get_mem_info) + hif_addr = p_data->get_mem_info(priv); + else + hif_addr = priv->poison_addr >> 3;
for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) { if (priv->row_shift[index]) @@ -1359,6 +1415,7 @@ static int mc_probe(struct platform_device *pdev) priv = mci->pvt_info; priv->baseaddr = baseaddr; priv->p_data = p_data; + spin_lock_init(&priv->reglock);
mc_init(mci, pdev);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 6274b86eb943..73cc2f2dcbf9 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -598,11 +598,11 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
break; + } default: WARN_ON(1); break; } - }
/* Drop the idr's reference */ client_put(client); diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index e123de6e8c67..aa02392265d3 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -467,6 +467,13 @@ static int scmi_optee_chan_free(int id, void *p, void *data) struct scmi_chan_info *cinfo = p; struct scmi_optee_channel *channel = cinfo->transport_info;
+ /* + * Different protocols might share the same chan info, so a previous + * call might have already freed the structure. + */ + if (!channel) + return 0; + mutex_lock(&scmi_optee_private->mu); list_del(&channel->link); mutex_unlock(&scmi_optee_private->mu); diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index 7acbac16eae0..95da291c3083 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -115,7 +115,7 @@ void efi_retrieve_tpm2_eventlog(void) }
/* Allocate space for the logs and copy them. */ - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 104a5ad8397d..198687545407 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -209,7 +209,7 @@ struct amd_sriov_msg_pf2vf_info { uint32_t pcie_atomic_ops_support_flags; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE]; -}; +} __packed;
struct amd_sriov_msg_vf2pf_info_header { /* the total structure size in byte */ @@ -267,7 +267,7 @@ struct amd_sriov_msg_vf2pf_info {
/* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE]; -}; +} __packed;
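The amdgv_sriovmsg.h hunks add __packed to the PF/VF message structures; these are exchanged with firmware through shared memory, so their layout must not depend on the compiler's padding choices. A generic illustration of what the attribute changes, using an invented struct rather than the SR-IOV message format:

	#include <stdint.h>
	#include <stdio.h>

	struct msg_unpacked {
		uint8_t  version;
		uint32_t payload;	/* compiler may insert 3 bytes of padding before this */
	};

	struct msg_packed {
		uint8_t  version;
		uint32_t payload;
	} __attribute__((packed));	/* the kernel spells this __packed */

	int main(void)
	{
		/* typically prints 8 and 5 on common ABIs */
		printf("%zu %zu\n", sizeof(struct msg_unpacked), sizeof(struct msg_packed));
		return 0;
	}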
/* mailbox message send from guest to host */ enum amd_sriov_mailbox_request_message { diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index d95b2dc78063..157e898dc382 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -2065,26 +2065,29 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder) fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { struct edid *edid; - int edid_size = - max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); - edid = kmalloc(edid_size, GFP_KERNEL); + int edid_size; + + if (fake_edid_record->ucFakeEDIDLength == 128) + edid_size = fake_edid_record->ucFakeEDIDLength; + else + edid_size = fake_edid_record->ucFakeEDIDLength * 128; + edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0], + edid_size, GFP_KERNEL); if (edid) { - memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], - fake_edid_record->ucFakeEDIDLength); - if (drm_edid_is_valid(edid)) { adev->mode_info.bios_hardcoded_edid = edid; adev->mode_info.bios_hardcoded_edid_size = edid_size; - } else + } else { kfree(edid); + } } + record += struct_size(fake_edid_record, + ucFakeEDIDString, + edid_size); + } else { + /* empty fake edid record must be 3 bytes long */ + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; } - record += fake_edid_record->ucFakeEDIDLength ? - struct_size(fake_edid_record, - ucFakeEDIDString, - fake_edid_record->ucFakeEDIDLength) : - /* empty fake edid record must be 3 bytes long */ - sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f0ebf686b06f..7731ce7762b7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4037,6 +4037,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 +#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, @@ -4051,6 +4052,21 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, return;
amdgpu_acpi_get_backlight_caps(&caps); + + /* validate the firmware value is sane */ + if (caps.caps_valid) { + int spread = caps.max_input_signal - caps.min_input_signal; + + if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + caps.min_input_signal < AMDGPU_DM_DEFAULT_MIN_BACKLIGHT || + spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + spread < AMDGPU_DM_MIN_SPREAD) { + DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", + caps.min_input_signal, caps.max_input_signal); + caps.caps_valid = false; + } + } + if (caps.caps_valid) { dm->backlight_caps[bl_idx].caps_valid = true; if (caps.aux_support) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 3880ddf1c820..7fb11445a28f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -246,7 +246,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */ - if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port)) + if (is_synaptics_cascaded_panamera(aconnector->dc_link, port)) aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux) @@ -1115,7 +1115,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; - dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, @@ -1266,6 +1266,9 @@ static bool is_dsc_need_re_compute( } }
+ if (new_stream_on_link_num == 0) + return false; + if (new_stream_on_link_num == 0) return false;
@@ -1583,7 +1586,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream, { struct dc_dsc_policy dsc_policy = {0};
- dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp * 16, diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index fe3078b8789e..01c07545ef6b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -100,7 +100,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( */ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, - struct dc_dsc_policy *policy); + struct dc_dsc_policy *policy, + const enum dc_link_encoding_format link_encoding);
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index ba47a1c8eec1..d59af329d000 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -214,7 +214,11 @@ bool dcn30_set_output_transfer_func(struct dc *dc, } }
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + if (mpc->funcs->set_output_gamma) + mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + else + DC_LOG_ERROR("%s: set_output_gamma function pointer is NULL.\n", __func__); + return ret; }
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 230be292ff35..9edc9b0e3f08 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -861,7 +861,7 @@ static bool setup_dsc_config(
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
- dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy); + dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy, link_encoding); pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right; pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -1134,7 +1134,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, - struct dc_dsc_policy *policy) + struct dc_dsc_policy *policy, + const enum dc_link_encoding_format link_encoding) { uint32_t bpc = 0;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ef3a67409021..803586f4267a 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -133,7 +133,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
v_total = div64_u64(div64_u64(((unsigned long long)( frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total), 1000000); + stream->timing.h_total) + 500000, 1000000);
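The freesync change rounds the v_total computation to the nearest integer instead of truncating: adding half the divisor before dividing by 1,000,000 is the standard trick, equivalent to the kernel's DIV_ROUND_CLOSEST() for non-negative values. A plain C demonstration of the difference:

	#include <stdint.h>
	#include <stdio.h>

	/* Round-to-nearest integer division for non-negative operands. */
	static uint64_t div_round_closest_u64(uint64_t n, uint64_t d)
	{
		return (n + d / 2) / d;
	}

	int main(void)
	{
		/* 1999999 / 1000000 truncates to 1 but rounds to 2 */
		printf("%llu %llu\n",
		       (unsigned long long)(1999999ULL / 1000000ULL),
		       (unsigned long long)div_round_closest_u64(1999999ULL, 1000000ULL));
		return 0;
	}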
/* v_total cannot be less than nominal */ if (v_total < stream->timing.v_total) { diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 0efcbc73f2a4..5e43a40a5d52 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -411,22 +411,6 @@ static const struct drm_connector_funcs lt8912_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, };
-static enum drm_mode_status -lt8912_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - if (mode->clock > 150000) - return MODE_CLOCK_HIGH; - - if (mode->hdisplay > 1920) - return MODE_BAD_HVALUE; - - if (mode->vdisplay > 1080) - return MODE_BAD_VVALUE; - - return MODE_OK; -} - static int lt8912_connector_get_modes(struct drm_connector *connector) { const struct drm_edid *drm_edid; @@ -452,7 +436,6 @@ static int lt8912_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = { .get_modes = lt8912_connector_get_modes, - .mode_valid = lt8912_connector_mode_valid, };
static void lt8912_bridge_mode_set(struct drm_bridge *bridge, @@ -594,6 +577,23 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge) drm_bridge_hpd_disable(lt->hdmi_port); }
+static enum drm_mode_status +lt8912_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + if (mode->clock > 150000) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > 1920) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > 1080) + return MODE_BAD_VVALUE; + + return MODE_OK; +} + static enum drm_connector_status lt8912_bridge_detect(struct drm_bridge *bridge) { @@ -624,6 +624,7 @@ static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge, static const struct drm_bridge_funcs lt8912_bridge_funcs = { .attach = lt8912_bridge_attach, .detach = lt8912_bridge_detach, + .mode_valid = lt8912_bridge_mode_valid, .mode_set = lt8912_bridge_mode_set, .enable = lt8912_bridge_enable, .detect = lt8912_bridge_detect, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 5302bebbe38c..1456abd5b9dd 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1173,7 +1173,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data) struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev; - ctx->drm_dev = drm_dev; + ipp->drm_dev = drm_dev; exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 29e021ec6901..659112da47b6 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -67,6 +67,8 @@ struct mtk_drm_crtc { /* lock for display hardware access */ struct mutex hw_lock; bool config_updating; + /* lock for config_updating to cmd buffer */ + spinlock_t config_lock; };
struct mtk_crtc_state { @@ -104,11 +106,16 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) { + unsigned long flags; + drm_crtc_handle_vblank(&mtk_crtc->base); + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) { mtk_drm_crtc_finish_page_flip(mtk_crtc); mtk_crtc->pending_needs_vblank = false; } + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); }
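The mediatek CRTC changes introduce config_lock because config_updating is written from the atomic-commit path but read from vblank and CMDQ completion context; without a lock the interrupt side can observe a half-updated state and finish a page flip too early. A stripped-down sketch of the pattern, with demo_* placeholders rather than the driver's own structures:

	#include <linux/spinlock.h>

	struct demo_crtc {
		spinlock_t config_lock;
		bool config_updating;
		bool pending_needs_vblank;
	};

	/* Called from interrupt context (e.g. a vblank handler). */
	static void demo_handle_vblank(struct demo_crtc *c)
	{
		unsigned long flags;

		spin_lock_irqsave(&c->config_lock, flags);
		if (!c->config_updating && c->pending_needs_vblank) {
			/* ... complete the page flip here ... */
			c->pending_needs_vblank = false;
		}
		spin_unlock_irqrestore(&c->config_lock, flags);
	}

	/* Called from process context while a new configuration is applied. */
	static void demo_begin_update(struct demo_crtc *c)
	{
		unsigned long flags;

		spin_lock_irqsave(&c->config_lock, flags);
		c->config_updating = true;
		spin_unlock_irqrestore(&c->config_lock, flags);
	}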
#if IS_REACHABLE(CONFIG_MTK_CMDQ) @@ -291,12 +298,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client); struct mtk_crtc_state *state; unsigned int i; + unsigned long flags;
if (data->sta < 0) return;
state = to_mtk_crtc_state(mtk_crtc->base.state);
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags); + if (mtk_crtc->config_updating) { + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + goto ddp_cmdq_cb_out; + } + state->pending_config = false;
if (mtk_crtc->pending_planes) { @@ -323,6 +337,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) mtk_crtc->pending_async_planes = false; }
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + +ddp_cmdq_cb_out: + mtk_crtc->cmdq_vblank_cnt = 0; wake_up(&mtk_crtc->cb_blocking_queue); } @@ -432,6 +450,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) { struct drm_device *drm = mtk_crtc->base.dev; struct drm_crtc *crtc = &mtk_crtc->base; + unsigned long flags; int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { @@ -463,10 +482,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) pm_runtime_put(drm->dev);
if (crtc->state->event && !crtc->state->active) { - spin_lock_irq(&crtc->dev->event_lock); + spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; - spin_unlock_irq(&crtc->dev->event_lock); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } }
@@ -555,9 +574,14 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, struct mtk_drm_private *priv = crtc->dev->dev_private; unsigned int pending_planes = 0, pending_async_planes = 0; int i; + unsigned long flags;
mutex_lock(&mtk_crtc->hw_lock); + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); mtk_crtc->config_updating = true; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + if (needs_vblank) mtk_crtc->pending_needs_vblank = true;
@@ -611,7 +635,10 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); } #endif + spin_lock_irqsave(&mtk_crtc->config_lock, flags); mtk_crtc->config_updating = false; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + mutex_unlock(&mtk_crtc->hw_lock); }
@@ -1014,6 +1041,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); mutex_init(&mtk_crtc->hw_lock); + spin_lock_init(&mtk_crtc->config_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ) i = priv->mbox_index++; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e5916c106796..8c2758a18a19 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = submit->ring; struct drm_gem_object *obj; uint32_t *ptr, dwords; @@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit } }
+ a5xx_gpu->last_seqno[ring->id] = submit->seqno; a5xx_flush(gpu, ring, true); a5xx_preempt_trigger(gpu);
@@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1);
- /* Enable local preemption for finegrain preemption */ + /* + * Disable local preemption by default because it requires + * user-space to be aware of it and provide additional handling + * to restore rendering state or do various flushes on switch. + */ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); - OUT_RING(ring, 0x1); + OUT_RING(ring, 0x0);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */ OUT_PKT7(ring, CP_YIELD_ENABLE, 1); @@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* Write the fence to the scratch register */ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); OUT_RING(ring, submit->seqno); + a5xx_gpu->last_seqno[ring->id] = submit->seqno;
/* * Execute a CACHE_FLUSH_TS event. This will ensure that the diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index c7187bcc5e90..9c0d701fe4b8 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -34,8 +34,10 @@ struct a5xx_gpu { struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS]; struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS]; uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + uint32_t last_seqno[MSM_GPU_MAX_RINGS];
atomic_t preempt_state; + spinlock_t preempt_start_lock; struct timer_list preempt_timer;
struct drm_gem_object *shadow_bo; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index f58dd564d122..0469fea55010 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Return the highest priority ringbuffer with something in it */ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); unsigned long flags; int i;
@@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
spin_lock_irqsave(&ring->preempt_lock, flags); empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + if (!empty && ring == a5xx_gpu->cur_ring) + empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i]; spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty) @@ -97,12 +101,19 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) if (gpu->nr_rings == 1) return;
+ /* + * Serialize preemption start to ensure that we always make + * decision on latest state. Otherwise we can get stuck in + * lower priority or empty ring. + */ + spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags); + /* * Try to start preemption by moving from NONE to START. If * unsuccessful, a preemption is already in flight */ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START)) - return; + goto out;
/* Get the next ring to preempt to */ ring = get_next_ring(gpu); @@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) set_preempt_state(a5xx_gpu, PREEMPT_ABORT); update_wptr(gpu, a5xx_gpu->cur_ring); set_preempt_state(a5xx_gpu, PREEMPT_NONE); - return; + goto out; }
+ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags); + /* Make sure the wptr doesn't update while we're in motion */ spin_lock_irqsave(&ring->preempt_lock, flags); a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring); @@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* And actually start the preemption */ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1); + return; + +out: + spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags); }
void a5xx_preempt_irq(struct msm_gpu *gpu) @@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu) update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE); + + /* + * Try to trigger preemption again in case there was a submit or + * retire during ring switch + */ + a5xx_preempt_trigger(gpu); }
void a5xx_preempt_hw_init(struct msm_gpu *gpu) @@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu) return;
for (i = 0; i < gpu->nr_rings; i++) { + a5xx_gpu->preempt[i]->data = 0; + a5xx_gpu->preempt[i]->info = 0; a5xx_gpu->preempt[i]->wptr = 0; a5xx_gpu->preempt[i]->rptr = 0; a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova; @@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu) } }
+ spin_lock_init(&a5xx_gpu->preempt_start_lock); timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 96deaf85c0cd..4127e2762dcd 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -468,7 +468,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname) ret = request_firmware_direct(&fw, fwname, drm->dev); if (!ret) { DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n", - newname); + fwname); adreno_gpu->fwloc = FW_LOCATION_LEGACY; goto out; } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index 56a3063545ec..12d07e93a4c4 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -356,7 +356,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
drm_printf(p, "%s:%d\t%d\t%s\n", pipe2name(pipe), j, inuse, - plane ? plane->name : NULL); + plane ? plane->name : "(null)");
total += inuse; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 89a6344bc865..f72ce6a3c456 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config config->pll_clock_inverters = 0x00; else config->pll_clock_inverters = 0x40; - } else { + } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { if (pll_freq <= 1000000000ULL) config->pll_clock_inverters = 0xa0; else if (pll_freq <= 2500000000ULL) @@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config config->pll_clock_inverters = 0x00; else config->pll_clock_inverters = 0x40; + } else { + /* 4.2, 4.3 */ + if (pll_freq <= 1000000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq <= 2500000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq <= 3500000000ULL) + config->pll_clock_inverters = 0x00; + else + config->pll_clock_inverters = 0x40; }
config->decimal_div_start = dec; diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 0de79f3a7e3f..820c2c3641d3 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -395,7 +395,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r;
mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1; @@ -433,14 +433,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i return r; }
- offset = track->cb_color_bo_offset[id] << 8; + offset = (u64)track->cb_color_bo_offset[id] << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d cb[%d] bo base %llu not aligned with %ld\n", __func__, __LINE__, id, offset, surf.base_align); return -EINVAL; }
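The evergreen_cs changes cast the 32-bit buffer offsets to u64 before shifting left by 8: in C the shift is performed in the operand's own type, so a u32 shifted by 8 silently drops the high bits for values of 2^24 and above even when the result is stored in a 64-bit variable. A small plain C demonstration of the wrap-around:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t offset_256b = 0x01000000;	/* 2^24 units of 256 bytes = 4 GiB */

		uint64_t wrong = offset_256b << 8;		/* shift done in 32 bits: wraps to 0 */
		uint64_t right = (uint64_t)offset_256b << 8;	/* widen first, then shift */

		printf("wrong=%llu right=%llu\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}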
- offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->cb_color_bo[id])) { /* old ddx are broken they allocate bo with w*h*bpp but * program slice with ALIGN(h, 8), catch this and patch @@ -448,14 +448,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i */ if (!surf.mode) { uint32_t *ib = p->ib.ptr; - unsigned long tmp, nby, bsize, size, min = 0; + u64 tmp, nby, bsize, size, min = 0;
/* find the height the ddx wants */ if (surf.nby > 8) { min = surf.nby - 8; } bsize = radeon_bo_size(track->cb_color_bo[id]); - tmp = track->cb_color_bo_offset[id] << 8; + tmp = (u64)track->cb_color_bo_offset[id] << 8; for (nby = surf.nby; nby > min; nby--) { size = nby * surf.nbx * surf.bpe * surf.nsamples; if ((tmp + size * mslice) <= bsize) { @@ -467,7 +467,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i slice = ((nby * surf.nbx) / 64) - 1; if (!evergreen_surface_check(p, &surf, "cb")) { /* check if this one works */ - tmp += surf.layer_size * mslice; + tmp += (u64)surf.layer_size * mslice; if (tmp <= bsize) { ib[track->cb_color_slice_idx[id]] = slice; goto old_ddx_ok; @@ -476,9 +476,9 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i } } dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " - "offset %d, max layer %d, bo size %ld, slice %d)\n", + "offset %llu, max layer %d, bo size %ld, slice %d)\n", __func__, __LINE__, id, surf.layer_size, - track->cb_color_bo_offset[id] << 8, mslice, + (u64)track->cb_color_bo_offset[id] << 8, mslice, radeon_bo_size(track->cb_color_bo[id]), slice); dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n", __func__, __LINE__, surf.nbx, surf.nby, @@ -562,7 +562,7 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; @@ -608,18 +608,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) return r; }
- offset = track->db_s_read_offset << 8; + offset = (u64)track->db_s_read_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_s_read_bo)) { dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_s_read_offset << 8, mslice, + (u64)track->db_s_read_offset << 8, mslice, radeon_bo_size(track->db_s_read_bo)); dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", __func__, __LINE__, track->db_depth_size, @@ -627,18 +627,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) return -EINVAL; }
- offset = track->db_s_write_offset << 8; + offset = (u64)track->db_s_write_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_s_write_bo)) { dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_s_write_offset << 8, mslice, + (u64)track->db_s_write_offset << 8, mslice, radeon_bo_size(track->db_s_write_bo)); return -EINVAL; } @@ -659,7 +659,7 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; @@ -706,34 +706,34 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) return r; }
- offset = track->db_z_read_offset << 8; + offset = (u64)track->db_z_read_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_z_read_bo)) { dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_z_read_offset << 8, mslice, + (u64)track->db_z_read_offset << 8, mslice, radeon_bo_size(track->db_z_read_bo)); return -EINVAL; }
- offset = track->db_z_write_offset << 8; + offset = (u64)track->db_z_write_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_z_write_bo)) { dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_z_write_offset << 8, mslice, + (u64)track->db_z_write_offset << 8, mslice, radeon_bo_size(track->db_z_write_bo)); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 061396e7fa0f..53c7273eb6a5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1716,26 +1716,29 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { struct edid *edid; - int edid_size = - max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); - edid = kmalloc(edid_size, GFP_KERNEL); + int edid_size; + + if (fake_edid_record->ucFakeEDIDLength == 128) + edid_size = fake_edid_record->ucFakeEDIDLength; + else + edid_size = fake_edid_record->ucFakeEDIDLength * 128; + edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0], + edid_size, GFP_KERNEL); if (edid) { - memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], - fake_edid_record->ucFakeEDIDLength); - if (drm_edid_is_valid(edid)) { rdev->mode_info.bios_hardcoded_edid = edid; rdev->mode_info.bios_hardcoded_edid_size = edid_size; - } else + } else { kfree(edid); + } } + record += struct_size(fake_edid_record, + ucFakeEDIDString, + edid_size); + } else { + /* empty fake edid record must be 3 bytes long */ + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; } - record += fake_edid_record->ucFakeEDIDLength ? - struct_size(fake_edid_record, - ucFakeEDIDString, - fake_edid_record->ucFakeEDIDLength) : - /* empty fake edid record must be 3 bytes long */ - sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 341550199111..89bc86d62014 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -435,6 +435,8 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data) HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK, RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK | RK3328_HDMI_HPD_IOE)); + + dw_hdmi_rk3328_read_hpd(dw_hdmi, data); }
 static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 4b338cb89d32..c6fbfc0baecc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -381,8 +381,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
 	if (info->is_yuv)
 		is_yuv = true;

-	if (dst_w > 3840) {
-		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
+	if (dst_w > 4096) {
+		DRM_DEV_ERROR(vop->dev, "Maximum dst width (4096) exceeded\n");
 		return;
 	}

diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index e8523abef27a..4d2db079ad4f 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -203,12 +203,14 @@ static int stm_drm_platform_probe(struct platform_device *pdev)

 	ret = drm_dev_register(ddev, 0);
 	if (ret)
-		goto err_put;
+		goto err_unload;

 	drm_fbdev_dma_setup(ddev, 16);

 	return 0;

+err_unload:
+	drv_unload(ddev);
 err_put:
 	drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 5576fdae4962..5aec1e58c968 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -1580,6 +1580,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
 			   ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) +
 			   ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) *
 			   sizeof(*formats), GFP_KERNEL);
+	if (!formats)
+		return NULL;
for (i = 0; i < ldev->caps.pix_fmt_nb; i++) { drm_fmt = ldev->caps.pix_fmt_drm[i]; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 643754fa6a8a..c6e986f71a26 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -458,6 +458,7 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector, { struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); enum drm_connector_status status = connector_status_disconnected; + int ret;
 	/*
 	 * NOTE: This function should really take vc4_hdmi->mutex, but
@@ -470,7 +471,12 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
 	 * the lock for now.
 	 */

-	WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+	ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+	if (ret) {
+		drm_err_once(connector->dev, "Failed to retain HDMI power domain: %d\n",
+			     ret);
+		return connector_status_unknown;
+	}
if (vc4_hdmi->hpd_gpio) { if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index ae796e0c64aa..fdc34283eeb9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -331,6 +331,8 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo) void *virtual; int ret;
+	atomic_inc(&vbo->map_count);
+
 	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
 	if (virtual)
 		return virtual;
@@ -353,11 +355,17 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
  */
 void vmw_bo_unmap(struct vmw_bo *vbo)
 {
+	int map_count;
+
 	if (vbo->map.bo == NULL)
 		return;

-	ttm_bo_kunmap(&vbo->map);
-	vbo->map.bo = NULL;
+	map_count = atomic_dec_return(&vbo->map_count);
+
+	if (!map_count) {
+		ttm_bo_kunmap(&vbo->map);
+		vbo->map.bo = NULL;
+	}
 }

@@ -390,6 +398,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
 	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
 	vmw_bo->tbo.priority = 3;
 	vmw_bo->res_tree = RB_ROOT;
+	atomic_set(&vmw_bo->map_count, 0);
params->size = ALIGN(params->size, PAGE_SIZE); drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h index f349642e6190..156ea612fc2a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -68,6 +68,8 @@ struct vmw_bo_params { * @map: Kmap object for semi-persistent mappings * @res_tree: RB tree of resources using this buffer object as a backing MOB * @res_prios: Eviction priority counts for attached resources + * @map_count: The number of currently active maps. Will differ from the + * cpu_writers because it includes kernel maps. * @cpu_writers: Number of synccpu write grabs. Protected by reservation when * increased. May be decreased without reservation. * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB @@ -86,6 +88,7 @@ struct vmw_bo { struct rb_root res_tree; u32 res_prios[TTM_MAX_BO_PRIORITY];
+ atomic_t map_count; atomic_t cpu_writers; /* Not ref-counted. Protected by binding_mutex */ struct vmw_resource *dx_query_ctx; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 5db26a8af772..18b5cd0234d2 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2368,6 +2368,9 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS3, 0); features->quirks &= ~WACOM_QUIRK_PEN_BUTTON3; break; + case WACOM_HID_WD_SEQUENCENUMBER: + wacom_wac->hid_data.sequence_number = -1; + break; } }
@@ -2492,9 +2495,15 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
 		wacom_wac->hid_data.barrelswitch3 = value;
 		return;
 	case WACOM_HID_WD_SEQUENCENUMBER:
-		if (wacom_wac->hid_data.sequence_number != value)
-			hid_warn(hdev, "Dropped %hu packets", (unsigned short)(value - wacom_wac->hid_data.sequence_number));
+		if (wacom_wac->hid_data.sequence_number != value &&
+		    wacom_wac->hid_data.sequence_number >= 0) {
+			int sequence_size = field->logical_maximum - field->logical_minimum + 1;
+			int drop_count = (value - wacom_wac->hid_data.sequence_number) % sequence_size;
+			hid_warn(hdev, "Dropped %d packets", drop_count);
+		}
 		wacom_wac->hid_data.sequence_number = value + 1;
+		if (wacom_wac->hid_data.sequence_number > field->logical_maximum)
+			wacom_wac->hid_data.sequence_number = field->logical_minimum;
 		return;
 	}

diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 57e185f18d53..61073fe81ead 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -324,7 +324,7 @@ struct hid_data {
 	int bat_connected;
 	int ps_connected;
 	bool pad_input_event_flag;
-	unsigned short sequence_number;
+	int sequence_number;
 	ktime_t time_delayed;
 };
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c index aa38c45adc09..0ccb5eb596fc 100644 --- a/drivers/hwmon/max16065.c +++ b/drivers/hwmon/max16065.c @@ -79,7 +79,7 @@ static const bool max16065_have_current[] = { };
struct max16065_data { - enum chips type; + enum chips chip; struct i2c_client *client; const struct attribute_group *groups[4]; struct mutex update_lock; @@ -114,9 +114,10 @@ static inline int LIMIT_TO_MV(int limit, int range) return limit * range / 256; }
-static inline int MV_TO_LIMIT(int mv, int range)
+static inline int MV_TO_LIMIT(unsigned long mv, int range)
 {
-	return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
+	mv = clamp_val(mv, 0, ULONG_MAX / 256);
+	return DIV_ROUND_CLOSEST(clamp_val(mv * 256, 0, range * 255), range);
 }

 static inline int ADC_TO_CURR(int adc, int gain)
@@ -161,10 +162,17 @@ static struct max16065_data *max16065_update_device(struct device *dev)
 							MAX16065_CURR_SENSE);
 		}

-		for (i = 0; i < DIV_ROUND_UP(data->num_adc, 8); i++)
+		for (i = 0; i < 2; i++)
 			data->fault[i] = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));

+		/*
+		 * MAX16067 and MAX16068 have separate undervoltage and
+		 * overvoltage alarm bits. Squash them together.
+		 */
+		if (data->chip == max16067 || data->chip == max16068)
+			data->fault[0] |= data->fault[1];
+
 		data->last_updated = jiffies;
 		data->valid = true;
 	}
@@ -493,8 +501,6 @@ static const struct attribute_group max16065_max_group = {
 	.is_visible = max16065_secondary_is_visible,
 };
-static const struct i2c_device_id max16065_id[]; - static int max16065_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; @@ -505,7 +511,7 @@ static int max16065_probe(struct i2c_client *client) bool have_secondary; /* true if chip has secondary limits */ bool secondary_is_max = false; /* secondary limits reflect max */ int groups = 0; - const struct i2c_device_id *id = i2c_match_id(max16065_id, client); + enum chips chip = (uintptr_t)i2c_get_match_data(client);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_READ_WORD_DATA)) @@ -515,12 +521,13 @@ static int max16065_probe(struct i2c_client *client) if (unlikely(!data)) return -ENOMEM;
+ data->chip = chip; data->client = client; mutex_init(&data->update_lock);
- data->num_adc = max16065_num_adc[id->driver_data]; - data->have_current = max16065_have_current[id->driver_data]; - have_secondary = max16065_have_secondary[id->driver_data]; + data->num_adc = max16065_num_adc[chip]; + data->have_current = max16065_have_current[chip]; + have_secondary = max16065_have_secondary[chip];
if (have_secondary) { val = i2c_smbus_read_byte_data(client, MAX16065_SW_ENABLE); diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index ef75b63f5894..b5352900463f 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -62,6 +62,7 @@ static const struct platform_device_id ntc_thermistor_id[] = { [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 }, [NTC_LAST] = { }, }; +MODULE_DEVICE_TABLE(platform, ntc_thermistor_id);
/* * A compensation table should be sorted by the values of .ohm diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 8311e1028ddb..f3312fbcdc0f 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -255,6 +255,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table) { tmc_free_table_pages(sg_table); tmc_free_data_pages(sg_table); + kfree(sg_table); } EXPORT_SYMBOL_GPL(tmc_free_sg_table);
@@ -336,7 +337,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev, rc = tmc_alloc_table_pages(sg_table); if (rc) { tmc_free_sg_table(sg_table); - kfree(sg_table); return ERR_PTR(rc); }
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 5511fd46a65e..83e6714901b2 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -170,6 +170,13 @@ struct aspeed_i2c_bus {
static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
+/* precondition: bus.lock has been acquired. */
+static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
+{
+	bus->master_state = ASPEED_I2C_MASTER_STOP;
+	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+}
+
 static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
 {
 	unsigned long time_left, flags;
@@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
 			command);

 	reinit_completion(&bus->cmd_complete);
-	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
+	aspeed_i2c_do_stop(bus);
 	spin_unlock_irqrestore(&bus->lock, flags);

 	time_left = wait_for_completion_timeout(
@@ -390,13 +397,6 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
 	writel(command, bus->base + ASPEED_I2C_CMD_REG);
 }

-/* precondition: bus.lock has been acquired. */
-static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
-{
-	bus->master_state = ASPEED_I2C_MASTER_STOP;
-	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
-}
-
 /* precondition: bus.lock has been acquired. */
 static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
 {
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 1dc1ceaa4443..8c16c4695574 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -99,8 +99,7 @@ static int sch_transaction(void)
 		if (retries > MAX_RETRIES) {
 			dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
 			result = -ETIMEDOUT;
-		}
-		if (temp & 0x04) {
+		} else if (temp & 0x04) {
 			result = -EIO;
 			dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
 				"locked until next hard reset. (sorry!)\n");
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 1c08c0921ee7..4d755ffc3f41 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -215,9 +215,9 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val)
 	struct ad7606_state *st = iio_priv(indio_dev);
 	DECLARE_BITMAP(values, 3);
-	values[0] = val;
+	values[0] = val & GENMASK(2, 0);

-	gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
+	gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc,
 			      st->gpio_os->info, values);

 	/* AD7616 requires a reset to update value */
@@ -422,7 +422,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
 		return PTR_ERR(st->gpio_range);

 	st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
-						   GPIOD_OUT_HIGH);
+						   GPIOD_OUT_LOW);
 	if (IS_ERR(st->gpio_standby))
 		return PTR_ERR(st->gpio_standby);

@@ -665,7 +665,7 @@ static int ad7606_suspend(struct device *dev)

 	if (st->gpio_standby) {
 		gpiod_set_value(st->gpio_range, 1);
-		gpiod_set_value(st->gpio_standby, 0);
+		gpiod_set_value(st->gpio_standby, 1);
 	}
return 0; diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c index 263a778bcf25..287a0591533b 100644 --- a/drivers/iio/adc/ad7606_spi.c +++ b/drivers/iio/adc/ad7606_spi.c @@ -249,8 +249,9 @@ static int ad7616_sw_mode_config(struct iio_dev *indio_dev) static int ad7606B_sw_mode_config(struct iio_dev *indio_dev) { struct ad7606_state *st = iio_priv(indio_dev); - unsigned long os[3] = {1}; + DECLARE_BITMAP(os, 3);
+ bitmap_fill(os, 3); /* * Software mode is enabled when all three oversampling * pins are set to high. If oversampling gpios are defined @@ -258,7 +259,7 @@ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev) * otherwise, they must be hardwired to VDD */ if (st->gpio_os) { - gpiod_set_array_value(ARRAY_SIZE(os), + gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc, st->gpio_os->info, os); } /* OS of 128 and 256 are available only in software mode */ diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c index 500f56834b01..a6bf689833da 100644 --- a/drivers/iio/chemical/bme680_core.c +++ b/drivers/iio/chemical/bme680_core.c @@ -10,6 +10,7 @@ */ #include <linux/acpi.h> #include <linux/bitfield.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/module.h> @@ -52,6 +53,7 @@ struct bme680_calib { struct bme680_data { struct regmap *regmap; struct bme680_calib bme680; + struct mutex lock; /* Protect multiple serial R/W ops to device. */ u8 oversampling_temp; u8 oversampling_press; u8 oversampling_humid; @@ -827,6 +829,8 @@ static int bme680_read_raw(struct iio_dev *indio_dev, { struct bme680_data *data = iio_priv(indio_dev);
+ guard(mutex)(&data->lock); + switch (mask) { case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { @@ -871,6 +875,8 @@ static int bme680_write_raw(struct iio_dev *indio_dev, { struct bme680_data *data = iio_priv(indio_dev);
+ guard(mutex)(&data->lock); + if (val2 != 0) return -EINVAL;
@@ -967,6 +973,7 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, name = bme680_match_acpi_device(dev);
data = iio_priv(indio_dev); + mutex_init(&data->lock); dev_set_drvdata(dev, indio_dev); data->regmap = regmap; indio_dev->name = name; diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index eb706d0bf70b..3b7984fd129d 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -204,7 +204,6 @@ static long ak09912_raw_to_gauss(u16 data)
/* Compatible Asahi Kasei Compass parts */ enum asahi_compass_chipset { - AKXXXX = 0, AK8975, AK8963, AK09911, @@ -248,7 +247,7 @@ struct ak_def { };
static const struct ak_def ak_def_array[] = { - { + [AK8975] = { .type = AK8975, .raw_to_gauss = ak8975_raw_to_gauss, .range = 4096, @@ -273,7 +272,7 @@ static const struct ak_def ak_def_array[] = { AK8975_REG_HYL, AK8975_REG_HZL}, }, - { + [AK8963] = { .type = AK8963, .raw_to_gauss = ak8963_09911_raw_to_gauss, .range = 8190, @@ -298,7 +297,7 @@ static const struct ak_def ak_def_array[] = { AK8975_REG_HYL, AK8975_REG_HZL}, }, - { + [AK09911] = { .type = AK09911, .raw_to_gauss = ak8963_09911_raw_to_gauss, .range = 8192, @@ -323,7 +322,7 @@ static const struct ak_def ak_def_array[] = { AK09912_REG_HYL, AK09912_REG_HZL}, }, - { + [AK09912] = { .type = AK09912, .raw_to_gauss = ak09912_raw_to_gauss, .range = 32752, @@ -348,7 +347,7 @@ static const struct ak_def ak_def_array[] = { AK09912_REG_HYL, AK09912_REG_HZL}, }, - { + [AK09916] = { .type = AK09916, .raw_to_gauss = ak09912_raw_to_gauss, .range = 32752, @@ -813,13 +812,13 @@ static const struct iio_info ak8975_info = { };
static const struct acpi_device_id ak_acpi_match[] = { - {"AK8975", AK8975}, - {"AK8963", AK8963}, - {"INVN6500", AK8963}, - {"AK009911", AK09911}, - {"AK09911", AK09911}, - {"AKM9911", AK09911}, - {"AK09912", AK09912}, + {"AK8975", (kernel_ulong_t)&ak_def_array[AK8975] }, + {"AK8963", (kernel_ulong_t)&ak_def_array[AK8963] }, + {"INVN6500", (kernel_ulong_t)&ak_def_array[AK8963] }, + {"AK009911", (kernel_ulong_t)&ak_def_array[AK09911] }, + {"AK09911", (kernel_ulong_t)&ak_def_array[AK09911] }, + {"AKM9911", (kernel_ulong_t)&ak_def_array[AK09911] }, + {"AK09912", (kernel_ulong_t)&ak_def_array[AK09912] }, { } }; MODULE_DEVICE_TABLE(acpi, ak_acpi_match); @@ -883,10 +882,7 @@ static int ak8975_probe(struct i2c_client *client) struct iio_dev *indio_dev; struct gpio_desc *eoc_gpiod; struct gpio_desc *reset_gpiod; - const void *match; - unsigned int i; int err; - enum asahi_compass_chipset chipset; const char *name = NULL;
/* @@ -928,27 +924,15 @@ static int ak8975_probe(struct i2c_client *client) return err;
/* id will be NULL when enumerated via ACPI */ - match = device_get_match_data(&client->dev); - if (match) { - chipset = (uintptr_t)match; - name = dev_name(&client->dev); - } else if (id) { - chipset = (enum asahi_compass_chipset)(id->driver_data); - name = id->name; - } else - return -ENOSYS; - - for (i = 0; i < ARRAY_SIZE(ak_def_array); i++) - if (ak_def_array[i].type == chipset) - break; - - if (i == ARRAY_SIZE(ak_def_array)) { - dev_err(&client->dev, "AKM device type unsupported: %d\n", - chipset); + data->def = i2c_get_match_data(client); + if (!data->def) return -ENODEV; - }
- data->def = &ak_def_array[i]; + /* If enumerated via firmware node, fix the ABI */ + if (dev_fwnode(&client->dev)) + name = dev_name(&client->dev); + else + name = id->name;
/* Fetch the regulators */ data->vdd = devm_regulator_get(&client->dev, "vdd"); @@ -1077,28 +1061,27 @@ static DEFINE_RUNTIME_DEV_PM_OPS(ak8975_dev_pm_ops, ak8975_runtime_suspend, ak8975_runtime_resume, NULL);
static const struct i2c_device_id ak8975_id[] = { - {"ak8975", AK8975}, - {"ak8963", AK8963}, - {"AK8963", AK8963}, - {"ak09911", AK09911}, - {"ak09912", AK09912}, - {"ak09916", AK09916}, + {"ak8975", (kernel_ulong_t)&ak_def_array[AK8975] }, + {"ak8963", (kernel_ulong_t)&ak_def_array[AK8963] }, + {"AK8963", (kernel_ulong_t)&ak_def_array[AK8963] }, + {"ak09911", (kernel_ulong_t)&ak_def_array[AK09911] }, + {"ak09912", (kernel_ulong_t)&ak_def_array[AK09912] }, + {"ak09916", (kernel_ulong_t)&ak_def_array[AK09916] }, {} };
MODULE_DEVICE_TABLE(i2c, ak8975_id);
static const struct of_device_id ak8975_of_match[] = { - { .compatible = "asahi-kasei,ak8975", }, - { .compatible = "ak8975", }, - { .compatible = "asahi-kasei,ak8963", }, - { .compatible = "ak8963", }, - { .compatible = "asahi-kasei,ak09911", }, - { .compatible = "ak09911", }, - { .compatible = "asahi-kasei,ak09912", }, - { .compatible = "ak09912", }, - { .compatible = "asahi-kasei,ak09916", }, - { .compatible = "ak09916", }, + { .compatible = "asahi-kasei,ak8975", .data = &ak_def_array[AK8975] }, + { .compatible = "ak8975", .data = &ak_def_array[AK8975] }, + { .compatible = "asahi-kasei,ak8963", .data = &ak_def_array[AK8963] }, + { .compatible = "ak8963", .data = &ak_def_array[AK8963] }, + { .compatible = "asahi-kasei,ak09911", .data = &ak_def_array[AK09911] }, + { .compatible = "ak09911", .data = &ak_def_array[AK09911] }, + { .compatible = "asahi-kasei,ak09912", .data = &ak_def_array[AK09912] }, + { .compatible = "ak09912", .data = &ak_def_array[AK09912] }, + { .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] }, {} }; MODULE_DEVICE_TABLE(of, ak8975_of_match); diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index b7251ed7a8df..0b88203720b0 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1640,8 +1640,10 @@ int ib_cache_setup_one(struct ib_device *device)
 	rdma_for_each_port (device, p) {
 		err = ib_cache_update(device, p, true, true, true);
-		if (err)
+		if (err) {
+			gid_table_cleanup_one(device);
 			return err;
+		}
 	}
return 0; diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 2d09d1be38f1..3e4941754b48 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -1191,7 +1191,7 @@ static int __init iw_cm_init(void) if (ret) return ret;
-	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
+	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
 	if (!iwcm_wq)
 		goto err_alloc;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 040ba2224f9f..b3757c6a0457 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1222,6 +1222,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) int ret;
 	ep = lookup_atid(t, atid);
+	if (!ep)
+		return -EINVAL;
pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid, be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); @@ -2279,6 +2281,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) int ret = 0;
 	ep = lookup_atid(t, atid);
+	if (!ep)
+		return -EINVAL;
+
 	la = (struct sockaddr_in *)&ep->com.local_addr;
 	ra = (struct sockaddr_in *)&ep->com.remote_addr;
 	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+static enum ib_qp_state query_qp_state(struct erdma_qp *qp) +{ + switch (qp->attrs.state) { + case ERDMA_QP_STATE_IDLE: + return IB_QPS_INIT; + case ERDMA_QP_STATE_RTR: + return IB_QPS_RTR; + case ERDMA_QP_STATE_RTS: + return IB_QPS_RTS; + case ERDMA_QP_STATE_CLOSING: + return IB_QPS_ERR; + case ERDMA_QP_STATE_TERMINATE: + return IB_QPS_ERR; + case ERDMA_QP_STATE_ERROR: + return IB_QPS_ERR; + default: + return IB_QPS_ERR; + } +} + int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { - struct erdma_qp *qp; struct erdma_dev *dev; + struct erdma_qp *qp;
if (ibqp && qp_attr && qp_init_attr) { qp = to_eqp(ibqp); @@ -1571,6 +1591,9 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
+ qp_attr->qp_state = query_qp_state(qp); + qp_attr->cur_qp_state = query_qp_state(qp); + return 0; }
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index c4ac06a33869..7ebf80504fd1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -1098,9 +1098,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level) * @bt_level: base address table level * @unit: ba entries per bt page */ -static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) +static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) { - u32 step; + u64 step; int max; int i;
@@ -1136,7 +1136,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, { struct hns_roce_buf_region *r; int total = 0; - int step; + u64 step; int i;
for (i = 0; i < region_cnt; i++) { @@ -1167,7 +1167,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, int ret = 0; int max_ofs; int level; - u32 step; + u64 step; int end;
if (hopnum <= 1) @@ -1191,10 +1191,12 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
/* config L1 bt to last bt and link them to corresponding parent */ for (level = 1; level < hopnum; level++) { - cur = hem_list_search_item(&mid_bt[level], offset); - if (cur) { - hem_ptrs[level] = cur; - continue; + if (!hem_list_is_bottom_bt(hopnum, level)) { + cur = hem_list_search_item(&mid_bt[level], offset); + if (cur) { + hem_ptrs[level] = cur; + continue; + } }
step = hem_list_calc_ba_range(hopnum, level, unit); @@ -1204,7 +1206,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, }
start_aligned = (distance / step) * step + r->offset; - end = min_t(int, start_aligned + step - 1, max_ofs); + end = min_t(u64, start_aligned + step - 1, max_ofs); cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit, true); if (!cur) { @@ -1293,7 +1295,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base, struct hns_roce_hem_item *hem, *temp_hem; int total = 0; int offset; - int step; + u64 step;
step = hem_list_calc_ba_range(r->hopnum, 1, unit); if (step < 1) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index a49280e2df8c..8066750afab9 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1653,8 +1653,8 @@ static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) { bd_idx = i / CNT_PER_DESC; - if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) && - bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC) + if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC && + !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT))) break;
cnt_data = (__le64 *)&desc[bd_idx].data[0]; @@ -2932,6 +2932,9 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + free_mr_exit(hr_dev); + hns_roce_function_clear(hr_dev);
if (!hr_dev->is_vf) @@ -4391,12 +4394,14 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, upper_32_bits(to_hr_hw_page_addr(mtts[0]))); hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
- context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1])); - qpc_mask->rq_nxt_blk_addr = 0; - - hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, - upper_32_bits(to_hr_hw_page_addr(mtts[1]))); - hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { + context->rq_nxt_blk_addr = + cpu_to_le32(to_hr_hw_page_addr(mtts[1])); + qpc_mask->rq_nxt_blk_addr = 0; + hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, + upper_32_bits(to_hr_hw_page_addr(mtts[1]))); + hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); + }
return 0; } @@ -6065,6 +6070,7 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, struct pci_dev *pdev = hr_dev->pci_dev; struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); const struct hnae3_ae_ops *ops = ae_dev->ops; + enum hnae3_reset_type reset_type; irqreturn_t int_work = IRQ_NONE; u32 int_en;
@@ -6076,10 +6082,12 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
+ reset_type = hr_dev->is_vf ? + HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET; + /* Set reset level for reset_event() */ if (ops->set_default_reset_request) - ops->set_default_reset_request(ae_dev, - HNAE3_FUNC_RESET); + ops->set_default_reset_request(ae_dev, reset_type); if (ops->reset_event) ops->reset_event(pdev, NULL);
@@ -6149,7 +6157,7 @@ static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data) res_type == ECC_RESOURCE_SCCC) return le64_to_cpu(*data);
- return le64_to_cpu(*data) << PAGE_SHIFT; + return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT; }
static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type, @@ -6777,9 +6785,6 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; hns_roce_handle_device_err(hr_dev);
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) - free_mr_exit(hr_dev); - hns_roce_exit(hr_dev); kfree(hr_dev->priv); ib_dealloc_device(&hr_dev->ib_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index bff00b3af41f..04063cfacae5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1379,19 +1379,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) __acquire(&send_cq->lock); __acquire(&recv_cq->lock); } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); __acquire(&recv_cq->lock); } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { - spin_lock_irq(&recv_cq->lock); + spin_lock(&recv_cq->lock); __acquire(&send_cq->lock); } else if (send_cq == recv_cq) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { - spin_lock_irq(&recv_cq->lock); + spin_lock(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } @@ -1411,13 +1411,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, spin_unlock(&recv_cq->lock); } else if (send_cq == recv_cq) { __release(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + spin_unlock(&send_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + spin_unlock(&send_cq->lock); } else { spin_unlock(&send_cq->lock); - spin_unlock_irq(&recv_cq->lock); + spin_unlock(&recv_cq->lock); } }
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index ca3e89909ecf..38cecb28d322 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -1347,7 +1347,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) { ibdev_err(&iwdev->ibdev, "rd_atomic = %d, above max_hw_ird=%d\n", - attr->max_rd_atomic, + attr->max_dest_rd_atomic, dev->hw_attrs.max_hw_ird); return -EINVAL; } diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 2d179bc56ce6..296af7a5c279 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -539,7 +539,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u32 port_num, if (!ndev) goto out;
- if (dev->lag_active) { + if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) { rcu_read_lock(); upper = netdev_master_upper_dev_get_rcu(ndev); if (upper) { diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 50a1786231c7..9e465cf99733 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -48,6 +48,7 @@ enum { MAX_PENDING_REG_MR = 8, };
+#define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4 #define MLX5_UMR_ALIGN 2048
static void @@ -715,6 +716,7 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, { struct rb_node *node = dev->cache.rb_root.rb_node; struct mlx5_cache_ent *cur, *smallest = NULL; + u64 ndescs_limit; int cmp;
/* @@ -733,10 +735,18 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, return cur; }
+ /* + * Limit the usage of mkeys larger than twice the required size while + * also allowing the usage of smallest cache entry for small MRs. + */ + ndescs_limit = max_t(u64, rb_key.ndescs * 2, + MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS); + return (smallest && smallest->rb_key.access_mode == rb_key.access_mode && smallest->rb_key.access_flags == rb_key.access_flags && - smallest->rb_key.ats == rb_key.ats) ? + smallest->rb_key.ats == rb_key.ats && + smallest->rb_key.ndescs <= ndescs_limit) ? smallest : NULL; } @@ -986,7 +996,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) mlx5_mkey_cache_debugfs_init(dev); mutex_lock(&cache->rb_lock); for (i = 0; i <= mkey_cache_max_order(dev); i++) { - rb_key.ndescs = 1 << (i + 2); + rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i; ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); if (IS_ERR(ent)) { ret = PTR_ERR(ent); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 1aee62aa1515..82aa47efb807 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -626,6 +626,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) */ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done)) return; + clt_path->s.hb_missed_cnt = 0; rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); if (imm_type == RTRS_IO_RSP_IMM || @@ -643,7 +644,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) return rtrs_clt_recv_done(con, wc); } else if (imm_type == RTRS_HB_ACK_IMM) { WARN_ON(con->c.cid); - clt_path->s.hb_missed_cnt = 0; clt_path->s.hb_cur_latency = ktime_sub(ktime_get(), clt_path->s.hb_last_sent); if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) @@ -670,6 +670,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) /* * Key invalidations from server side */ + clt_path->s.hb_missed_cnt = 0; WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || wc->wc_flags & IB_WC_WITH_IMM)); WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); @@ -2341,6 +2342,12 @@ static int init_conns(struct rtrs_clt_path *clt_path) if (err) goto destroy; } + + /* + * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds. + */ + cid = clt_path->s.con_num - 1; + err = alloc_path_reqs(clt_path); if (err) goto destroy; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index 1d33efb8fb03..94ac99a4f696 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -1229,6 +1229,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) */ if (WARN_ON(wc->wr_cqe != &io_comp_cqe)) return; + srv_path->s.hb_missed_cnt = 0; err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 61e8e43e9c2b..489813274401 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -627,7 +627,7 @@ static int adp5588_setup(struct adp5588_kpad *kpad)
 	for (i = 0; i < KEYP_MAX_EVENT; i++) {
 		ret = adp5588_read(client, KEY_EVENTA);
-		if (ret)
+		if (ret < 0)
 			return ret;
 	}
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index bad238f69a7a..34d1f07ea4c3 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -1120,6 +1120,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, + /* + * Some TongFang barebones have touchpad and/or keyboard issues after + * suspend fixable with nomux + reset + noloop + nopnp. Luckily, none of + * them have an external PS/2 port so this can safely be set for all of + * them. + * TongFang barebones come with board_vendor and/or system_vendor set to + * a different value for each individual reseller. The only somewhat + * universal way to identify them is by board_name. + */ + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, /* * A lot of modern Clevo barebones have touchpad and/or keyboard issues * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c index 2f872e95fbba..e719f5da68bf 100644 --- a/drivers/input/touchscreen/ilitek_ts_i2c.c +++ b/drivers/input/touchscreen/ilitek_ts_i2c.c @@ -37,6 +37,8 @@ #define ILITEK_TP_CMD_GET_MCU_VER 0x61 #define ILITEK_TP_CMD_GET_IC_MODE 0xC0
+#define ILITEK_TP_I2C_REPORT_ID 0x48 + #define REPORT_COUNT_ADDRESS 61 #define ILITEK_SUPPORT_MAX_POINT 40
@@ -160,15 +162,19 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) error = ilitek_i2c_write_and_read(ts, NULL, 0, 0, buf, 64); if (error) { dev_err(dev, "get touch info failed, err:%d\n", error); - goto err_sync_frame; + return error; + } + + if (buf[0] != ILITEK_TP_I2C_REPORT_ID) { + dev_err(dev, "get touch info failed. Wrong id: 0x%02X\n", buf[0]); + return -EINVAL; }
report_max_point = buf[REPORT_COUNT_ADDRESS]; if (report_max_point > ts->max_tp) { dev_err(dev, "FW report max point:%d > panel info. max:%d\n", report_max_point, ts->max_tp); - error = -EINVAL; - goto err_sync_frame; + return -EINVAL; }
count = DIV_ROUND_UP(report_max_point, packet_max_point); @@ -178,7 +184,7 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) if (error) { dev_err(dev, "get touch info. failed, cnt:%d, err:%d\n", count, error); - goto err_sync_frame; + return error; } }
@@ -203,10 +209,10 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) ilitek_touch_down(ts, id, x, y); }
-err_sync_frame: input_mt_sync_frame(input); input_sync(input); - return error; + + return 0; }
/* APIs of cmds for ILITEK Touch IC */ diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c index d787f2ea36d9..a91df709cfb2 100644 --- a/drivers/interconnect/icc-clk.c +++ b/drivers/interconnect/icc-clk.c @@ -87,6 +87,7 @@ struct icc_provider *icc_clk_register(struct device *dev, onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL); if (!onecell) return ERR_PTR(-ENOMEM); + onecell->num_nodes = 2 * num_clocks;
qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL); if (!qp) @@ -133,8 +134,6 @@ struct icc_provider *icc_clk_register(struct device *dev, onecell->nodes[j++] = node; }
- onecell->num_nodes = j; - ret = icc_provider_register(provider); if (ret) goto err; diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c index e9ef2e0a62f6..cbf0c4601512 100644 --- a/drivers/iommu/amd/io_pgtable_v2.c +++ b/drivers/iommu/amd/io_pgtable_v2.c @@ -50,7 +50,7 @@ static inline u64 set_pgtable_attr(u64 *page) u64 prot;
 	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
-	prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+	prot |= IOMMU_PAGE_ACCESS;
return (iommu_virt_to_phys(page) | prot); } diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index 6e6cb19c81f5..d49158936019 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -278,6 +278,20 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) u32 smr; int i;
+ /* + * MSM8998 LPASS SMMU reports 13 context banks, but accessing + * the last context bank crashes the system. + */ + if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") && + smmu->num_context_banks == 13) { + smmu->num_context_banks = 12; + } else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) { + if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */ + smmu->num_context_banks = 7; + else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */ + smmu->num_context_banks = 13; + } + /* * Some platforms support more than the Arm SMMU architected maximum of * 128 stream matching groups. For unknown reasons, the additional @@ -334,6 +348,19 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) return 0; }
+static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu) +{ + /* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */ + smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K; + + /* TZ protects several last context banks, hide them from Linux */ + if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") && + smmu->num_context_banks == 5) + smmu->num_context_banks = 2; + + return 0; +} + static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; @@ -424,6 +451,7 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = { .init_context = qcom_adreno_smmu_init_context, + .cfg_probe = qcom_adreno_smmuv2_cfg_probe, .def_domain_type = qcom_smmu_def_domain_type, .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank, .write_sctlr = qcom_adreno_smmu_write_sctlr, diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index 2d22c027aa59..e76b22939994 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -111,6 +111,7 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova, unsigned long page_offset = uptr % PAGE_SIZE; struct interval_tree_double_span_iter used_span; struct interval_tree_span_iter allowed_span; + unsigned long max_alignment = PAGE_SIZE; unsigned long iova_alignment;
 	lockdep_assert_held(&iopt->iova_rwsem);
@@ -130,6 +131,13 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova,
 				   roundup_pow_of_two(length),
 				   1UL << __ffs64(uptr));

+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	max_alignment = HPAGE_SIZE;
+#endif
+	/* Protect against ALIGN() overflow */
+	if (iova_alignment >= max_alignment)
+		iova_alignment = max_alignment;
+
 	if (iova_alignment < iopt->iova_alignment)
 		return -EINVAL;
diff --git a/drivers/leds/leds-bd2606mvv.c b/drivers/leds/leds-bd2606mvv.c index 3fda712d2f80..c1181a35d0f7 100644 --- a/drivers/leds/leds-bd2606mvv.c +++ b/drivers/leds/leds-bd2606mvv.c @@ -69,16 +69,14 @@ static const struct regmap_config bd2606mvv_regmap = {
static int bd2606mvv_probe(struct i2c_client *client) { - struct fwnode_handle *np, *child; struct device *dev = &client->dev; struct bd2606mvv_priv *priv; struct fwnode_handle *led_fwnodes[BD2606_MAX_LEDS] = { 0 }; int active_pairs[BD2606_MAX_LEDS / 2] = { 0 }; int err, reg; - int i; + int i, j;
- np = dev_fwnode(dev); - if (!np) + if (!dev_fwnode(dev)) return -ENODEV;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -94,20 +92,18 @@ static int bd2606mvv_probe(struct i2c_client *client)
i2c_set_clientdata(client, priv);
- fwnode_for_each_available_child_node(np, child) { + device_for_each_child_node_scoped(dev, child) { struct bd2606mvv_led *led;
 		err = fwnode_property_read_u32(child, "reg", &reg);
-		if (err) {
-			fwnode_handle_put(child);
+		if (err)
 			return err;
-		}
-		if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) {
-			fwnode_handle_put(child);
+
+		if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg])
 			return -EINVAL;
-		}
+
 		led = &priv->leds[reg];
-		led_fwnodes[reg] = child;
+		led_fwnodes[reg] = fwnode_handle_get(child);
 		active_pairs[reg / 2]++;
 		led->priv = priv;
 		led->led_no = reg;
@@ -130,7 +126,8 @@ static int bd2606mvv_probe(struct i2c_client *client)
 						     &priv->leds[i].ldev,
 						     &init_data);
 		if (err < 0) {
-			fwnode_handle_put(child);
+			for (j = i; j < BD2606_MAX_LEDS; j++)
+				fwnode_handle_put(led_fwnodes[j]);
 			return dev_err_probe(dev, err,
 					     "couldn't register LED %s\n",
 					     priv->leds[i].ldev.name);
/* Auto-increment disabled. Normal mode */ #define PCA995X_MODE1_CFG 0x00 @@ -34,17 +30,38 @@ #define PCA995X_LDRX_MASK 0x3 #define PCA995X_LDRX_BITS 2
-#define PCA995X_MAX_OUTPUTS 16 +#define PCA995X_MAX_OUTPUTS 24 #define PCA995X_OUTPUTS_PER_REG 4
#define PCA995X_IREFALL_FULL_CFG 0xFF #define PCA995X_IREFALL_HALF_CFG (PCA995X_IREFALL_FULL_CFG / 2)
-#define PCA995X_TYPE_NON_B 0 -#define PCA995X_TYPE_B 1 - #define ldev_to_led(c) container_of(c, struct pca995x_led, ldev)
+struct pca995x_chipdef { + unsigned int num_leds; + u8 pwm_base; + u8 irefall; +}; + +static const struct pca995x_chipdef pca9952_chipdef = { + .num_leds = 16, + .pwm_base = 0x0a, + .irefall = 0x43, +}; + +static const struct pca995x_chipdef pca9955b_chipdef = { + .num_leds = 16, + .pwm_base = 0x08, + .irefall = 0x45, +}; + +static const struct pca995x_chipdef pca9956b_chipdef = { + .num_leds = 24, + .pwm_base = 0x0a, + .irefall = 0x40, +}; + struct pca995x_led { unsigned int led_no; struct led_classdev ldev; @@ -54,7 +71,7 @@ struct pca995x_led { struct pca995x_chip { struct regmap *regmap; struct pca995x_led leds[PCA995X_MAX_OUTPUTS]; - int btype; + const struct pca995x_chipdef *chipdef; };
static int pca995x_brightness_set(struct led_classdev *led_cdev, @@ -62,10 +79,11 @@ static int pca995x_brightness_set(struct led_classdev *led_cdev, { struct pca995x_led *led = ldev_to_led(led_cdev); struct pca995x_chip *chip = led->chip; + const struct pca995x_chipdef *chipdef = chip->chipdef; u8 ledout_addr, pwmout_addr; int shift, ret;
- pwmout_addr = (chip->btype ? PCA9955B_PWM0 : PCA9952_PWM0) + led->led_no; + pwmout_addr = chipdef->pwm_base + led->led_no; ledout_addr = PCA995X_LEDOUT0 + (led->led_no / PCA995X_OUTPUTS_PER_REG); shift = PCA995X_LDRX_BITS * (led->led_no % PCA995X_OUTPUTS_PER_REG);
@@ -102,43 +120,38 @@ static const struct regmap_config pca995x_regmap = { static int pca995x_probe(struct i2c_client *client) { struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 }; - struct fwnode_handle *np, *child; struct device *dev = &client->dev; + const struct pca995x_chipdef *chipdef; struct pca995x_chip *chip; struct pca995x_led *led; - int i, btype, reg, ret; + int i, j, reg, ret;
- btype = (unsigned long)device_get_match_data(&client->dev); + chipdef = device_get_match_data(&client->dev);
- np = dev_fwnode(dev); - if (!np) + if (!dev_fwnode(dev)) return -ENODEV;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM;
- chip->btype = btype; + chip->chipdef = chipdef; chip->regmap = devm_regmap_init_i2c(client, &pca995x_regmap); if (IS_ERR(chip->regmap)) return PTR_ERR(chip->regmap);
i2c_set_clientdata(client, chip);
-	fwnode_for_each_available_child_node(np, child) {
+	device_for_each_child_node_scoped(dev, child) {
 		ret = fwnode_property_read_u32(child, "reg", &reg);
-		if (ret) {
-			fwnode_handle_put(child);
+		if (ret)
 			return ret;
-		}

-		if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) {
-			fwnode_handle_put(child);
+		if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg])
 			return -EINVAL;
-		}
led = &chip->leds[reg]; - led_fwnodes[reg] = child; + led_fwnodes[reg] = fwnode_handle_get(child); led->chip = chip; led->led_no = reg; led->ldev.brightness_set_blocking = pca995x_brightness_set; @@ -157,7 +170,8 @@ static int pca995x_probe(struct i2c_client *client) &chip->leds[i].ldev, &init_data); if (ret < 0) { - fwnode_handle_put(child); + for (j = i; j < PCA995X_MAX_OUTPUTS; j++) + fwnode_handle_put(led_fwnodes[j]); return dev_err_probe(dev, ret, "Could not register LED %s\n", chip->leds[i].ldev.name); @@ -170,21 +184,21 @@ static int pca995x_probe(struct i2c_client *client) return ret;
/* IREF Output current value for all LEDn outputs */ - return regmap_write(chip->regmap, - btype ? PCA9955B_IREFALL : PCA9952_IREFALL, - PCA995X_IREFALL_HALF_CFG); + return regmap_write(chip->regmap, chipdef->irefall, PCA995X_IREFALL_HALF_CFG); }
static const struct i2c_device_id pca995x_id[] = { - { "pca9952", .driver_data = (kernel_ulong_t)PCA995X_TYPE_NON_B }, - { "pca9955b", .driver_data = (kernel_ulong_t)PCA995X_TYPE_B }, + { "pca9952", .driver_data = (kernel_ulong_t)&pca9952_chipdef }, + { "pca9955b", .driver_data = (kernel_ulong_t)&pca9955b_chipdef }, + { "pca9956b", .driver_data = (kernel_ulong_t)&pca9956b_chipdef }, {} }; MODULE_DEVICE_TABLE(i2c, pca995x_id);
static const struct of_device_id pca995x_of_match[] = { - { .compatible = "nxp,pca9952", .data = (void *)PCA995X_TYPE_NON_B }, - { .compatible = "nxp,pca9955b", .data = (void *)PCA995X_TYPE_B }, + { .compatible = "nxp,pca9952", .data = &pca9952_chipdef }, + { .compatible = "nxp,pca9955b", .data = &pca9955b_chipdef }, + { .compatible = "nxp,pca9956b", .data = &pca9956b_chipdef }, {}, }; MODULE_DEVICE_TABLE(of, pca995x_of_match); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index f7e9a3632eb3..499f8cc8a39f 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -496,8 +496,10 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
map = dm_get_live_table(md, &srcu_idx); if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); dm_put_live_table(md, srcu_idx); - return BLK_STS_RESOURCE; + return BLK_STS_IOERR; } ti = dm_table_find_target(map, 0); dm_put_live_table(md, srcu_idx); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8ec0a263744a..5dd0a42463a2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1817,10 +1817,15 @@ static void dm_submit_bio(struct bio *bio) struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); + bio_io_error(bio); + goto out; + }
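The device-mapper hunks above change the "no live mapping table" case from a retry (BLK_STS_RESOURCE, or queueing the bio for later) into an immediate I/O error. A hedged sketch of the distinction in a hypothetical blk-mq ->queue_rq, to show why the return code matters; the foo_* structure is illustrative only:

/* BLK_STS_RESOURCE asks blk-mq to requeue and retry later; BLK_STS_IOERR
 * completes the request with an error now.  The dm hunks move the missing
 * table case from the former to the latter so such I/O cannot spin forever.
 */
#include <linux/blk-mq.h>

struct foo_dev {
	bool ready;		/* transient condition, will clear soon */
	bool configured;	/* permanent condition for this device */
};

static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct foo_dev *foo = hctx->queue->queuedata;

	if (!foo->ready)
		return BLK_STS_RESOURCE;	/* blk-mq retries later */

	if (!foo->configured)
		return BLK_STS_IOERR;		/* fail the request now */

	blk_mq_start_request(bd->rq);
	/* ... submit to hardware; completed synchronously in this sketch ... */
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}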
- /* If suspended, or map not yet available, queue this IO for later */ - if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || - unlikely(!map)) { + /* If suspended, queue this IO for later */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); else if (bio->bi_opf & REQ_RAHEAD) diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index 35c969fd2cb5..4e59734ec53e 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -609,7 +609,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on index, pid, onoff);
/* skip invalid PIDs (0x2000) */ - if (pid > 0x1fff || index > 32) + if (pid > 0x1fff || index >= 32) return 0;
if (onoff) diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 601cf45c3935..e6a7877a9854 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -983,7 +983,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, index, pid, onoff, dev->slave_ts);
/* skip invalid PIDs (0x2000) */ - if (pid > 0x1fff || index > 32) + if (pid > 0x1fff || index >= 32) return 0;
if (onoff) diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c index 5600f1df653d..b55fbd6a8a66 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c @@ -347,11 +347,16 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs, return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + } + src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + y_fb_dma = fb->base_y.dma_addr; + c_fb_dma = fb->base_c.dma_addr;
mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p", inst->num_nalu, y_fb_dma, c_fb_dma, fb); diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c index 0e741e0dc8ba..bb0ad93c6b78 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c @@ -724,11 +724,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + } + src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + y_fb_dma = fb->base_y.dma_addr; + c_fb_dma = fb->base_c.dma_addr; mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx", inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
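The H.264 decoder hunks above (and the VP8 hunk below, which follows the same pattern) reject a NULL capture buffer before it reaches container_of() and drop the "fb ? ... : 0" fallbacks. A small sketch of the shape, with purely illustrative foo_* types:

/* Check a possibly-NULL object before container_of() and use its fields
 * directly, instead of threading conditional fallbacks through the rest
 * of the function.
 */
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

struct foo_frame {
	u64 y_dma;
	u64 c_dma;
};

struct foo_buf {
	struct foo_frame frame;
	/* ... driver specific state ... */
};

static int foo_decode_frame(struct foo_frame *fb)
{
	struct foo_buf *buf;

	if (!fb)
		return -ENOMEM;	/* error out early instead of limping on */

	buf = container_of(fb, struct foo_buf, frame);

	/* fb is known valid here; no "fb ? fb->y_dma : 0" fallback needed */
	pr_debug("buf=%p y=%llx c=%llx\n", buf,
		 (unsigned long long)fb->y_dma,
		 (unsigned long long)fb->c_dma);
	return 0;
}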
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c index f64b21c07169..86b93055f163 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c @@ -335,14 +335,18 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs, src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); - dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + }
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; + dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); + y_fb_dma = fb->base_y.dma_addr; if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1) c_fb_dma = y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h; else - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + c_fb_dma = fb->base_c.dma_addr;
inst->vsi->dec.bs_dma = (u64)bs->dma_addr; inst->vsi->dec.bs_sz = bs->size; diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c index ad2bd71037ab..246eec259c5d 100644 --- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c +++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c @@ -854,6 +854,7 @@ static const struct of_device_id rzg2l_csi2_of_table[] = { { .compatible = "renesas,rzg2l-csi2", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table);
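The one-line rzg2l-csi2 fix above only adds MODULE_DEVICE_TABLE(); without it the OF match table still works for built-in matching, but the module exports no device aliases and is never autoloaded when the compatible appears. A hedged, minimal sketch with illustrative names:

/* Exporting an OF match table as module aliases so userspace (udev and
 * modprobe) can autoload the driver when the compatible string shows up.
 */
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id foo_of_table[] = {
	{ .compatible = "vendor,foo" },
	{ /* sentinel */ }
};
/* Records the compatibles as module aliases; omit this and the driver
 * only binds if it is built in or loaded by hand.
 */
MODULE_DEVICE_TABLE(of, foo_of_table);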
static struct platform_driver rzg2l_csi2_pdrv = { .remove_new = rzg2l_csi2_remove, diff --git a/drivers/media/tuners/tuner-i2c.h b/drivers/media/tuners/tuner-i2c.h index 07aeead0644a..724952e001cd 100644 --- a/drivers/media/tuners/tuner-i2c.h +++ b/drivers/media/tuners/tuner-i2c.h @@ -133,10 +133,8 @@ static inline int tuner_i2c_xfer_send_recv(struct tuner_i2c_props *props, } \ if (0 == __ret) { \ state = kzalloc(sizeof(type), GFP_KERNEL); \ - if (!state) { \ - __ret = -ENOMEM; \ + if (NULL == state) \ goto __fail; \ - } \ state->i2c_props.addr = i2caddr; \ state->i2c_props.adap = i2cadap; \ state->i2c_props.name = devname; \ diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 36e060386e59..59e1b3a4406e 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -207,6 +207,9 @@ static int powernv_flash_set_driver_info(struct device *dev, * get them */ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); + if (!mtd->name) + return -ENOMEM; + mtd->type = MTD_NORFLASH; mtd->flags = MTD_WRITEABLE; mtd->size = size; diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 28131a127d06..8297b366a066 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -296,10 +296,12 @@ static int __init init_slram(void) T("slram: devname = %s\n", devname); if ((!map) || (!(devstart = strsep(&map, ",")))) { E("slram: No devicestart specified.\n"); + break; } T("slram: devstart = %s\n", devstart); if ((!map) || (!(devlength = strsep(&map, ",")))) { E("slram: No devicelength / -end specified.\n"); + break; } T("slram: devlength = %s\n", devlength); if (parse_cmdline(devname, devstart, devlength) != 0) { diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 29c8bddde67f..161a409ca4ed 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1429,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, return 0; }
+static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc) +{ + struct mtk_nfc_nand_chip *mtk_chip; + struct nand_chip *chip; + int ret; + + while (!list_empty(&nfc->chips)) { + mtk_chip = list_first_entry(&nfc->chips, + struct mtk_nfc_nand_chip, node); + chip = &mtk_chip->nand; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&mtk_chip->node); + } +} + static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc) { struct device_node *np = dev->of_node; - struct device_node *nand_np; int ret;
- for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np); if (ret) { - of_node_put(nand_np); + mtk_nfc_nand_chips_cleanup(nfc); return ret; } } @@ -1570,20 +1586,8 @@ static int mtk_nfc_probe(struct platform_device *pdev) static void mtk_nfc_remove(struct platform_device *pdev) { struct mtk_nfc *nfc = platform_get_drvdata(pdev); - struct mtk_nfc_nand_chip *mtk_chip; - struct nand_chip *chip; - int ret; - - while (!list_empty(&nfc->chips)) { - mtk_chip = list_first_entry(&nfc->chips, - struct mtk_nfc_nand_chip, node); - chip = &mtk_chip->nand; - ret = mtd_device_unregister(nand_to_mtd(chip)); - WARN_ON(ret); - nand_cleanup(chip); - list_del(&mtk_chip->node); - }
+ mtk_nfc_nand_chips_cleanup(nfc); mtk_ecc_release(nfc->ecc); }
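The mtk_nand hunks above factor the chip teardown loop into one helper so the probe error path (which previously only put the current node and leaked already-registered chips) and the remove path share it. A sketch of that shape, with hypothetical foo_* names rather than the driver's own:

/* One teardown helper serves both the failed multi-child init and remove. */
#include <linux/list.h>
#include <linux/slab.h>

struct foo_child {
	struct list_head node;
	/* ... per-child state ... */
};

struct foo_ctrl {
	struct list_head children;
};

static void foo_children_cleanup(struct foo_ctrl *ctrl)
{
	struct foo_child *child;

	while (!list_empty(&ctrl->children)) {
		child = list_first_entry(&ctrl->children, struct foo_child, node);
		/* unregister/tear down the child here */
		list_del(&child->node);
		kfree(child);
	}
}

static int foo_children_init(struct foo_ctrl *ctrl, int count)
{
	struct foo_child *child;
	int i;

	for (i = 0; i < count; i++) {
		child = kzalloc(sizeof(*child), GFP_KERNEL);
		if (!child) {
			/* undo everything added so far: same helper as remove */
			foo_children_cleanup(ctrl);
			return -ENOMEM;
		}
		list_add_tail(&child->node, &ctrl->children);
	}

	return 0;
}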
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 277493e41b07..54767154de26 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -67,6 +67,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) __be16 proto; void *oiph; int err; + int nh;
bareudp = rcu_dereference_sk_user_data(sk); if (!bareudp) @@ -144,10 +145,25 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) } skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; - oiph = skb_network_header(skb); - skb_reset_network_header(skb); skb_reset_mac_header(skb);
+ /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + + skb_reset_network_header(skb); + + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(bareudp->dev, rx_length_errors); + DEV_STATS_INC(bareudp->dev, rx_errors); + goto drop; + } + + /* Get the outer header. */ + oiph = skb->head + nh; + if (!ipv6_mod_enabled() || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); else @@ -303,6 +319,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be32 saddr; int err;
+ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) + return -EINVAL; + if (!sock) return -ESHUTDOWN;
@@ -366,6 +385,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err;
+ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) + return -EINVAL; + if (!sock) return -ESHUTDOWN;
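The bareudp receive hunk above saves the outer header as an offset before calling a pull helper, because pskb_may_pull() and friends can reallocate skb->head and invalidate raw pointers into the packet. A compact sketch of that pattern (foo_* name and the pulled length are illustrative):

/* Re-derive a header pointer from a saved offset after any operation that
 * may move skb->head.
 */
#include <linux/ip.h>
#include <linux/skbuff.h>

static void *foo_outer_header(struct sk_buff *skb)
{
	int nh = skb_network_header(skb) - skb->head;	/* offset survives */

	skb_reset_network_header(skb);

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return NULL;			/* caller drops the packet */

	/* skb->head may have changed; rebuild the pointer from the offset */
	return skb->head + nh;
}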
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 53a7b53618d9..14b4780b73c7 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5534,9 +5534,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp) break;
default: - /* Should never happen. Mode guarded by bond_xdp_check() */ - netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond)); - WARN_ON_ONCE(1); + if (net_ratelimit()) + netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", + BOND_MODE(bond)); return NULL; }
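The bonding hunk above drops the WARN_ON_ONCE and rate-limits the error message, since the branch sits in a per-packet XDP path. A small sketch of rate-limited logging for such paths (names are illustrative):

/* In a path reachable per packet, log through net_ratelimit() instead of
 * warning once and then failing silently on every subsequent frame.
 */
#include <linux/net.h>
#include <linux/netdevice.h>

static struct net_device *foo_pick_dev(struct net_device *dev, int mode)
{
	switch (mode) {
	case 0:
		return dev;
	default:
		if (net_ratelimit())
			netdev_err(dev, "unexpected mode %d\n", mode);
		return NULL;	/* caller drops the frame */
	}
}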
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index fb77fd74de27..97666a759595 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1599,11 +1599,7 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
- if (!cdev->is_peripheral) - napi_disable(&cdev->napi); - m_can_stop(dev); - m_can_clk_stop(cdev); free_irq(dev->irq, dev);
if (cdev->is_peripheral) { @@ -1611,10 +1607,13 @@ static int m_can_close(struct net_device *dev) destroy_workqueue(cdev->tx_wq); cdev->tx_wq = NULL; can_rx_offload_disable(&cdev->offload); + } else { + napi_disable(&cdev->napi); }
close_candev(dev);
+ m_can_clk_stop(cdev); phy_power_off(cdev->transceiver);
return 0; @@ -1842,6 +1841,8 @@ static int m_can_open(struct net_device *dev)
if (cdev->is_peripheral) can_rx_offload_enable(&cdev->offload); + else + napi_enable(&cdev->napi);
/* register interrupt handler */ if (cdev->is_peripheral) { @@ -1873,9 +1874,6 @@ static int m_can_open(struct net_device *dev) if (err) goto exit_start_fail;
- if (!cdev->is_peripheral) - napi_enable(&cdev->napi); - netif_start_queue(dev);
return 0; @@ -1889,6 +1887,8 @@ static int m_can_open(struct net_device *dev) out_wq_fail: if (cdev->is_peripheral) can_rx_offload_disable(&cdev->offload); + else + napi_disable(&cdev->napi); close_candev(dev); exit_disable_clks: m_can_clk_stop(cdev); diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c index 41a0e4261d15..03ad10b01867 100644 --- a/drivers/net/can/usb/esd_usb.c +++ b/drivers/net/can/usb/esd_usb.c @@ -3,7 +3,7 @@ * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro * * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs socketcan@esd.eu - * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus frank.jungclaus@esd.eu + * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus frank.jungclaus@esd.eu */
#include <linux/can.h> @@ -1116,9 +1116,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev) if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - flags |= ESD_USB_3_BAUDRATE_FLAG_TRS; - baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1)); baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1)); baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1) @@ -1219,7 +1216,6 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index) switch (le16_to_cpu(dev->udev->descriptor.idProduct)) { case ESD_USB_CANUSB3_PRODUCT_ID: priv->can.clock.freq = ESD_USB_3_CAN_CLOCK; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const; priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 0f5a4ec505dd..18e3a9cd4fc0 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2305,12 +2305,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
snprintf(v->name, sizeof(v->name), "%s-rxtx%d", priv->ndev->name, i); - err = request_irq(irq, enetc_msix, 0, v->name, v); + err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v); if (err) { dev_err(priv->dev, "request_irq() failed!\n"); goto irq_err; } - disable_irq(irq);
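The enetc hunk above replaces the request_irq() + disable_irq() pair with IRQF_NO_AUTOEN; the usual motivation for this conversion is to close the window in which the freshly requested interrupt could fire before the driver is ready for it. A hedged sketch of the flag's use (foo_* names are hypothetical):

/* With IRQF_NO_AUTOEN the line is registered but left disabled, so the
 * handler cannot run until the driver explicitly enables it.
 */
#include <linux/interrupt.h>

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_setup_irq(int irq, void *data)
{
	int err;

	err = request_irq(irq, foo_irq, IRQF_NO_AUTOEN, "foo", data);
	if (err)
		return err;

	/* ... finish programming the device ... */

	enable_irq(irq);	/* only now can foo_irq() run */
	return 0;
}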
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index b50f16786c24..6ab89f478285 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -1060,6 +1060,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); rtl8168g_enable_gphy_10m(phydev);
+ rtl8168g_disable_aldps(phydev); rtl8125a_config_eee_phy(phydev); }
@@ -1099,6 +1100,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
rtl8125_legacy_force_mode(phydev); + rtl8168g_disable_aldps(phydev); rtl8125b_config_eee_phy(phydev); }
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index c672f92d65e9..9319a2675e7b 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec) { struct net_device *dev = ecard_get_drvdata(ec);
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); ecard_set_drvdata(ec, NULL);
unregister_netdev(dev); + del_timer_sync(&priv(dev)->timer); free_netdev(dev); ecard_release_resources(ec); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 9e40c28d453a..ee3604f58def 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -35,6 +35,9 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat) /* Disable RX queues routing by default */ plat->rx_queues_cfg[0].pkt_route = 0x0;
+ plat->clk_ref_rate = 125000000; + plat->clk_ptp_rate = 125000000; + /* Default to phy auto-detection */ plat->phy_addr = -1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d6167a7b19f2..89a80e3e8bb8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2018,7 +2018,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, rx_q->queue_index = queue; rx_q->priv_data = priv;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? PP_FLAG_DMA_SYNC_DEV : 0); pp_params.pool_size = dma_conf->dma_rx_size; num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); pp_params.order = ilog2(num_pages); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index bba44ff0e287..c37500aa0637 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -1585,7 +1585,7 @@ static void wx_set_num_queues(struct wx *wx) */ static int wx_acquire_msix_vectors(struct wx *wx) { - struct irq_affinity affd = { .pre_vectors = 1 }; + struct irq_affinity affd = {0, }; int nvecs, i;
nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 65d7aaad43fe..62c10eb4f0ad 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -652,15 +652,15 @@ static int axienet_device_reset(struct net_device *ndev) * * Would either be called after a successful transmit operation, or after * there was an error when setting up the chain. - * Returns the number of descriptors handled. + * Returns the number of packets handled. */ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, int nr_bds, bool force, u32 *sizep, int budget) { struct axidma_bd *cur_p; unsigned int status; + int i, packets = 0; dma_addr_t phys; - int i;
for (i = 0; i < nr_bds; i++) { cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; @@ -679,8 +679,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE);
- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) + if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) { napi_consume_skb(cur_p->skb, budget); + packets++; + }
cur_p->app0 = 0; cur_p->app1 = 0; @@ -696,7 +698,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; }
- return i; + if (!force) { + lp->tx_bd_ci += i; + if (lp->tx_bd_ci >= lp->tx_bd_num) + lp->tx_bd_ci %= lp->tx_bd_num; + } + + return packets; }
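The axienet hunk above makes the completion helper return completed packets instead of descriptors and advance the consumer index itself, since one frame can span several DMA descriptors. A purely illustrative sketch of that accounting (the foo_desc layout is an assumption, not the driver's):

/* Frames may occupy several descriptors: the stats counter advances per
 * frame, the ring consumer index advances per descriptor.
 */
struct foo_desc {
	void *skb;		/* non-NULL only on a frame's last descriptor */
	unsigned int done;	/* completion flag written by hardware */
};

static int foo_reap_tx(struct foo_desc *ring, unsigned int *ci,
		       unsigned int ring_size, unsigned int budget)
{
	int packets = 0;
	unsigned int i;

	for (i = 0; i < budget; i++) {
		struct foo_desc *d = &ring[(*ci + i) % ring_size];

		if (!d->done)
			break;
		if (d->skb)		/* last descriptor of a frame */
			packets++;
		d->done = 0;
	}

	*ci = (*ci + i) % ring_size;	/* descriptors consumed */
	return packets;			/* frames completed */
}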
/** @@ -747,13 +755,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget) u32 size = 0; int packets;
- packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget); + packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false, + &size, budget);
if (packets) { - lp->tx_bd_ci += packets; - if (lp->tx_bd_ci >= lp->tx_bd_num) - lp->tx_bd_ci %= lp->tx_bd_num; - u64_stats_update_begin(&lp->tx_stat_sync); u64_stats_add(&lp->tx_packets, packets); u64_stats_add(&lp->tx_bytes, size); @@ -1042,9 +1047,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) u32 cr = lp->tx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); - - napi_schedule(&lp->napi_tx); + if (napi_schedule_prep(&lp->napi_tx)) { + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + __napi_schedule(&lp->napi_tx); + } }
return IRQ_HANDLED; @@ -1086,9 +1092,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) u32 cr = lp->rx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - - napi_schedule(&lp->napi_rx); + if (napi_schedule_prep(&lp->napi_rx)) { + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + __napi_schedule(&lp->napi_rx); + } }
return IRQ_HANDLED; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 6cc1b56ddde2..60c58dd6d253 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -464,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, void usbnet_defer_kevent (struct usbnet *dev, int work) { set_bit (work, &dev->flags); - if (!schedule_work (&dev->kevent)) - netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]); - else - netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]); + if (!usbnet_going_away(dev)) { + if (!schedule_work(&dev->kevent)) + netdev_dbg(dev->net, + "kevent %s may have been dropped\n", + usbnet_event_names[work]); + else + netdev_dbg(dev->net, + "kevent %s scheduled\n", usbnet_event_names[work]); + } } EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -535,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) tasklet_schedule (&dev->bh); break; case 0: - __usbnet_queue_skb(&dev->rxq, skb, rx_start); + if (!usbnet_going_away(dev)) + __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); @@ -843,9 +849,18 @@ int usbnet_stop (struct net_device *net)
/* deferred work (timer, softirq, task) must also stop */ dev->flags = 0; - del_timer_sync (&dev->delay); - tasklet_kill (&dev->bh); + del_timer_sync(&dev->delay); + tasklet_kill(&dev->bh); cancel_work_sync(&dev->kevent); + + /* We have cyclic dependencies. Those calls are needed + * to break a cycle. We cannot fall into the gaps because + * we have a flag + */ + tasklet_kill(&dev->bh); + del_timer_sync(&dev->delay); + cancel_work_sync(&dev->kevent); + if (!pm) usb_autopm_put_interface(dev->intf);
@@ -1171,7 +1186,8 @@ usbnet_deferred_kevent (struct work_struct *work) status); } else { clear_bit (EVENT_RX_HALT, &dev->flags); - tasklet_schedule (&dev->bh); + if (!usbnet_going_away(dev)) + tasklet_schedule(&dev->bh); } }
@@ -1196,7 +1212,8 @@ usbnet_deferred_kevent (struct work_struct *work) usb_autopm_put_interface(dev->intf); fail_lowmem: if (resched) - tasklet_schedule (&dev->bh); + if (!usbnet_going_away(dev)) + tasklet_schedule(&dev->bh); } }
@@ -1559,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t) } else if (netif_running (dev->net) && netif_device_present (dev->net) && netif_carrier_ok(dev->net) && + !usbnet_going_away(dev) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_PAUSED, &dev->flags) && !test_bit(EVENT_RX_HALT, &dev->flags)) { @@ -1606,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (!dev) return; + usbnet_mark_going_away(dev);
xdev = interface_to_usbdev (intf);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bc01f2dafa94..2da3be3fb942 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1269,6 +1269,11 @@ static struct sk_buff *receive_small(struct net_device *dev, struct page *page = virt_to_head_page(buf); struct sk_buff *skb;
+ /* We passed the address of virtnet header to virtio-core, + * so truncate the padding. + */ + buf -= VIRTNET_RX_PAD + xdp_headroom; + len -= vi->hdr_len; u64_stats_add(&stats->bytes, len);
@@ -1859,8 +1864,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, if (unlikely(!buf)) return -ENOMEM;
- virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom, - vi->hdr_len + GOOD_PACKET_LEN); + buf += VIRTNET_RX_PAD + xdp_headroom; + + virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index dd2a7c95517b..4bb30e403728 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -1681,9 +1681,8 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu * length. */ - ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] & - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >> - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK; + ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) { if (sta->deflink.vht_cap.vht_supported) diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 21399ad233c0..9105fdd14c66 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -1501,6 +1501,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST, sizeof(*cmd)); cmd->req_type = cpu_to_le32(type); + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI bss chan info req type %d\n", type); diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index c75a6fa1f7e0..a19a2c29f226 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -3058,6 +3058,7 @@ struct wmi_pdev_bss_chan_info_req_cmd { __le32 tlv_header; /* ref wmi_bss_chan_info_req_type */ __le32 req_type; + __le32 pdev_id; } __packed;
struct wmi_ap_ps_peer_cmd { @@ -4033,7 +4034,6 @@ struct wmi_vdev_stopped_event { } __packed;
struct wmi_pdev_bss_chan_info_event { - __le32 pdev_id; __le32 freq; /* Units in MHz */ __le32 noise_floor; /* units are dBm */ /* rx clear - how often the channel was unused */ @@ -4051,6 +4051,7 @@ struct wmi_pdev_bss_chan_info_event { /*rx_cycle cnt for my bss in 64bits format */ __le32 rx_bss_cycle_count_low; __le32 rx_bss_cycle_count_high; + __le32 pdev_id; } __packed;
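The ath12k WMI hunks above add pdev_id to the request and move it to the end of the event structure, presumably so the struct mirrors the layout the firmware actually emits. As a hedged reminder of why such wire structs are laid out this way (names below are illustrative, not ath12k's):

/* Structures shared with firmware must match its field order exactly:
 * fixed-endian types and __packed keep the compiler from reordering or
 * padding them.
 */
#include <linux/types.h>

struct foo_chan_info_event {
	__le32 freq;
	__le32 noise_floor;
	/* ... cycle counters ... */
	__le32 pdev_id;		/* placed last, where the firmware puts it */
} __packed;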
#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index a0376a6787b8..808fb6747a7f 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1380,8 +1380,6 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy = debugfs_create_dir("ath9k", sc->hw->wiphy->debugfsdir); - if (IS_ERR(sc->debug.debugfs_phy)) - return -ENOMEM;
#ifdef CONFIG_ATH_DEBUG debugfs_create_file("debug", 0600, sc->debug.debugfs_phy, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c index 278ddc713fdc..7b1452822431 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c @@ -486,8 +486,6 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME, priv->hw->wiphy->debugfsdir); - if (IS_ERR(priv->debug.debugfs_phy)) - return -ENOMEM;
ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c index 7ea2631b8069..00794086cc7c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c @@ -123,7 +123,7 @@ static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data) { *data = addr;
- return brcmf_fil_iovar_int_get(ifp, "btc_params", data); + return brcmf_fil_iovar_int_query(ifp, "btc_params", data); }
/** diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index da4968e66725..c708ae91c3ce 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -663,8 +663,8 @@ static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr) /* interface_create version 3+ */ /* get supported version from firmware side */ iface_create_ver = 0; - err = brcmf_fil_bsscfg_int_get(ifp, "interface_create", - &iface_create_ver); + err = brcmf_fil_bsscfg_int_query(ifp, "interface_create", + &iface_create_ver); if (err) { brcmf_err("fail to get supported version, err=%d\n", err); return -EOPNOTSUPP; @@ -756,8 +756,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) /* interface_create version 3+ */ /* get supported version from firmware side */ iface_create_ver = 0; - err = brcmf_fil_bsscfg_int_get(ifp, "interface_create", - &iface_create_ver); + err = brcmf_fil_bsscfg_int_query(ifp, "interface_create", + &iface_create_ver); if (err) { brcmf_err("fail to get supported version, err=%d\n", err); return -EOPNOTSUPP; @@ -2101,7 +2101,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) if (!sme->crypto.n_akm_suites) return 0;
- err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val); + err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), + "wpa_auth", &val); if (err) { bphy_err(drvr, "could not get wpa_auth (%d)\n", err); return err; @@ -2680,7 +2681,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev); struct brcmf_pub *drvr = cfg->pub; - s32 qdbm = 0; + s32 qdbm; s32 err;
brcmf_dbg(TRACE, "Enter\n"); @@ -7046,8 +7047,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; - err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info", - &chaninfo); + err = brcmf_fil_bsscfg_int_query(ifp, "per_chan_info", + &chaninfo); if (!err) { if (chaninfo & WL_CHAN_RADAR) channel->flags |= @@ -7081,7 +7082,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
/* verify support for bw_cap command */ val = WLC_BAND_5G; - err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &val);
if (!err) { /* only set 2G bandwidth using bw_cap command */ @@ -7157,11 +7158,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) int err;
band = WLC_BAND_2G; - err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band); if (!err) { bw_cap[NL80211_BAND_2GHZ] = band; band = WLC_BAND_5G; - err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band); if (!err) { bw_cap[NL80211_BAND_5GHZ] = band; return; @@ -7170,7 +7171,6 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) return; } brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n"); - mimo_bwcap = 0; err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap); if (err) /* assume 20MHz if firmware does not give a clue */ @@ -7266,7 +7266,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) struct brcmf_pub *drvr = cfg->pub; struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); struct wiphy *wiphy = cfg_to_wiphy(cfg); - u32 nmode = 0; + u32 nmode; u32 vhtmode = 0; u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT }; u32 rxchain; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c index a9514d72f770..6385a7db7f7d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c @@ -142,6 +142,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err; } +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_set);
s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len) @@ -160,36 +161,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err; } - - -s32 -brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data) -{ - s32 err; - __le32 data_le = cpu_to_le32(data); - - mutex_lock(&ifp->drvr->proto_block); - brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data); - err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true); - mutex_unlock(&ifp->drvr->proto_block); - - return err; -} - -s32 -brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data) -{ - s32 err; - __le32 data_le = cpu_to_le32(*data); - - mutex_lock(&ifp->drvr->proto_block); - err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false); - mutex_unlock(&ifp->drvr->proto_block); - *data = le32_to_cpu(data_le); - brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data); - - return err; -} +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_get);
static u32 brcmf_create_iovar(const char *name, const char *data, u32 datalen, @@ -271,26 +243,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data, mutex_unlock(&drvr->proto_block); return err; } - -s32 -brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data) -{ - __le32 data_le = cpu_to_le32(data); - - return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le)); -} - -s32 -brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data) -{ - __le32 data_le = cpu_to_le32(*data); - s32 err; - - err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le)); - if (err == 0) - *data = le32_to_cpu(data_le); - return err; -} +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_get);
static u32 brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen, @@ -365,6 +318,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, mutex_unlock(&drvr->proto_block); return err; } +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_set);
s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, @@ -395,28 +349,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, mutex_unlock(&drvr->proto_block); return err; } - -s32 -brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data) -{ - __le32 data_le = cpu_to_le32(data); - - return brcmf_fil_bsscfg_data_set(ifp, name, &data_le, - sizeof(data_le)); -} - -s32 -brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data) -{ - __le32 data_le = cpu_to_le32(*data); - s32 err; - - err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le, - sizeof(data_le)); - if (err == 0) - *data = le32_to_cpu(data_le); - return err; -} +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_get);
static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len, char *buf, u32 buflen) @@ -466,6 +399,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id, mutex_unlock(&drvr->proto_block); return err; } +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_set);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id, void *data, u32 len) @@ -495,39 +429,4 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id, mutex_unlock(&drvr->proto_block); return err; } - -s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data) -{ - __le32 data_le = cpu_to_le32(data); - - return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le, - sizeof(data_le)); -} - -s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data) -{ - __le32 data_le = cpu_to_le32(*data); - s32 err; - - err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le)); - if (err == 0) - *data = le32_to_cpu(data_le); - return err; -} - -s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data) -{ - return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data)); -} - -s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data) -{ - __le16 data_le = cpu_to_le16(*data); - s32 err; - - err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le)); - if (err == 0) - *data = le16_to_cpu(data_le); - return err; -} - +BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_get); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h index bc693157c4b1..31e080e4da66 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h @@ -81,29 +81,142 @@
s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len); s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len); -s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data); -s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data); +static inline +s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data) +{ + s32 err; + __le32 data_le = cpu_to_le32(data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data, - u32 len); + brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data); + err = brcmf_fil_cmd_data_set(ifp, cmd, &data_le, sizeof(data_le)); + + return err; +} +static inline +s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data) +{ + s32 err; + + err = brcmf_fil_cmd_data_get(ifp, cmd, data, sizeof(*data)); + if (err == 0) + *data = le32_to_cpu(*(__le32 *)data); + brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data); + + return err; +} +static inline +s32 brcmf_fil_cmd_int_query(struct brcmf_if *ifp, u32 cmd, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_cmd_int_get(ifp, cmd, data); +} + +s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, + const void *data, u32 len); s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data, u32 len); -s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data); -s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data); - -s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data, - u32 len); -s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data, - u32 len); -s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data); -s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data); +static inline +s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data) +{ + __le32 data_le = cpu_to_le32(data); + + return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le)); +} +static inline +s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data) +{ + s32 err; + + err = brcmf_fil_iovar_data_get(ifp, name, data, sizeof(*data)); + if (err == 0) + *data = le32_to_cpu(*(__le32 *)data); + return err; +} +static inline +s32 brcmf_fil_iovar_int_query(struct brcmf_if *ifp, const char *name, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_iovar_int_get(ifp, name, data); +} + + +s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, + void *data, u32 len); +s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, + void *data, u32 len); +static inline +s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data) +{ + __le32 data_le = cpu_to_le32(data); + + return brcmf_fil_bsscfg_data_set(ifp, name, &data_le, + sizeof(data_le)); +} +static inline +s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data) +{ + s32 err; + + err = brcmf_fil_bsscfg_data_get(ifp, name, data, sizeof(*data)); + if (err == 0) + *data = le32_to_cpu(*(__le32 *)data); + return err; +} +static inline +s32 brcmf_fil_bsscfg_int_query(struct brcmf_if *ifp, const char *name, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_bsscfg_int_get(ifp, name, data); +} + s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id, void *data, u32 len); s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id, void *data, u32 len); -s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data); -s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data); -s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data); -s32 
brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data); +static inline +s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, + u32 data) +{ + __le32 data_le = cpu_to_le32(data); + + return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le, + sizeof(data_le)); +} +static inline +s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, + u32 *data) +{ + __le32 data_le = cpu_to_le32(*data); + s32 err; + + err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le)); + if (err == 0) + *data = le32_to_cpu(data_le); + return err; +} +static inline +s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, + u8 *data) +{ + return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data)); +} +static inline +s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, + u16 *data) +{ + __le16 data_le = cpu_to_le16(*data); + s32 err; + + err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le)); + if (err == 0) + *data = le16_to_cpu(data_le); + return err; +}
#endif /* _fwil_h_ */ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index cc71b513adf9..cebd3c91756f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -152,6 +152,17 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, };
+const struct iwl_cfg_trans_params iwl_gl_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_BZ, + .base_params = &iwl_bz_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, +}; + const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const struct iwl_cfg iwl_cfg_bz = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f45f645ca648..dd3913617bb0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -493,6 +493,7 @@ extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg; extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 243eccc68cb0..f7bec6f3d758 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -103,7 +103,7 @@ #define IWL_MVM_FTM_INITIATOR_SECURE_LTF false #define IWL_MVM_FTM_RESP_NDP_SUPPORT true #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true -#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5 +#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7 #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index dea4d6478b4f..4a2de79f2e86 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -501,10 +501,38 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */ - {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */ diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 85bffcf4f6fb..bf4541e76ba2 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1503,7 +1503,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid) { - struct mt76_phy *phy = dev->phys[wcid->phy_idx]; + struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx); struct ieee80211_hw *hw; struct sk_buff_head list; struct sk_buff *skb; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index b3a61b0ddd03..525444953df6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -29,7 +29,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) struct ieee80211_sta *sta; struct mt7603_sta *msta; struct mt76_wcid *wcid; - u8 tid = 0, hwq = 0; + u8 qid, tid = 0, hwq = 0; void *priv; int idx; u32 val; @@ -57,7 +57,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) if (ieee80211_is_data_qos(hdr->frame_control)) { tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TAG1D_MASK; - u8 qid = tid_to_ac[tid]; + qid = tid_to_ac[tid]; hwq = wmm_queue_map[qid]; skb_set_queue_mapping(skb, qid); } else if (ieee80211_is_data(hdr->frame_control)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 18a50ccff106..f22a1aa88505 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM; + hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev, mt7615_hwmon_groups); if (IS_ERR(hwmon)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 35fdf4f98d80..e6af7318a9e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM;
cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops); if (!IS_ERR(cdev)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 260fe00d7dc6..27655dcb7914 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -557,8 +557,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS | MT_WF_RFCR_DROP_RTS | - MT_WF_RFCR_DROP_CTL_RSV | - MT_WF_RFCR_DROP_NDPA); + MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags; mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index ff63f37f67d9..61de6b03fa0b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM;
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7921_hwmon_groups); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index 2016ed9197fe..aee531cab46f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -561,8 +561,6 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy, return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; - if (vif == NL80211_IFTYPE_AP) - elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, sts - 1) | @@ -570,6 +568,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy, sts - 1); elem->phy_cap_info[5] |= c;
+ if (vif != NL80211_IFTYPE_AP) + return; + + elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; + c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB; elem->phy_cap_info[6] |= c; @@ -729,7 +732,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
eht_cap_elem->phy_cap_info[0] = - IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; @@ -743,30 +745,36 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)), IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) | u8_encode_bits(val, - IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) | - u8_encode_bits(val, - IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
eht_cap_elem->phy_cap_info[2] = u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) | - u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) | - u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK); + u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK); + + if (band == NL80211_BAND_6GHZ) { + eht_cap_elem->phy_cap_info[0] |= + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + + eht_cap_elem->phy_cap_info[1] |= + u8_encode_bits(val, + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); + + eht_cap_elem->phy_cap_info[2] |= + u8_encode_bits(sts - 1, + IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK); + }
eht_cap_elem->phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | - IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
eht_cap_elem->phy_cap_info[4] = u8_encode_bits(min_t(int, sts - 1, 2), IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
eht_cap_elem->phy_cap_info[5] = - IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US, IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) | u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)), @@ -780,14 +788,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) | u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
- eht_cap_elem->phy_cap_info[7] = - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; - val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) | u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX); #define SET_EHT_MAX_NSS(_bw, _val) do { \ @@ -798,8 +798,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
SET_EHT_MAX_NSS(80, val); SET_EHT_MAX_NSS(160, val); - SET_EHT_MAX_NSS(320, val); + if (band == NL80211_BAND_6GHZ) + SET_EHT_MAX_NSS(320, val); #undef SET_EHT_MAX_NSS + + if (iftype != NL80211_IFTYPE_AP) + return; + + eht_cap_elem->phy_cap_info[3] |= + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK; + + eht_cap_elem->phy_cap_info[7] = + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ; + + if (band != NL80211_BAND_6GHZ) + return; + + eht_cap_elem->phy_cap_info[7] |= + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; }
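The mt7996 capability hunks above restructure the code so baseline bits are set first and AP-only or 6 GHz-only bits are layered on behind early returns, instead of advertising everything unconditionally. A tiny, purely illustrative sketch of that structure:

/* Start from capabilities every interface type supports, then add the
 * conditional ones; early returns keep the gating readable.
 */
#include <linux/bits.h>
#include <linux/types.h>

#define FOO_CAP_BASE	BIT(0)
#define FOO_CAP_MU_BF	BIT(1)	/* AP only */
#define FOO_CAP_320MHZ	BIT(2)	/* 6 GHz only */

static u32 foo_build_caps(bool is_ap, bool is_6ghz)
{
	u32 caps = FOO_CAP_BASE;

	if (is_6ghz)
		caps |= FOO_CAP_320MHZ;

	if (!is_ap)
		return caps;

	caps |= FOO_CAP_MU_BF;
	return caps;
}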
static void diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index 7fea9f0d409b..0e69f0a50861 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -190,7 +190,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw, mvif->mt76.omac_idx = idx; mvif->phy = phy; mvif->mt76.band_idx = band_idx; - mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP; + mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
ret = mt7996_mcu_add_dev_info(phy, vif, true); if (ret) @@ -287,6 +287,10 @@ int mt7996_set_channel(struct mt7996_phy *phy) if (ret) goto out;
+ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH); + if (ret) + goto out; + ret = mt7996_dfs_init_radar_detector(phy); mt7996_mac_cca_stats_reset(phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index b66f712e1b17..302171e10359 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -520,13 +520,10 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb) static struct tlv * mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len) { - struct tlv *ptlv, tlv = { - .tag = cpu_to_le16(tag), - .len = cpu_to_le16(len), - }; + struct tlv *ptlv = skb_put_zero(skb, len);
-	ptlv = skb_put(skb, len);
-	memcpy(ptlv, &tlv, sizeof(tlv));
+	ptlv->tag = cpu_to_le16(tag);
+	ptlv->len = cpu_to_le16(len);
return ptlv; } @@ -1188,10 +1185,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
 		if (bfee)
 			return vif->bss_conf.eht_su_beamformee &&
-			       EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+			       EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
 		else
 			return vif->bss_conf.eht_su_beamformer &&
-			       EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+			       EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
 	}
if (sta->deflink.he_cap.has_he) { @@ -1303,6 +1300,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map); u8 snd_dim, sts;
+	if (!vc)
+		return;
+
 	bf->tx_mode = MT_PHY_TYPE_HE_SU;
mt7996_mcu_sta_sounding_rate(bf); @@ -1412,7 +1412,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_phy *phy = mvif->phy; - int tx_ant = hweight8(phy->mt76->chainmask) - 1; + int tx_ant = hweight16(phy->mt76->chainmask) - 1; struct sta_rec_bf *bf; struct tlv *tlv; const u8 matrix[4][4] = { @@ -2072,7 +2072,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, info = IEEE80211_SKB_CB(skb); info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
- len = sizeof(*bcn) + MT_TXD_SIZE + skb->len; + len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4); tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len); bcn = (struct bss_bcn_content_tlv *)tlv; bcn->enable = en; @@ -2141,8 +2141,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, info->band = band; info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
- len = sizeof(*discov) + MT_TXD_SIZE + skb->len; - + len = ALIGN(sizeof(*discov) + MT_TXD_SIZE + skb->len, 4); tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
discov = (struct bss_inband_discovery_tlv *)tlv; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h index dc8d0a30c707..58504b80eae8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h @@ -587,10 +587,10 @@ enum { sizeof(struct sta_rec_hdr_trans) + \ sizeof(struct tlv))
-#define MT7996_MAX_BEACON_SIZE 1342 +#define MT7996_MAX_BEACON_SIZE 1338 #define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \ sizeof(struct bss_bcn_content_tlv) + \ - MT_TXD_SIZE + \ + 4 + MT_TXD_SIZE + \ sizeof(struct bss_bcn_cntdwn_tlv) + \ sizeof(struct bss_bcn_mbss_tlv)) #define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \ diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index e4bb3ea6e226..25e881ee727c 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -381,6 +381,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, struct wilc_join_bss_param *param; u8 rates_len = 0; int ies_len; + u64 ies_tsf; int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL); @@ -396,6 +397,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, return NULL; } ies_len = ies->len; + ies_tsf = ies->tsf; rcu_read_unlock();
param->beacon_period = cpu_to_le16(bss->beacon_interval); @@ -451,7 +453,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *)&noa_attr, sizeof(noa_attr)); if (ret > 0) { - param->tsf_lo = cpu_to_le32(ies->tsf); + param->tsf_lo = cpu_to_le32(ies_tsf); param->noa_enabled = 1; param->idx = noa_attr.index; if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) { diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index 86467d2f8888..d35f26919806 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) struct rtw_coex_stat *coex_stat = &coex->stat; struct rtw_efuse *efuse = &rtwdev->efuse; u8 table_case, tdma_case; - bool wl_cpt_test = false, bt_cpt_test = false;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); if (efuse->share_ant) { /* Shared-Ant */ - if (wl_cpt_test) { - if (coex_stat->wl_gl_busy) { - table_case = 20; - tdma_case = 17; - } else { - table_case = 10; - tdma_case = 15; - } - } else if (bt_cpt_test) { - table_case = 26; - tdma_case = 26; - } else { - if (coex_stat->wl_gl_busy && - coex_stat->wl_noisy_level == 0) - table_case = 14; - else - table_case = 10; + if (coex_stat->wl_gl_busy && + coex_stat->wl_noisy_level == 0) + table_case = 14; + else + table_case = 10;
- if (coex_stat->wl_gl_busy) - tdma_case = 15; - else - tdma_case = 20; - } + if (coex_stat->wl_gl_busy) + tdma_case = 15; + else + tdma_case = 20; } else { /* Non-Shared-Ant */ table_case = 112; @@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) tdma_case = 120; }
- if (wl_cpt_test) - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]); - else - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index a1b674e3caaa..3596cf99c2ed 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -1388,10 +1388,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, val |= BIT_ENSWBCN >> 8; rtw_write8(rtwdev, REG_CR + 1, val);
- val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); - bckp[1] = val; - val &= ~(BIT_EN_BCNQ_DL >> 16); - rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) { + val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); + bckp[1] = val; + val &= ~(BIT_EN_BCNQ_DL >> 16); + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); + }
ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size); if (ret) { @@ -1416,7 +1418,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, rsvd_pg_head = rtwdev->fifo.rsvd_boundary; rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, rsvd_pg_head | BIT_BCN_VALID_V1); - rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
return ret; diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 63673005c2fb..b90ea6c88b15 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1314,20 +1314,21 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw; + int ret = 0;
 	fw = &rtwdev->fw;
 	wait_for_completion(&fw->completion);
 	if (!fw->firmware)
-		return -EINVAL;
+		ret = -EINVAL;

 	if (chip->wow_fw_name) {
 		fw = &rtwdev->wow_fw;
 		wait_for_completion(&fw->completion);
 		if (!fw->firmware)
-			return -EINVAL;
+			ret = -EINVAL;
 	}

-	return 0;
+	return ret;
 }
static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c index e2c7d9f87683..a019f4085e73 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c @@ -31,8 +31,6 @@ static const struct usb_device_id rtw_8821cu_id_table[] = { .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, - { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index cd965edc29ce..3fe5c70ce731 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -2611,12 +2611,14 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, else rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
-	if (rxsc >= 9 && rxsc <= 12)
+	if (rxsc == 0)
+		bw = rtwdev->hal.current_band_width;
+	else if (rxsc >= 1 && rxsc <= 8)
+		bw = RTW_CHANNEL_WIDTH_20;
+	else if (rxsc >= 9 && rxsc <= 12)
 		bw = RTW_CHANNEL_WIDTH_40;
-	else if (rxsc >= 13)
-		bw = RTW_CHANNEL_WIDTH_80;
 	else
-		bw = RTW_CHANNEL_WIDTH_20;
+		bw = RTW_CHANNEL_WIDTH_80;
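Read as a lookup, the corrected mapping is: rxsc 0 means the full current channel width, 1-8 a 20 MHz sub-channel, 9-12 a 40 MHz sub-channel, and everything above that 80 MHz. A standalone sketch with local stand-in values (not the rtw88 RTW_CHANNEL_WIDTH_* enums):

#include <stdio.h>

enum width { W20, W40, W80, W_CURRENT };   /* stand-ins for the driver's enums */

/* rxsc == 0: frame occupied the full current bandwidth;
 * 1..8 map to 20 MHz sub-channels, 9..12 to 40 MHz, anything above to 80 MHz. */
static enum width rxsc_to_bw(unsigned int rxsc)
{
	if (rxsc == 0)
		return W_CURRENT;
	if (rxsc <= 8)
		return W20;
	if (rxsc <= 12)
		return W40;
	return W80;
}

int main(void)
{
	for (unsigned int rxsc = 0; rxsc <= 14; rxsc++)
		printf("rxsc %2u -> bw %d\n", rxsc, rxsc_to_bw(rxsc));
	return 0;
}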
channel = GET_PHY_STAT_P1_CHANNEL(phy_status); rtw_set_rx_freq_band(pkt_stat, channel); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 9ab836d0d4f1..079b8cd79785 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -778,7 +778,7 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev) ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->ntb.pdev), debugfs_dir); - if (!ndev->debugfs_dir) + if (IS_ERR(ndev->debugfs_dir)) ndev->debugfs_info = NULL; else ndev->debugfs_info = diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index f9e7847a378e..c84fadfc63c5 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -807,16 +807,29 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) }
static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, - struct device *dma_dev, size_t align) + struct device *ntb_dev, size_t align) { dma_addr_t dma_addr; void *alloc_addr, *virt_addr; int rc;
- alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size, - &dma_addr, GFP_KERNEL); + /* + * The buffer here is allocated against the NTB device. The reason to + * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer + * backing the NTB BAR for the remote host to write to. During receive + * processing, the data is being copied out of the receive buffer to + * the kernel skbuff. When a DMA device is being used, dma_map_page() + * is called on the kvaddr of the receive buffer (from dma_alloc_*()) + * and remapped against the DMA device. It appears to be a double + * DMA mapping of buffers, but first is mapped to the NTB device and + * second is to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary + * in order for the later dma_map_page() to not fail. + */ + alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size, + &dma_addr, GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); if (!alloc_addr) { - dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n", + dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n", mw->alloc_size); return -ENOMEM; } @@ -845,7 +858,7 @@ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, return 0;
err: - dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr); + dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr);
return rc; } diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 553f1f46bc66..72bc1d017a46 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -1227,7 +1227,7 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf, "\tOut buffer addr 0x%pK\n", peer->outbuf);
 	pos += scnprintf(buf + pos, buf_size - pos,
-			 "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);
+			 "\tOut buff phys addr %pap\n", &peer->out_phys_addr);
pos += scnprintf(buf + pos, buf_size - pos, "\tOut buffer size %pa\n", &peer->outbuf_size); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 07177eadc56e..1ea8c27e8874 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1927,12 +1927,16 @@ static int cmp_dpa(const void *a, const void *b) static struct device **scan_labels(struct nd_region *nd_region) { int i, count = 0; - struct device *dev, **devs = NULL; + struct device *dev, **devs; struct nd_label_ent *label_ent, *e; struct nd_mapping *nd_mapping = &nd_region->mapping[0]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
+ devs = kcalloc(2, sizeof(dev), GFP_KERNEL); + if (!devs) + return NULL; + /* "safe" because create_namespace_pmem() might list_move() label_ent */ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; @@ -1951,12 +1955,14 @@ static struct device **scan_labels(struct nd_region *nd_region) goto err; if (i < count) continue; - __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); - if (!__devs) - goto err; - memcpy(__devs, devs, sizeof(dev) * count); - kfree(devs); - devs = __devs; + if (count) { + __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); + if (!__devs) + goto err; + memcpy(__devs, devs, sizeof(dev) * count); + kfree(devs); + devs = __devs; + }
dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); if (IS_ERR(dev)) { @@ -1983,11 +1989,6 @@ static struct device **scan_labels(struct nd_region *nd_region)
/* Publish a zero-sized namespace for userspace to configure. */ nd_mapping_free_labels(nd_mapping); - - devs = kcalloc(2, sizeof(dev), GFP_KERNEL); - if (!devs) - goto err; - nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); if (!nspm) goto err; @@ -2026,11 +2027,10 @@ static struct device **scan_labels(struct nd_region *nd_region) return devs;
err: - if (devs) { - for (i = 0; devs[i]; i++) - namespace_pmem_release(devs[i]); - kfree(devs); - } + for (i = 0; devs[i]; i++) + namespace_pmem_release(devs[i]); + kfree(devs); + return NULL; }
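The scan_labels() change above switches to the usual grow-by-copy idiom for a NULL-terminated pointer array: allocate room for the current entries plus one new slot and a terminator, copy, then free the old array. A plain-C sketch of that idiom, using malloc()/calloc() rather than the kernel allocators:

/* Sketch of the grow-by-copy pattern; "names" stands in for the device pointers. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static char **grow(char **arr, size_t count)
{
	/* count used slots -> allocate count + 2 so there is room for one
	 * new entry plus the NULL terminator, then copy and free the old. */
	char **bigger = calloc(count + 2, sizeof(*arr));

	if (!bigger)
		return NULL;
	memcpy(bigger, arr, count * sizeof(*arr));
	free(arr);
	return bigger;
}

int main(void)
{
	char **arr = calloc(2, sizeof(*arr));   /* room for one entry + NULL */
	size_t count = 0;
	const char *names[] = { "ns0", "ns1", "ns2" };

	for (size_t i = 0; arr && i < 3; i++) {
		if (count) {
			char **tmp = grow(arr, count);
			if (!tmp) {
				free(arr);
				return 1;
			}
			arr = tmp;
		}
		arr[count++] = (char *)names[i];
	}
	for (size_t i = 0; arr && arr[i]; i++)
		printf("%s\n", arr[i]);
	free(arr);
	return 0;
}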
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 645a6b132220..37ea0fa421da 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -585,7 +585,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) rc = device_add_disk(&head->subsys->dev, head->disk, nvme_ns_id_attr_groups); if (rc) { - clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); + clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); return; } nvme_add_ns_head_cdev(head); diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index b445ffe95e3f..0e29a76ca530 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -841,7 +841,8 @@ static int dra7xx_pcie_probe(struct platform_device *pdev) dra7xx->mode = mode;
ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler, - IRQF_SHARED, "dra7xx-pcie-main", dra7xx); + IRQF_SHARED | IRQF_ONESHOT, + "dra7xx-pcie-main", dra7xx); if (ret) { dev_err(dev, "failed to request irq\n"); goto err_gpio; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 74703362aeec..86b09b5d7f24 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -997,7 +997,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) ret = phy_power_on(imx6_pcie->phy); if (ret) { dev_err(dev, "waiting for PHY ready timeout!\n"); - goto err_phy_off; + goto err_phy_exit; } }
@@ -1012,8 +1012,9 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) return 0;
err_phy_off: - if (imx6_pcie->phy) - phy_exit(imx6_pcie->phy); + phy_power_off(imx6_pcie->phy); +err_phy_exit: + phy_exit(imx6_pcie->phy); err_clk_disable: imx6_pcie_clk_disable(imx6_pcie); err_reg_disable: diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index c1dedc83759c..c5475830c835 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -579,7 +579,7 @@ static void ks_pcie_quirk(struct pci_dev *dev) */ if (pci_match_id(am6_pci_devids, bridge)) { bridge_dev = pci_get_host_bridge_device(dev); - if (!bridge_dev && !bridge_dev->parent) + if (!bridge_dev || !bridge_dev->parent) return;
ks_pcie = dev_get_drvdata(bridge_dev->parent); diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 2ee146767971..421697ec7591 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -415,12 +415,12 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, if (pcie->gpio_id_reset[i] < 0) continue;
-		pcie->num_slots++;
-		if (pcie->num_slots > MAX_PCI_SLOTS) {
+		if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) {
 			dev_err(dev, "Too many PCI slots!\n");
 			ret = -EINVAL;
 			goto put_node;
 		}
+		pcie->num_slots++;
ret = of_pci_get_devfn(child); if (ret < 0) { diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 176686bdb15c..5b82098f32b7 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -80,8 +80,8 @@ #define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) #define MSGF_MISC_SR_FATAL_DEV BIT(23) #define MSGF_MISC_SR_LINK_DOWN BIT(24) -#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) -#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) +#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25) +#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ MSGF_MISC_SR_RXMSG_OVER | \ @@ -96,8 +96,8 @@ MSGF_MISC_SR_NON_FATAL_DEV | \ MSGF_MISC_SR_FATAL_DEV | \ MSGF_MISC_SR_LINK_DOWN | \ - MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ - MSGF_MSIC_SR_LINK_BWIDTH) + MSGF_MISC_SR_LINK_AUTO_BWIDTH | \ + MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */ #define MSGF_LEG_SR_INTA BIT(0) @@ -301,10 +301,10 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) if (misc_stat & MSGF_MISC_SR_FATAL_DEV) dev_err(dev, "Fatal Error Detected\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) + if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH) dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) + if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH) dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */ @@ -373,7 +373,7 @@ static void nwl_mask_leg_irq(struct irq_data *data) u32 mask; u32 val;
- mask = 1 << (data->hwirq - 1); + mask = 1 << data->hwirq; raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); @@ -387,7 +387,7 @@ static void nwl_unmask_leg_irq(struct irq_data *data) u32 mask; u32 val;
- mask = 1 << (data->hwirq - 1); + mask = 1 << data->hwirq; raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); @@ -790,6 +790,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) return -ENODEV;
pcie = pci_host_bridge_priv(bridge); + platform_set_drvdata(pdev, pcie);
pcie->dev = dev; pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; @@ -813,13 +814,13 @@ static int nwl_pcie_probe(struct platform_device *pdev) err = nwl_pcie_bridge_init(pcie); if (err) { dev_err(dev, "HW Initialization failed\n"); - return err; + goto err_clk; }
err = nwl_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); - return err; + goto err_clk; }
bridge->sysdata = pcie; @@ -829,11 +830,24 @@ static int nwl_pcie_probe(struct platform_device *pdev) err = nwl_pcie_enable_msi(pcie); if (err < 0) { dev_err(dev, "failed to enable MSI support: %d\n", err); - return err; + goto err_clk; } }
- return pci_host_probe(bridge); + err = pci_host_probe(bridge); + if (!err) + return 0; + +err_clk: + clk_disable_unprepare(pcie->clk); + return err; +} + +static void nwl_pcie_remove(struct platform_device *pdev) +{ + struct nwl_pcie *pcie = platform_get_drvdata(pdev); + + clk_disable_unprepare(pcie->clk); }
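The new error handling in nwl_pcie_probe() is the standard goto-unwind shape: once the clock is enabled, every later failure must branch to a label that releases it. A compact illustration with stub functions (enable_clock() and friends are stand-ins, not kernel APIs):

#include <stdio.h>

static int enable_clock(void)   { return 0; }
static void disable_clock(void) { puts("clock released"); }
static int setup_irq(void)      { return 0; }
static int register_bus(void)   { return -1; /* pretend this step fails */ }

static int probe(void)
{
	int err = enable_clock();

	if (err)
		return err;

	err = setup_irq();
	if (err)
		goto err_clk;

	err = register_bus();
	if (err)
		goto err_clk;

	return 0;

err_clk:
	disable_clock();
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}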
static struct platform_driver nwl_pcie_driver = { @@ -843,5 +857,6 @@ static struct platform_driver nwl_pcie_driver = { .of_match_table = nwl_pcie_of_match, }, .probe = nwl_pcie_probe, + .remove_new = nwl_pcie_remove, }; builtin_platform_driver(nwl_pcie_driver); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 53e9e9788bd5..93f2f4dcf6d6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1208,7 +1208,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) if (delay > PCI_RESET_WAIT) { if (retrain) { retrain = false; - if (pcie_failed_link_retrain(bridge)) { + if (pcie_failed_link_retrain(bridge) == 0) { delay = 1; continue; } @@ -5017,7 +5017,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); }
- return pcie_wait_for_link_status(pdev, use_lt, !use_lt); + rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); + + /* + * Clear LBMS after a manual retrain so that the bit can be used + * to track link speed or width changes made by hardware itself + * in attempt to correct unreliable link operation. + */ + pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); + return rc; }
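LBMS in the Link Status register is a latched, write-1-to-clear bit, so leaving it set after a deliberate retrain would make later bandwidth-management checks see a stale event. A toy model of that semantics (the simulated register below is only an illustration, not real config-space access):

#include <stdint.h>
#include <stdio.h>

#define LNKSTA_LBMS 0x4000               /* treat the value as illustrative */

static uint16_t lnksta = LNKSTA_LBMS;    /* hardware latched a bandwidth event */

static void write_lnksta(uint16_t val)
{
	lnksta &= ~(val & LNKSTA_LBMS);  /* writing 1 clears the latched bit */
}

int main(void)
{
	printf("before clear: LBMS=%d\n", !!(lnksta & LNKSTA_LBMS));
	write_lnksta(LNKSTA_LBMS);       /* analogous to the clear after retraining */
	printf("after clear:  LBMS=%d\n", !!(lnksta & LNKSTA_LBMS));
	return 0;
}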
/** @@ -5875,8 +5883,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_restore(dev); - if (dev->subordinate) + if (dev->subordinate) { + pci_bridge_wait_for_secondary_bus(dev, "bus reset"); pci_bus_restore_locked(dev->subordinate); + } } }
@@ -5910,8 +5920,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot) if (!dev->slot || dev->slot != slot) continue; pci_dev_restore(dev); - if (dev->subordinate) + if (dev->subordinate) { + pci_bridge_wait_for_secondary_bus(dev, "slot reset"); pci_bus_restore_locked(dev->subordinate); + } } }
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 2cc032e8cbb9..d5e9010a135a 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -530,7 +530,7 @@ void pci_acs_init(struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); int pci_dev_specific_enable_acs(struct pci_dev *dev); int pci_dev_specific_disable_acs_redir(struct pci_dev *dev); -bool pcie_failed_link_retrain(struct pci_dev *dev); +int pcie_failed_link_retrain(struct pci_dev *dev); #else static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) @@ -545,9 +545,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) { return -ENOTTY; } -static inline bool pcie_failed_link_retrain(struct pci_dev *dev) +static inline int pcie_failed_link_retrain(struct pci_dev *dev) { - return false; + return -ENOTTY; } #endif
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ec4277d7835b..0b08ac45effb 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -66,7 +66,7 @@ * apply this erratum workaround to any downstream ports as long as they * support Link Active reporting and have the Link Control 2 register. * Restrict the speed to 2.5GT/s then with the Target Link Speed field, - * request a retrain and wait 200ms for the data link to go up. + * request a retrain and check the result. * * If this turns out successful and we know by the Vendor:Device ID it is * safe to do so, then lift the restriction, letting the devices negotiate @@ -74,33 +74,45 @@ * firmware may have already arranged and lift it with ports that already * report their data link being up. * - * Return TRUE if the link has been successfully retrained, otherwise FALSE. + * Otherwise revert the speed to the original setting and request a retrain + * again to remove any residual state, ignoring the result as it's supposed + * to fail anyway. + * + * Return 0 if the link has been successfully retrained. Return an error + * if retraining was not needed or we attempted a retrain and it failed. */ -bool pcie_failed_link_retrain(struct pci_dev *dev) +int pcie_failed_link_retrain(struct pci_dev *dev) { static const struct pci_device_id ids[] = { { PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */ {} }; u16 lnksta, lnkctl2; + int ret = -ENOTTY;
if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) || !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting) - return false; + return ret;
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) == PCI_EXP_LNKSTA_LBMS) { + u16 oldlnkctl2 = lnkctl2; + pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT; pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
- if (pcie_retrain_link(dev, false)) { + ret = pcie_retrain_link(dev, false); + if (ret) { pci_info(dev, "retraining failed\n"); - return false; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, + oldlnkctl2); + pcie_retrain_link(dev, true); + return ret; }
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); @@ -117,13 +129,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev) lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS; pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
- if (pcie_retrain_link(dev, false)) { + ret = pcie_retrain_link(dev, false); + if (ret) { pci_info(dev, "retraining failed\n"); - return false; + return ret; } }
- return true; + return ret; }
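Because pcie_failed_link_retrain() now returns 0 or a negative errno instead of a bool, success and failure swap truthiness, which is why the caller in pci_dev_wait() changes its test to "== 0". A small demonstration of the pitfall, with retrain() as a stand-in:

#include <stdio.h>

#define ERR_NOTTY (-25)   /* illustrative stand-in for -ENOTTY */

static int retrain(int will_succeed)
{
	return will_succeed ? 0 : ERR_NOTTY;   /* errno convention: 0 is success */
}

int main(void)
{
	/* With the old bool convention, nonzero meant success; now it is the
	 * opposite, so callers must compare against 0 explicitly. */
	if (retrain(1) == 0)
		puts("retrained, shorten the wait");
	if (retrain(0) != 0)
		puts("no retrain done, keep the normal delay");
	return 0;
}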
static ktime_t fixup_debug_start(struct pci_dev *dev, diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c index 19d459a36be5..818ce4424d34 100644 --- a/drivers/perf/alibaba_uncore_drw_pmu.c +++ b/drivers/perf/alibaba_uncore_drw_pmu.c @@ -408,7 +408,7 @@ static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data) }
/* clear common counter intr status */ - clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1); + clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status); writel(clr_status, drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR); } diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 2c684e49a6fc..0b3ce7713645 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -24,14 +24,6 @@ #define CMN_NI_NODE_ID GENMASK_ULL(31, 16) #define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
-#define CMN_NODEID_DEVID(reg) ((reg) & 3) -#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1) -#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1) -#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3) -#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7) -#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits))) -#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1)) - #define CMN_CHILD_INFO 0x0080 #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) @@ -43,6 +35,9 @@ #define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION) #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
+/* Currently XPs are the node type we can have most of; others top out at 128 */ +#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS + /* The CFG node has various info besides the discovery tree */ #define CMN_CFGM_PERIPH_ID_01 0x0008 #define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0) @@ -78,7 +73,8 @@ /* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */ #define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
-/* HN-Ps are weird... */ +/* Some types are designed to coexist with another device in the same node */ +#define CMN_CCLA_PMU_EVENT_SEL 0x008 #define CMN_HNP_PMU_EVENT_SEL 0x008
/* DTMs live in the PMU space of XP registers */ @@ -281,16 +277,16 @@ struct arm_cmn_node { u16 id, logid; enum cmn_node_type type;
- int dtm; - union { - /* DN/HN-F/CXHA */ - struct { - u8 val : 4; - u8 count : 4; - } occupid[SEL_MAX]; - /* XP */ - u8 dtc; - }; + /* XP properties really, but replicated to children for convenience */ + u8 dtm; + s8 dtc; + u8 portid_bits:4; + u8 deviceid_bits:4; + /* DN/HN-F/CXHA */ + struct { + u8 val : 4; + u8 count : 4; + } occupid[SEL_MAX]; union { u8 event[4]; __le32 event_sel; @@ -361,49 +357,33 @@ struct arm_cmn { static int arm_cmn_hp_state;
struct arm_cmn_nodeid { - u8 x; - u8 y; u8 port; u8 dev; };
static int arm_cmn_xyidbits(const struct arm_cmn *cmn) { - return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2); + return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1)); }
-static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) +static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn) { struct arm_cmn_nodeid nid;
- if (cmn->num_xps == 1) { - nid.x = 0; - nid.y = 0; - nid.port = CMN_NODEID_1x1_PID(id); - nid.dev = CMN_NODEID_DEVID(id); - } else { - int bits = arm_cmn_xyidbits(cmn); - - nid.x = CMN_NODEID_X(id, bits); - nid.y = CMN_NODEID_Y(id, bits); - if (cmn->ports_used & 0xc) { - nid.port = CMN_NODEID_EXT_PID(id); - nid.dev = CMN_NODEID_EXT_DEVID(id); - } else { - nid.port = CMN_NODEID_PID(id); - nid.dev = CMN_NODEID_DEVID(id); - } - } + nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1); + nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1); return nid; }
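The new node-ID handling decodes device and port numbers with per-XP field widths and derives the X/Y mesh coordinates from whatever bits remain. A small sketch of that decode; the field widths and the example ID below are illustrative, not values taken from real CMN hardware:

#include <stdio.h>

struct nid { unsigned dev, port, x, y; };

static struct nid decode(unsigned id, unsigned devbits, unsigned portbits,
			 unsigned xybits)
{
	struct nid n;

	n.dev  = id & ((1U << devbits) - 1);              /* lowest bits */
	n.port = (id >> devbits) & ((1U << portbits) - 1);
	id   >>= devbits + portbits;                      /* remaining bits are X:Y */
	n.y    = id & ((1U << xybits) - 1);
	n.x    = id >> xybits;
	return n;
}

int main(void)
{
	/* example layout: x[2] y[2] port[1] dev[2] */
	struct nid n = decode(0x5b, 2, 1, 2);

	printf("x=%u y=%u port=%u dev=%u\n", n.x, n.y, n.port, n.dev);
	return 0;
}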
static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn, const struct arm_cmn_node *dn) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); - int xp_idx = cmn->mesh_x * nid.y + nid.x; + int id = dn->id >> (dn->portid_bits + dn->deviceid_bits); + int bits = arm_cmn_xyidbits(cmn); + int x = id >> bits; + int y = id & ((1U << bits) - 1);
- return cmn->xps + xp_idx; + return cmn->xps + cmn->mesh_x * y + x; } static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, enum cmn_node_type type) @@ -489,13 +469,14 @@ static const char *arm_cmn_device_type(u8 type) } }
-static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) +static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d) { struct arm_cmn *cmn = s->private; struct arm_cmn_node *dn; + u16 id = xp->id | d | (p << xp->deviceid_bits);
for (dn = cmn->dns; dn->type; dn++) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + int pad = dn->logid < 10;
if (dn->type == CMN_TYPE_XP) continue; @@ -503,10 +484,10 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) if (dn->type < CMN_TYPE_HNI) continue;
- if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d) + if (dn->id != id) continue;
- seq_printf(s, " #%-2d |", dn->logid); + seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid); return; } seq_puts(s, " |"); @@ -519,33 +500,32 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_puts(s, " X"); for (x = 0; x < cmn->mesh_x; x++) - seq_printf(s, " %d ", x); + seq_printf(s, " %-2d ", x); seq_puts(s, "\nY P D+"); y = cmn->mesh_y; while (y--) { int xp_base = cmn->mesh_x * y; + struct arm_cmn_node *xp = cmn->xps + xp_base; u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
for (x = 0; x < cmn->mesh_x; x++) seq_puts(s, "--------+");
- seq_printf(s, "\n%d |", y); + seq_printf(s, "\n%-2d |", y); for (x = 0; x < cmn->mesh_x; x++) { - struct arm_cmn_node *xp = cmn->xps + xp_base + x; - for (p = 0; p < CMN_MAX_PORTS; p++) - port[p][x] = arm_cmn_device_connect_info(cmn, xp, p); - seq_printf(s, " XP #%-2d |", xp_base + x); + port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p); + seq_printf(s, " XP #%-3d|", xp_base + x); }
seq_puts(s, "\n |"); for (x = 0; x < cmn->mesh_x; x++) { - u8 dtc = cmn->xps[xp_base + x].dtc; + s8 dtc = xp[x].dtc;
- if (dtc & (dtc - 1)) + if (dtc < 0) seq_puts(s, " DTC ?? |"); else - seq_printf(s, " DTC %ld |", __ffs(dtc)); + seq_printf(s, " DTC %d |", dtc); } seq_puts(s, "\n |"); for (x = 0; x < cmn->mesh_x; x++) @@ -557,10 +537,10 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) seq_puts(s, arm_cmn_device_type(port[p][x])); seq_puts(s, "\n 0|"); for (x = 0; x < cmn->mesh_x; x++) - arm_cmn_show_logid(s, x, y, p, 0); + arm_cmn_show_logid(s, xp + x, p, 0); seq_puts(s, "\n 1|"); for (x = 0; x < cmn->mesh_x; x++) - arm_cmn_show_logid(s, x, y, p, 1); + arm_cmn_show_logid(s, xp + x, p, 1); } seq_puts(s, "\n-----+"); } @@ -588,9 +568,8 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
struct arm_cmn_hw_event { struct arm_cmn_node *dn; - u64 dtm_idx[4]; - unsigned int dtc_idx; - u8 dtcs_used; + u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)]; + s8 dtc_idx[CMN_MAX_DTCS]; u8 num_dns; u8 dtm_offset; bool wide_sel; @@ -600,6 +579,10 @@ struct arm_cmn_hw_event { #define for_each_hw_dn(hw, dn, i) \ for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
+/* @i is the DTC number, @idx is the counter index on that DTC */ +#define for_each_hw_dtc_idx(hw, i, idx) \ + for (int i = 0, idx; i < CMN_MAX_DTCS; i++) if ((idx = hw->dtc_idx[i]) >= 0) + static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event) { BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target)); @@ -1429,12 +1412,11 @@ static void arm_cmn_init_counter(struct perf_event *event) { struct arm_cmn *cmn = to_cmn(event->pmu); struct arm_cmn_hw_event *hw = to_cmn_hw(event); - unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx); u64 count;
- for (i = 0; hw->dtcs_used & (1U << i); i++) { - writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt); - cmn->dtc[i].counters[hw->dtc_idx] = event; + for_each_hw_dtc_idx(hw, i, idx) { + writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + CMN_DT_PMEVCNT(idx)); + cmn->dtc[i].counters[idx] = event; }
count = arm_cmn_read_dtm(cmn, hw, false); @@ -1447,11 +1429,9 @@ static void arm_cmn_event_read(struct perf_event *event) struct arm_cmn_hw_event *hw = to_cmn_hw(event); u64 delta, new, prev; unsigned long flags; - unsigned int i;
- if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) { - i = __ffs(hw->dtcs_used); - delta = arm_cmn_read_cc(cmn->dtc + i); + if (CMN_EVENT_TYPE(event) == CMN_TYPE_DTC) { + delta = arm_cmn_read_cc(cmn->dtc + hw->dtc_idx[0]); local64_add(delta, &event->count); return; } @@ -1461,8 +1441,8 @@ static void arm_cmn_event_read(struct perf_event *event) delta = new - prev;
local_irq_save(flags); - for (i = 0; hw->dtcs_used & (1U << i); i++) { - new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx); + for_each_hw_dtc_idx(hw, i, idx) { + new = arm_cmn_read_counter(cmn->dtc + i, idx); delta += new << 16; } local_irq_restore(flags); @@ -1518,7 +1498,7 @@ static void arm_cmn_event_start(struct perf_event *event, int flags) int i;
if (type == CMN_TYPE_DTC) { - i = __ffs(hw->dtcs_used); + i = hw->dtc_idx[0]; writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR); cmn->dtc[i].cc_active = true; } else if (type == CMN_TYPE_WP) { @@ -1549,7 +1529,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) int i;
if (type == CMN_TYPE_DTC) { - i = __ffs(hw->dtcs_used); + i = hw->dtc_idx[0]; cmn->dtc[i].cc_active = false; } else if (type == CMN_TYPE_WP) { int wp_idx = arm_cmn_wp_idx(event); @@ -1735,29 +1715,27 @@ static int arm_cmn_event_init(struct perf_event *event) hw->dn = arm_cmn_node(cmn, type); if (!hw->dn) return -EINVAL; + + memset(hw->dtc_idx, -1, sizeof(hw->dtc_idx)); for (dn = hw->dn; dn->type == type; dn++) { if (bynodeid && dn->id != nodeid) { hw->dn++; continue; } hw->num_dns++; + if (dn->dtc < 0) + memset(hw->dtc_idx, 0, cmn->num_dtcs); + else + hw->dtc_idx[dn->dtc] = 0; + if (bynodeid) break; }
if (!hw->num_dns) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid); - - dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", - nodeid, nid.x, nid.y, nid.port, nid.dev, type); + dev_dbg(cmn->dev, "invalid node 0x%x type 0x%x\n", nodeid, type); return -EINVAL; } - /* - * Keep assuming non-cycles events count in all DTC domains; turns out - * it's hard to make a worthwhile optimisation around this, short of - * going all-in with domain-local counter allocation as well. - */ - hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
return arm_cmn_validate_group(cmn, event); } @@ -1783,28 +1761,25 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, } memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
- for (i = 0; hw->dtcs_used & (1U << i); i++) - cmn->dtc[i].counters[hw->dtc_idx] = NULL; + for_each_hw_dtc_idx(hw, j, idx) + cmn->dtc[j].counters[idx] = NULL; }
static int arm_cmn_event_add(struct perf_event *event, int flags) { struct arm_cmn *cmn = to_cmn(event->pmu); struct arm_cmn_hw_event *hw = to_cmn_hw(event); - struct arm_cmn_dtc *dtc = &cmn->dtc[0]; struct arm_cmn_node *dn; enum cmn_node_type type = CMN_EVENT_TYPE(event); - unsigned int i, dtc_idx, input_sel; + unsigned int input_sel, i = 0;
if (type == CMN_TYPE_DTC) { - i = 0; while (cmn->dtc[i].cycles) if (++i == cmn->num_dtcs) return -ENOSPC;
cmn->dtc[i].cycles = event; - hw->dtc_idx = CMN_DT_NUM_COUNTERS; - hw->dtcs_used = 1U << i; + hw->dtc_idx[0] = i;
if (flags & PERF_EF_START) arm_cmn_event_start(event, 0); @@ -1812,17 +1787,22 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) }
/* Grab a free global counter first... */ - dtc_idx = 0; - while (dtc->counters[dtc_idx]) - if (++dtc_idx == CMN_DT_NUM_COUNTERS) - return -ENOSPC; - - hw->dtc_idx = dtc_idx; + for_each_hw_dtc_idx(hw, j, idx) { + if (j > 0) { + idx = hw->dtc_idx[0]; + } else { + idx = 0; + while (cmn->dtc[j].counters[idx]) + if (++idx == CMN_DT_NUM_COUNTERS) + return -ENOSPC; + } + hw->dtc_idx[j] = idx; + }
/* ...then the local counters to feed it. */ for_each_hw_dn(hw, dn, i) { struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; - unsigned int dtm_idx, shift; + unsigned int dtm_idx, shift, d = 0; u64 reg;
dtm_idx = 0; @@ -1841,14 +1821,14 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
tmp = dtm->wp_event[wp_idx ^ 1]; if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) != - CMN_EVENT_WP_COMBINE(dtc->counters[tmp])) + CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp])) goto free_dtms;
input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx; - dtm->wp_event[wp_idx] = dtc_idx; + dtm->wp_event[wp_idx] = hw->dtc_idx[d]; writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); } else { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + struct arm_cmn_nodeid nid = arm_cmn_nid(dn);
if (cmn->multi_dtm) nid.port %= 2; @@ -1865,7 +1845,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) dtm->input_sel[dtm_idx] = input_sel; shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx); dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); - dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift; + dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, hw->dtc_idx[d]) << shift; dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low; writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG); @@ -1893,7 +1873,7 @@ static void arm_cmn_event_del(struct perf_event *event, int flags) arm_cmn_event_stop(event, PERF_EF_UPDATE);
if (type == CMN_TYPE_DTC) - cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL; + cmn->dtc[hw->dtc_idx[0]].cycles = NULL; else arm_cmn_event_clear(cmn, event, hw->num_dns); } @@ -2074,7 +2054,6 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) { struct arm_cmn_node *dn, *xp; int dtc_idx = 0; - u8 dtcs_present = (1 << cmn->num_dtcs) - 1;
cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); if (!cmn->dtc) @@ -2084,23 +2063,28 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
+ if (cmn->part == PART_CMN600 && cmn->num_dtcs > 1) { + /* We do at least know that a DTC's XP must be in that DTC's domain */ + dn = arm_cmn_node(cmn, CMN_TYPE_DTC); + for (int i = 0; i < cmn->num_dtcs; i++) + arm_cmn_node_to_xp(cmn, dn + i)->dtc = i; + } + for (dn = cmn->dns; dn->type; dn++) { - if (dn->type == CMN_TYPE_XP) { - dn->dtc &= dtcs_present; + if (dn->type == CMN_TYPE_XP) continue; - }
xp = arm_cmn_node_to_xp(cmn, dn); + dn->portid_bits = xp->portid_bits; + dn->deviceid_bits = xp->deviceid_bits; + dn->dtc = xp->dtc; dn->dtm = xp->dtm; if (cmn->multi_dtm) - dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; + dn->dtm += arm_cmn_nid(dn).port / 2;
if (dn->type == CMN_TYPE_DTC) { - int err; - /* We do at least know that a DTC's XP must be in that DTC's domain */ - if (xp->dtc == 0xf) - xp->dtc = 1 << dtc_idx; - err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); + int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); + if (err) return err; } @@ -2258,26 +2242,35 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) cmn->mesh_x = xp->logid;
if (cmn->part == PART_CMN600) - xp->dtc = 0xf; + xp->dtc = -1; else - xp->dtc = 1 << arm_cmn_dtc_domain(cmn, xp_region); + xp->dtc = arm_cmn_dtc_domain(cmn, xp_region);
xp->dtm = dtm - cmn->dtms; arm_cmn_init_dtm(dtm++, xp, 0); /* * Keeping track of connected ports will let us filter out - * unnecessary XP events easily. We can also reliably infer the - * "extra device ports" configuration for the node ID format - * from this, since in that case we will see at least one XP - * with port 2 connected, for the HN-D. + * unnecessary XP events easily, and also infer the per-XP + * part of the node ID format. */ for (int p = 0; p < CMN_MAX_PORTS; p++) if (arm_cmn_device_connect_info(cmn, xp, p)) xp_ports |= BIT(p);
- if (cmn->multi_dtm && (xp_ports & 0xc)) + if (cmn->num_xps == 1) { + xp->portid_bits = 3; + xp->deviceid_bits = 2; + } else if (xp_ports > 0x3) { + xp->portid_bits = 2; + xp->deviceid_bits = 1; + } else { + xp->portid_bits = 1; + xp->deviceid_bits = 2; + } + + if (cmn->multi_dtm && (xp_ports > 0x3)) arm_cmn_init_dtm(dtm++, xp, 1); - if (cmn->multi_dtm && (xp_ports & 0x30)) + if (cmn->multi_dtm && (xp_ports > 0xf)) arm_cmn_init_dtm(dtm++, xp, 2);
cmn->ports_used |= xp_ports; @@ -2332,10 +2325,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_CXHA: case CMN_TYPE_CCRA: case CMN_TYPE_CCHA: - case CMN_TYPE_CCLA: case CMN_TYPE_HNS: dn++; break; + case CMN_TYPE_CCLA: + dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL; + dn++; + break; /* Nothing to see here */ case CMN_TYPE_MPAM_S: case CMN_TYPE_MPAM_NS: @@ -2353,7 +2349,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_HNP: case CMN_TYPE_CCLA_RNI: dn[1] = dn[0]; - dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL; + dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL; dn[1].type = arm_cmn_subtype(dn->type); dn += 2; break; diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c index 430ca15373fe..4a902da5c1d4 100644 --- a/drivers/perf/hisilicon/hisi_pcie_pmu.c +++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -221,7 +221,7 @@ static void hisi_pcie_pmu_config_filter(struct perf_event *event) struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; u64 port, trig_len, thr_len, len_mode; - u64 reg = HISI_PCIE_INIT_SET; + u64 reg = 0;
/* Config HISI_PCIE_EVENT_CTRL according to event. */ reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event)); @@ -458,10 +458,24 @@ static void hisi_pcie_pmu_set_period(struct perf_event *event) struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + u64 orig_cnt, cnt; + + orig_cnt = hisi_pcie_pmu_read_counter(event);
local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL); hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL); hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL); + + /* + * The counter maybe unwritable if the target event is unsupported. + * Check this by comparing the counts after setting the period. If + * the counts stay unchanged after setting the period then update + * the hwc->prev_count correctly. Otherwise the final counts user + * get maybe totally wrong. + */ + cnt = hisi_pcie_pmu_read_counter(event); + if (orig_cnt == cnt) + local64_set(&hwc->prev_count, cnt); }
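The added check works by writing the reset value and comparing the counter before and after: if the value did not move, the event is unsupported and prev_count is set to what the hardware actually holds. A simulated version of that read-back check (the fake counter is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define INIT_VAL 0x100ULL

static uint64_t counter = 0xdeadULL;
static int counter_writable;             /* 0: unsupported event, writes ignored */

static void write_counter(uint64_t val)
{
	if (counter_writable)
		counter = val;
}

static uint64_t set_period(void)
{
	uint64_t orig = counter, prev = INIT_VAL;

	write_counter(INIT_VAL);
	if (counter == orig)             /* the write did not stick */
		prev = counter;          /* keep prev consistent with the hardware */
	return prev;
}

int main(void)
{
	counter_writable = 0;
	printf("read-only counter: prev_count=0x%llx\n",
	       (unsigned long long)set_period());
	counter_writable = 1;
	printf("writable counter:  prev_count=0x%llx\n",
	       (unsigned long long)set_period());
	return 0;
}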
static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c index f80630a74d34..d099a7f25f64 100644 --- a/drivers/pinctrl/bcm/pinctrl-ns.c +++ b/drivers/pinctrl/bcm/pinctrl-ns.c @@ -7,11 +7,11 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/slab.h>
#include "../core.h" @@ -208,7 +208,6 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = { static int ns_pinctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - const struct of_device_id *of_id; struct ns_pinctrl *ns_pinctrl; struct pinctrl_desc *pctldesc; struct pinctrl_pin_desc *pin; @@ -225,10 +224,7 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
ns_pinctrl->dev = dev;
- of_id = of_match_device(ns_pinctrl_of_match_table, dev); - if (!of_id) - return -EINVAL; - ns_pinctrl->chipset_flag = (uintptr_t)of_id->data; + ns_pinctrl->chipset_flag = (uintptr_t)device_get_match_data(dev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cru_gpio_control"); diff --git a/drivers/pinctrl/berlin/berlin-bg2.c b/drivers/pinctrl/berlin/berlin-bg2.c index acbd413340e8..15aed4467627 100644 --- a/drivers/pinctrl/berlin/berlin-bg2.c +++ b/drivers/pinctrl/berlin/berlin-bg2.c @@ -8,8 +8,9 @@ */
#include <linux/init.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h>
#include "berlin.h" @@ -227,10 +228,7 @@ static const struct of_device_id berlin2_pinctrl_match[] = {
static int berlin2_pinctrl_probe(struct platform_device *pdev) { - const struct of_device_id *match = - of_match_device(berlin2_pinctrl_match, &pdev->dev); - - return berlin_pinctrl_probe(pdev, match->data); + return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev)); }
static struct platform_driver berlin2_pinctrl_driver = { diff --git a/drivers/pinctrl/berlin/berlin-bg2cd.c b/drivers/pinctrl/berlin/berlin-bg2cd.c index c0f5d86d5d01..73a1d8c23088 100644 --- a/drivers/pinctrl/berlin/berlin-bg2cd.c +++ b/drivers/pinctrl/berlin/berlin-bg2cd.c @@ -8,8 +8,9 @@ */
#include <linux/init.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h>
#include "berlin.h" @@ -172,10 +173,7 @@ static const struct of_device_id berlin2cd_pinctrl_match[] = {
static int berlin2cd_pinctrl_probe(struct platform_device *pdev) { - const struct of_device_id *match = - of_match_device(berlin2cd_pinctrl_match, &pdev->dev); - - return berlin_pinctrl_probe(pdev, match->data); + return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev)); }
static struct platform_driver berlin2cd_pinctrl_driver = { diff --git a/drivers/pinctrl/berlin/berlin-bg2q.c b/drivers/pinctrl/berlin/berlin-bg2q.c index 20a3216ede07..a5dbc8f279e7 100644 --- a/drivers/pinctrl/berlin/berlin-bg2q.c +++ b/drivers/pinctrl/berlin/berlin-bg2q.c @@ -8,8 +8,9 @@ */
#include <linux/init.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h>
#include "berlin.h" @@ -389,10 +390,7 @@ static const struct of_device_id berlin2q_pinctrl_match[] = {
static int berlin2q_pinctrl_probe(struct platform_device *pdev) { - const struct of_device_id *match = - of_match_device(berlin2q_pinctrl_match, &pdev->dev); - - return berlin_pinctrl_probe(pdev, match->data); + return berlin_pinctrl_probe(pdev, device_get_match_data(&pdev->dev)); }
static struct platform_driver berlin2q_pinctrl_driver = { diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c index 3026a3b3da2d..9bf0a54f2798 100644 --- a/drivers/pinctrl/berlin/berlin-bg4ct.c +++ b/drivers/pinctrl/berlin/berlin-bg4ct.c @@ -8,8 +8,9 @@ */
#include <linux/init.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h>
#include "berlin.h" @@ -449,8 +450,8 @@ static const struct of_device_id berlin4ct_pinctrl_match[] = {
static int berlin4ct_pinctrl_probe(struct platform_device *pdev) { - const struct of_device_id *match = - of_match_device(berlin4ct_pinctrl_match, &pdev->dev); + const struct berlin_pinctrl_desc *desc = + device_get_match_data(&pdev->dev); struct regmap_config *rmconfig; struct regmap *regmap; struct resource *res; @@ -473,7 +474,7 @@ static int berlin4ct_pinctrl_probe(struct platform_device *pdev) if (IS_ERR(regmap)) return PTR_ERR(regmap);
- return berlin_pinctrl_probe_regmap(pdev, match->data, regmap); + return berlin_pinctrl_probe_regmap(pdev, desc, regmap); }
static struct platform_driver berlin4ct_pinctrl_driver = { diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c index b631c14813a7..fc0daec94e10 100644 --- a/drivers/pinctrl/berlin/pinctrl-as370.c +++ b/drivers/pinctrl/berlin/pinctrl-as370.c @@ -8,8 +8,9 @@ */
#include <linux/init.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h>
#include "berlin.h" @@ -330,8 +331,8 @@ static const struct of_device_id as370_pinctrl_match[] = {
static int as370_pinctrl_probe(struct platform_device *pdev) { - const struct of_device_id *match = - of_match_device(as370_pinctrl_match, &pdev->dev); + const struct berlin_pinctrl_desc *desc = + device_get_match_data(&pdev->dev); struct regmap_config *rmconfig; struct regmap *regmap; struct resource *res; @@ -354,7 +355,7 @@ static int as370_pinctrl_probe(struct platform_device *pdev) if (IS_ERR(regmap)) return PTR_ERR(regmap);
- return berlin_pinctrl_probe_regmap(pdev, match->data, regmap); + return berlin_pinctrl_probe_regmap(pdev, desc, regmap); }
static struct platform_driver as370_pinctrl_driver = { diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c index 040e418dbfc1..162dfc213669 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c @@ -12,8 +12,8 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> +#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -404,13 +404,8 @@ static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = { static int armada_38x_pinctrl_probe(struct platform_device *pdev) { struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info; - const struct of_device_id *match = - of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
- if (!match) - return -ENODEV; - - soc->variant = (unsigned) match->data & 0xff; + soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff; soc->controls = armada_38x_mpp_controls; soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls); soc->gpioranges = armada_38x_mpp_gpio_ranges; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c index c33f1cbaf661..d9c98faa7b0e 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c @@ -12,8 +12,8 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> +#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -386,13 +386,8 @@ static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = { static int armada_39x_pinctrl_probe(struct platform_device *pdev) { struct mvebu_pinctrl_soc_info *soc = &armada_39x_pinctrl_info; - const struct of_device_id *match = - of_match_device(armada_39x_pinctrl_of_match, &pdev->dev);
- if (!match) - return -ENODEV; - - soc->variant = (unsigned) match->data & 0xff; + soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff; soc->controls = armada_39x_mpp_controls; soc->ncontrols = ARRAY_SIZE(armada_39x_mpp_controls); soc->gpioranges = armada_39x_mpp_gpio_ranges; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c index 89bab536717d..7becf2781a0b 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c @@ -13,7 +13,6 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h>
#include "pinctrl-mvebu.h" @@ -106,10 +105,8 @@ static struct pinctrl_gpio_range armada_ap806_mpp_gpio_ranges[] = { static int armada_ap806_pinctrl_probe(struct platform_device *pdev) { struct mvebu_pinctrl_soc_info *soc = &armada_ap806_pinctrl_info; - const struct of_device_id *match = - of_match_device(armada_ap806_pinctrl_of_match, &pdev->dev);
- if (!match || !pdev->dev.parent) + if (!pdev->dev.parent) return -ENODEV;
soc->variant = 0; /* no variants for Armada AP806 */ diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c index 8ba8f3e9121f..9a250c491f33 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c @@ -12,9 +12,9 @@ #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> #include <linux/platform_device.h> +#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -638,8 +638,6 @@ static void mvebu_pinctrl_assign_variant(struct mvebu_mpp_mode *m, static int armada_cp110_pinctrl_probe(struct platform_device *pdev) { struct mvebu_pinctrl_soc_info *soc; - const struct of_device_id *match = - of_match_device(armada_cp110_pinctrl_of_match, &pdev->dev); int i;
if (!pdev->dev.parent) @@ -650,7 +648,7 @@ static int armada_cp110_pinctrl_probe(struct platform_device *pdev) if (!soc) return -ENOMEM;
- soc->variant = (unsigned long) match->data & 0xff; + soc->variant = (unsigned long)device_get_match_data(&pdev->dev) & 0xff; soc->controls = armada_cp110_mpp_controls; soc->ncontrols = ARRAY_SIZE(armada_cp110_mpp_controls); soc->modes = armada_cp110_mpp_modes; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c index 48e2a6c56a83..487825bfd125 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c @@ -19,8 +19,8 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> +#include <linux/property.h> #include <linux/bitops.h>
#include "pinctrl-mvebu.h" @@ -568,14 +568,9 @@ static int armada_xp_pinctrl_resume(struct platform_device *pdev) static int armada_xp_pinctrl_probe(struct platform_device *pdev) { struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info; - const struct of_device_id *match = - of_match_device(armada_xp_pinctrl_of_match, &pdev->dev); int nregs;
- if (!match) - return -ENODEV; - - soc->variant = (unsigned) match->data & 0xff; + soc->variant = (unsigned)device_get_match_data(&pdev->dev) & 0xff;
switch (soc->variant) { case V_MV78230: diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c index bd74daa9ed66..dce601d99372 100644 --- a/drivers/pinctrl/mvebu/pinctrl-dove.c +++ b/drivers/pinctrl/mvebu/pinctrl-dove.c @@ -12,9 +12,9 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/mfd/syscon.h> #include <linux/pinctrl/pinctrl.h> +#include <linux/property.h> #include <linux/regmap.h>
#include "pinctrl-mvebu.h" @@ -765,13 +765,11 @@ static int dove_pinctrl_probe(struct platform_device *pdev) { struct resource *res, *mpp_res; struct resource fb_res; - const struct of_device_id *match = - of_match_device(dove_pinctrl_of_match, &pdev->dev); struct mvebu_mpp_ctrl_data *mpp_data; void __iomem *base; - int i; + int i, ret;
- pdev->dev.platform_data = (void *)match->data; + pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
/* * General MPP Configuration Register is part of pdma registers. @@ -785,13 +783,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev) clk_prepare_enable(clk);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mpp_res); - if (IS_ERR(base)) - return PTR_ERR(base); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + goto err_probe; + }
mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols, sizeof(*mpp_data), GFP_KERNEL); - if (!mpp_data) - return -ENOMEM; + if (!mpp_data) { + ret = -ENOMEM; + goto err_probe; + }
dove_pinctrl_info.control_data = mpp_data; for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++) @@ -810,8 +812,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev) }
mpp4_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mpp4_base)) - return PTR_ERR(mpp4_base); + if (IS_ERR(mpp4_base)) { + ret = PTR_ERR(mpp4_base); + goto err_probe; + }
res = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (!res) { @@ -822,8 +826,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev) }
pmu_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pmu_base)) - return PTR_ERR(pmu_base); + if (IS_ERR(pmu_base)) { + ret = PTR_ERR(pmu_base); + goto err_probe; + }
gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config"); if (IS_ERR(gconfmap)) { @@ -833,12 +839,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev) adjust_resource(&fb_res, (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14); gc_base = devm_ioremap_resource(&pdev->dev, &fb_res); - if (IS_ERR(gc_base)) - return PTR_ERR(gc_base); + if (IS_ERR(gc_base)) { + ret = PTR_ERR(gc_base); + goto err_probe; + } + gconfmap = devm_regmap_init_mmio(&pdev->dev, gc_base, &gc_regmap_config); - if (IS_ERR(gconfmap)) - return PTR_ERR(gconfmap); + if (IS_ERR(gconfmap)) { + ret = PTR_ERR(gconfmap); + goto err_probe; + } }
/* Warn on any missing DT resource */ @@ -846,6 +857,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev) dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
return mvebu_pinctrl_probe(pdev); +err_probe: + clk_disable_unprepare(clk); + return ret; }
static struct platform_driver dove_pinctrl_driver = { diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c index d45c31f281c8..4789d7442f78 100644 --- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c +++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c @@ -11,8 +11,8 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> +#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -470,10 +470,7 @@ static const struct of_device_id kirkwood_pinctrl_of_match[] = {
static int kirkwood_pinctrl_probe(struct platform_device *pdev) { - const struct of_device_id *match = - of_match_device(kirkwood_pinctrl_of_match, &pdev->dev); - - pdev->dev.platform_data = (void *)match->data; + pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
return mvebu_pinctrl_simple_mmio_probe(pdev); } diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c index cc97d270be61..2b6ab7f2afc7 100644 --- a/drivers/pinctrl/mvebu/pinctrl-orion.c +++ b/drivers/pinctrl/mvebu/pinctrl-orion.c @@ -19,8 +19,8 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> +#include <linux/property.h>
#include "pinctrl-mvebu.h"
@@ -218,10 +218,7 @@ static const struct of_device_id orion_pinctrl_of_match[] = {
static int orion_pinctrl_probe(struct platform_device *pdev) { - const struct of_device_id *match = - of_match_device(orion_pinctrl_of_match, &pdev->dev); - - pdev->dev.platform_data = (void*)match->data; + pdev->dev.platform_data = (void*)device_get_match_data(&pdev->dev);
mpp_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mpp_base)) diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 6b90051af206..0cfa74365733 100644 --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@ -17,6 +17,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/types.h> @@ -985,7 +986,6 @@ static const struct of_device_id abx500_gpio_match[] = { static int abx500_gpio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - const struct of_device_id *match; struct abx500_pinctrl *pct; unsigned int id = -1; int ret; @@ -1006,12 +1006,7 @@ static int abx500_gpio_probe(struct platform_device *pdev) pct->chip.parent = &pdev->dev; pct->chip.base = -1; /* Dynamic allocation */
- match = of_match_device(abx500_gpio_match, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "gpio dt not matching\n"); - return -ENODEV; - } - id = (unsigned long)match->data; + id = (unsigned long)device_get_match_data(&pdev->dev);
/* Poke in other ASIC variants here */ switch (id) { diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index e7d33093994b..445c61a4a7e5 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -16,9 +16,11 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -1840,7 +1842,6 @@ static int nmk_pinctrl_resume(struct device *dev)
static int nmk_pinctrl_probe(struct platform_device *pdev) { - const struct of_device_id *match; struct device_node *np = pdev->dev.of_node; struct device_node *prcm_np; struct nmk_pinctrl *npct; @@ -1851,10 +1852,7 @@ static int nmk_pinctrl_probe(struct platform_device *pdev) if (!npct) return -ENOMEM;
- match = of_match_device(nmk_pinctrl_match, &pdev->dev); - if (!match) - return -ENODEV; - version = (unsigned int) match->data; + version = (unsigned int)device_get_match_data(&pdev->dev);
/* Poke in other ASIC variants here */ if (version == PINCTRL_NMK_STN8815) diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index ad30fd47a4bb..d7b66928a4e5 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -12,10 +12,9 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> +#include <linux/platform_device.h> #include <linux/pm.h> +#include <linux/property.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string_helpers.h> @@ -1302,8 +1301,8 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, if (!np) return -ENODEV;
- info->dev = dev; - info->ops = of_device_get_match_data(dev); + info->dev = &pdev->dev; + info->ops = device_get_match_data(&pdev->dev); at91_pinctrl_child_count(info, np);
/* @@ -1848,7 +1847,7 @@ static int at91_gpio_probe(struct platform_device *pdev) if (IS_ERR(at91_chip->regbase)) return PTR_ERR(at91_chip->regbase);
- at91_chip->ops = of_device_get_match_data(dev); + at91_chip->ops = device_get_match_data(dev); at91_chip->pioc_virq = irq;
at91_chip->clock = devm_clk_get_enabled(dev, NULL); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index bbe7cc894b1a..6c670203b3ac 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1918,7 +1918,8 @@ static int pcs_probe(struct platform_device *pdev)
dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size);
- if (pinctrl_enable(pcs->pctl)) + ret = pinctrl_enable(pcs->pctl); + if (ret) goto free;
return 0; diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index cf0383f575d9..f4256a918165 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -11,12 +11,12 @@ #include <linux/gpio/driver.h> #include <linux/slab.h> #include <linux/module.h> -#include <linux/of_platform.h> -#include <linux/of_address.h> +#include <linux/of.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/device.h> #include <linux/platform_device.h> +#include <linux/property.h>
#include "pinctrl-lantiq.h"
@@ -1451,7 +1451,6 @@ MODULE_DEVICE_TABLE(of, xway_match);
static int pinmux_xway_probe(struct platform_device *pdev) { - const struct of_device_id *match; const struct pinctrl_xway_soc *xway_soc; int ret, i;
@@ -1460,10 +1459,8 @@ static int pinmux_xway_probe(struct platform_device *pdev) if (IS_ERR(xway_info.membase[0])) return PTR_ERR(xway_info.membase[0]);
- match = of_match_device(xway_match, &pdev->dev); - if (match) - xway_soc = (const struct pinctrl_xway_soc *) match->data; - else + xway_soc = device_get_match_data(&pdev->dev); + if (!xway_soc) xway_soc = &danube_pinctrl;
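The pinctrl conversions above all follow the same recipe: drop the explicit of_device_id lookup and read the per-compatible data through the generic property API. A minimal sketch of that pattern, using a hypothetical foo driver rather than any of the drivers touched by this series:

	#include <linux/platform_device.h>
	#include <linux/property.h>

	static int foo_probe(struct platform_device *pdev)
	{
		unsigned long variant;

		/*
		 * device_get_match_data() returns the .data pointer of the
		 * matching of_device_id (or other fwnode match), or NULL if
		 * there is no match data.
		 */
		variant = (unsigned long)device_get_match_data(&pdev->dev);
		if (!variant)
			return -ENODEV;

		dev_info(&pdev->dev, "variant %lu\n", variant);
		return 0;
	}

Besides removing the need to keep the match table in scope, device_get_match_data() is firmware-agnostic, so the same probe code also works for ACPI-described devices.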
/* find out how many pads we have */ diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index 5370bbdf2e1a..451801acdc40 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -14,7 +14,8 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/seq_file.h> #include <linux/slab.h> @@ -272,6 +273,22 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod, return r; }
+/** + * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device + * @data: IODelay device + * + * Deinitialize the IODelay device (basically just lock the region back up. + */ +static void ti_iodelay_pinconf_deinit_dev(void *data) +{ + struct ti_iodelay_device *iod = data; + const struct ti_iodelay_reg_data *reg = iod->reg_data; + + /* lock the iodelay region back again */ + regmap_update_bits(iod->regmap, reg->reg_global_lock_offset, + reg->global_lock_mask, reg->global_lock_val); +} + /** * ti_iodelay_pinconf_init_dev() - Initialize IODelay device * @iod: iodelay device @@ -294,6 +311,11 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) if (r) return r;
+ r = devm_add_action_or_reset(iod->dev, ti_iodelay_pinconf_deinit_dev, + iod); + if (r) + return r; + /* Read up Recalibration sequence done by bootloader */ r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val); if (r) @@ -352,21 +374,6 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) return 0; }
-/** - * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device - * @iod: IODelay device - * - * Deinitialize the IODelay device (basically just lock the region back up. - */ -static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod) -{ - const struct ti_iodelay_reg_data *reg = iod->reg_data; - - /* lock the iodelay region back again */ - regmap_update_bits(iod->regmap, reg->reg_global_lock_offset, - reg->global_lock_mask, reg->global_lock_val); -} - /** * ti_iodelay_get_pingroup() - Find the group mapped by a group selector * @iod: iodelay device @@ -821,56 +828,48 @@ MODULE_DEVICE_TABLE(of, ti_iodelay_of_match); static int ti_iodelay_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np = of_node_get(dev->of_node); - const struct of_device_id *match; + struct device_node *np __free(device_node) = of_node_get(dev->of_node); struct resource *res; struct ti_iodelay_device *iod; - int ret = 0; + int ret;
if (!np) { - ret = -EINVAL; dev_err(dev, "No OF node\n"); - goto exit_out; - } - - match = of_match_device(ti_iodelay_of_match, dev); - if (!match) { - ret = -EINVAL; - dev_err(dev, "No DATA match\n"); - goto exit_out; + return -EINVAL; }
iod = devm_kzalloc(dev, sizeof(*iod), GFP_KERNEL); - if (!iod) { - ret = -ENOMEM; - goto exit_out; - } + if (!iod) + return -ENOMEM; + iod->dev = dev; - iod->reg_data = match->data; + iod->reg_data = device_get_match_data(dev); + if (!iod->reg_data) { + dev_err(dev, "No DATA match\n"); + return -EINVAL; + }
/* So far We can assume there is only 1 bank of registers */ iod->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); - if (IS_ERR(iod->reg_base)) { - ret = PTR_ERR(iod->reg_base); - goto exit_out; - } + if (IS_ERR(iod->reg_base)) + return PTR_ERR(iod->reg_base); + iod->phys_base = res->start;
iod->regmap = devm_regmap_init_mmio(dev, iod->reg_base, iod->reg_data->regmap_config); if (IS_ERR(iod->regmap)) { dev_err(dev, "Regmap MMIO init failed.\n"); - ret = PTR_ERR(iod->regmap); - goto exit_out; + return PTR_ERR(iod->regmap); }
ret = ti_iodelay_pinconf_init_dev(iod); if (ret) - goto exit_out; + return ret;
ret = ti_iodelay_alloc_pins(dev, iod, res->start); if (ret) - goto exit_out; + return ret;
iod->desc.pctlops = &ti_iodelay_pinctrl_ops; /* no pinmux ops - we are pinconf */ @@ -881,42 +880,14 @@ static int ti_iodelay_probe(struct platform_device *pdev) ret = devm_pinctrl_register_and_init(dev, &iod->desc, iod, &iod->pctl); if (ret) { dev_err(dev, "Failed to register pinctrl\n"); - goto exit_out; + return ret; }
- platform_set_drvdata(pdev, iod); - - ret = pinctrl_enable(iod->pctl); - if (ret) - goto exit_out; - - return 0; - -exit_out: - of_node_put(np); - return ret; -} - -/** - * ti_iodelay_remove() - standard remove - * @pdev: platform device - * - * Return: 0 if all went fine, else appropriate error value. - */ -static int ti_iodelay_remove(struct platform_device *pdev) -{ - struct ti_iodelay_device *iod = platform_get_drvdata(pdev); - - ti_iodelay_pinconf_deinit_dev(iod); - - /* Expect other allocations to be freed by devm */ - - return 0; + return pinctrl_enable(iod->pctl); }
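The net effect of the ti-iodelay change is that the re-lock step moves from a .remove() callback to a devres action registered during init. A stripped-down sketch of that pattern with hypothetical foo names (register offsets and values invented for illustration, not taken from the patch):

	#include <linux/device.h>
	#include <linux/regmap.h>

	#define FOO_LOCK_REG	0x24	/* hypothetical lock register */
	#define FOO_LOCK_MASK	0x1
	#define FOO_LOCKED	0x1
	#define FOO_UNLOCKED	0x0

	struct foo_priv {
		struct device *dev;
		struct regmap *regmap;
	};

	/* devres action: lock the region again when the device unbinds */
	static void foo_relock(void *data)
	{
		struct foo_priv *priv = data;

		regmap_update_bits(priv->regmap, FOO_LOCK_REG,
				   FOO_LOCK_MASK, FOO_LOCKED);
	}

	static int foo_unlock(struct foo_priv *priv)
	{
		int ret;

		ret = regmap_update_bits(priv->regmap, FOO_LOCK_REG,
					 FOO_LOCK_MASK, FOO_UNLOCKED);
		if (ret)
			return ret;

		/* devres runs foo_relock() on unbind, or immediately on failure */
		return devm_add_action_or_reset(priv->dev, foo_relock, priv);
	}

With the unwind handled by devres, the driver no longer needs platform_set_drvdata() just to find its state in remove(), which is why both that call and ti_iodelay_remove() disappear in this hunk.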
static struct platform_driver ti_iodelay_driver = { .probe = ti_iodelay_probe, - .remove = ti_iodelay_remove, .driver = { .name = DRIVER_NAME, .of_match_table = ti_iodelay_of_match, diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index 6ac5c80cfda2..7520b599eb3d 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -303,11 +303,11 @@ static int axp20x_battery_get_prop(struct power_supply *psy, val->intval = reg & AXP209_FG_PERCENT; break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: return axp20x_batt->data->get_max_voltage(axp20x_batt, &val->intval);
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: ret = regmap_read(axp20x_batt->regmap, AXP20X_V_OFF, ®); if (ret) return ret; @@ -455,10 +455,10 @@ static int axp20x_battery_set_prop(struct power_supply *psy, struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
switch (psp) { - case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: return axp20x_set_voltage_min_design(axp20x_batt, val->intval);
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: @@ -493,8 +493,8 @@ static enum power_supply_property axp20x_battery_props[] = { POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, - POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_CAPACITY, };
@@ -502,8 +502,8 @@ static int axp20x_battery_prop_writeable(struct power_supply *psy, enum power_supply_property psp) { return psp == POWER_SUPPLY_PROP_STATUS || - psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN || - psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN || + psp == POWER_SUPPLY_PROP_VOLTAGE_MIN || + psp == POWER_SUPPLY_PROP_VOLTAGE_MAX || psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT || psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX; } diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index 17ac2ab78c4e..ab97dd7ca5cb 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -853,7 +853,10 @@ static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off) /* program interrupt thresholds such that we should * get interrupt for every 'off' perc change in the soc */ - regmap_read(map, MAX17042_RepSOC, &soc); + if (chip->pdata->enable_current_sense) + regmap_read(map, MAX17042_RepSOC, &soc); + else + regmap_read(map, MAX17042_VFSOC, &soc); soc >>= 8; soc_tr = (soc + off) << 8; if (off < soc) diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 3dfe45ac300a..f1de4111e98d 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -738,7 +738,7 @@ static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim) { struct rapl_primitive_info *rpi = rp->priv->rpi;
- if (prim < 0 || prim > NR_RAPL_PRIMITIVES || !rpi) + if (prim < 0 || prim >= NR_RAPL_PRIMITIVES || !rpi) return NULL;
return &rpi[prim]; diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c index 42f93d4c6ee3..53e9c304ae0a 100644 --- a/drivers/pps/clients/pps_parport.c +++ b/drivers/pps/clients/pps_parport.c @@ -148,7 +148,10 @@ static void parport_attach(struct parport *port) return; }
- index = ida_simple_get(&pps_client_index, 0, 0, GFP_KERNEL); + index = ida_alloc(&pps_client_index, GFP_KERNEL); + if (index < 0) + goto err_free_device; + memset(&pps_client_cb, 0, sizeof(pps_client_cb)); pps_client_cb.private = device; pps_client_cb.irq_func = parport_irq; @@ -159,7 +162,7 @@ static void parport_attach(struct parport *port) index); if (!device->pardev) { pr_err("couldn't register with %s\n", port->name); - goto err_free; + goto err_free_ida; }
if (parport_claim_or_block(device->pardev) < 0) { @@ -187,8 +190,9 @@ static void parport_attach(struct parport *port) parport_release(device->pardev); err_unregister_dev: parport_unregister_device(device->pardev); -err_free: - ida_simple_remove(&pps_client_index, index); +err_free_ida: + ida_free(&pps_client_index, index); +err_free_device: kfree(device); }
@@ -208,7 +212,7 @@ static void parport_detach(struct parport *port) pps_unregister_source(device->pps); parport_release(pardev); parport_unregister_device(pardev); - ida_simple_remove(&pps_client_index, device->index); + ida_free(&pps_client_index, device->index); kfree(device); }
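For reference, the ida_simple_*() helpers replaced here are deprecated wrappers around the plain IDA API, so the conversion is mostly mechanical; the one behavioural addition in this hunk is the new error check, since an allocation failure previously went unnoticed. A minimal sketch with hypothetical names:

	#include <linux/idr.h>

	static DEFINE_IDA(foo_index_ida);

	static int foo_get_index(void)
	{
		/* any free non-negative id; -ENOMEM or -ENOSPC on failure */
		return ida_alloc(&foo_index_ida, GFP_KERNEL);
	}

	static void foo_put_index(int index)
	{
		ida_free(&foo_index_ida, index);
	}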
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 1b65e5e4e40f..59e71fd0db43 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -768,7 +768,7 @@ int of_regulator_bulk_get_all(struct device *dev, struct device_node *np, name[i] = '\0'; tmp = regulator_get(dev, name); if (IS_ERR(tmp)) { - ret = -EINVAL; + ret = PTR_ERR(tmp); goto error; } (*consumers)[n].consumer = tmp; diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index cfee164dd645..d68d4b22f528 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -213,7 +213,7 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = { /* QSPI Code - alias */ { 0x08000000, 0x08000000, 0x08000000, 0 }, /* DDR (Code) - alias */ - { 0x10000000, 0x80000000, 0x0FFE0000, 0 }, + { 0x10000000, 0x40000000, 0x0FFE0000, 0 }, /* TCML */ { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM}, /* TCMU */ @@ -1134,6 +1134,8 @@ static int imx_rproc_probe(struct platform_device *pdev) goto err_put_rproc; }
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); + ret = imx_rproc_xtr_mbox_init(rproc); if (ret) goto err_put_wkq; @@ -1152,8 +1154,6 @@ static int imx_rproc_probe(struct platform_device *pdev) if (ret) goto err_put_scu;
- INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); - if (rproc->state != RPROC_DETACHED) rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c index 2537ec05ecee..578fe867080c 100644 --- a/drivers/reset/reset-berlin.c +++ b/drivers/reset/reset-berlin.c @@ -68,13 +68,14 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
static int berlin2_reset_probe(struct platform_device *pdev) { - struct device_node *parent_np = of_get_parent(pdev->dev.of_node); + struct device_node *parent_np; struct berlin_reset_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM;
+ parent_np = of_get_parent(pdev->dev.of_node); priv->regmap = syscon_node_to_regmap(parent_np); of_node_put(parent_np); if (IS_ERR(priv->regmap)) diff --git a/drivers/reset/reset-k210.c b/drivers/reset/reset-k210.c index b62a2fd44e4e..e77e4cca377d 100644 --- a/drivers/reset/reset-k210.c +++ b/drivers/reset/reset-k210.c @@ -90,7 +90,7 @@ static const struct reset_control_ops k210_rst_ops = { static int k210_rst_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *parent_np = of_get_parent(dev->of_node); + struct device_node *parent_np; struct k210_rst *ksr;
dev_info(dev, "K210 reset controller\n"); @@ -99,6 +99,7 @@ static int k210_rst_probe(struct platform_device *pdev) if (!ksr) return -ENOMEM;
+ parent_np = of_get_parent(dev->of_node); ksr->map = syscon_node_to_regmap(parent_np); of_node_put(parent_np); if (IS_ERR(ksr->map)) diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index cea3a79d538e..00e245173320 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -1485,6 +1485,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char **data) { struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected); int c = *count; unsigned char p = *phase; unsigned char *d = *data; @@ -1496,7 +1497,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, return -1; }
- NCR5380_to_ncmd(hostdata->connected)->phase = p; + ncmd->phase = p;
if (p & SR_IO) { if (hostdata->read_overruns) @@ -1608,45 +1609,44 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * request. */
- if (hostdata->flags & FLAG_DMA_FIXUP) { - if (p & SR_IO) { - /* - * The workaround was to transfer fewer bytes than we - * intended to with the pseudo-DMA read function, wait for - * the chip to latch the last byte, read it, and then disable - * pseudo-DMA mode. - * - * After REQ is asserted, the NCR5380 asserts DRQ and ACK. - * REQ is deasserted when ACK is asserted, and not reasserted - * until ACK goes false. Since the NCR5380 won't lower ACK - * until DACK is asserted, which won't happen unless we twiddle - * the DMA port or we take the NCR5380 out of DMA mode, we - * can guarantee that we won't handshake another extra - * byte. - */ - - if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ, BASR_DRQ, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); - } - if (NCR5380_poll_politely(hostdata, STATUS_REG, - SR_REQ, 0, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); - } - d[*count - 1] = NCR5380_read(INPUT_DATA_REG); - } else { - /* - * Wait for the last byte to be sent. If REQ is being asserted for - * the byte we're interested, we'll ACK it and it will go false. - */ - if (NCR5380_poll_politely2(hostdata, - BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, - BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); + if ((hostdata->flags & FLAG_DMA_FIXUP) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + /* + * The workaround was to transfer fewer bytes than we + * intended to with the pseudo-DMA receive function, wait for + * the chip to latch the last byte, read it, and then disable + * DMA mode. + * + * After REQ is asserted, the NCR5380 asserts DRQ and ACK. + * REQ is deasserted when ACK is asserted, and not reasserted + * until ACK goes false. Since the NCR5380 won't lower ACK + * until DACK is asserted, which won't happen unless we twiddle + * the DMA port or we take the NCR5380 out of DMA mode, we + * can guarantee that we won't handshake another extra + * byte. + * + * If sending, wait for the last byte to be sent. If REQ is + * being asserted for the byte we're interested, we'll ACK it + * and it will go false. + */ + if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_DRQ, BASR_DRQ, 0)) { + if ((p & SR_IO) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + if (!NCR5380_poll_politely(hostdata, STATUS_REG, + SR_REQ, 0, 0)) { + d[c] = NCR5380_read(INPUT_DATA_REG); + --ncmd->this_residual; + } else { + result = -1; + scmd_printk(KERN_ERR, hostdata->connected, + "PDMA fixup: !REQ timeout\n"); + } } + } else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) { + result = -1; + scmd_printk(KERN_ERR, hostdata->connected, + "PDMA fixup: DRQ timeout\n"); } }
diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c index 2e83a667901f..1a7437f4328e 100644 --- a/drivers/scsi/elx/libefc/efc_nport.c +++ b/drivers/scsi/elx/libefc/efc_nport.c @@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain, spin_lock_irqsave(&efc->lock, flags); list_for_each_entry(nport, &domain->nport_list, list_entry) { if (nport->wwpn == wwpn && nport->wwnn == wwnn) { - kref_put(&nport->ref, nport->release); /* Shutdown this NPORT */ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); + kref_put(&nport->ref, nport->release); break; } } diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 2e511697fce3..2c88ce24d19a 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup); * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets * so bus errors are unavoidable. * - * If a MOVE.B instruction faults, we assume that zero bytes were transferred - * and simply retry. That assumption probably depends on target behaviour but - * seems to hold up okay. The NOP provides synchronization: without it the - * fault can sometimes occur after the program counter has moved past the - * offending instruction. Post-increment addressing can't be used. + * If a MOVE.B instruction faults during a receive operation, we assume the + * target sent nothing and try again. That assumption probably depends on + * target firmware but it seems to hold up okay. If a fault happens during a + * send operation, the target may or may not have seen /ACK and got the byte. + * It's uncertain so the whole SCSI command gets retried. + * + * The NOP is needed for synchronization because the fault address in the + * exception stack frame may or may not be the instruction that actually + * caused the bus error. Post-increment addressing can't be used. */
#define MOVE_BYTE(operands) \ @@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup); ".previous \n" \ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
-#define MAC_PDMA_DELAY 32 - static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) { unsigned char *addr = start; @@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) if (n >= 1) { MOVE_BYTE("%0@,%3@"); if (result) - goto out; + return -1; } if (n >= 1 && ((unsigned long)addr & 1)) { MOVE_BYTE("%0@,%3@"); if (result) - goto out; + return -2; } while (n >= 32) MOVE_16_WORDS("%0@+,%3@"); while (n >= 2) MOVE_WORD("%0@+,%3@"); if (result) - return start - addr; /* Negated to indicate uncertain length */ + return start - addr - 1; /* Negated to indicate uncertain length */ if (n == 1) MOVE_BYTE("%0@,%3@"); -out: return addr - start; }
@@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value) out_be32(hostdata->io + (CTRL_REG << 4), value); }
+static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata) +{ + unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */ + unsigned char basr; + +again: + basr = NCR5380_read(BUS_AND_STATUS_REG); + + if (!(basr & BASR_PHASE_MATCH)) + return 1; + + if (basr & BASR_IRQ) + return -1; + + if (basr & BASR_DRQ) + return 0; + + if (n-- == 0) { + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: DRQ timeout\n", __func__); + return -1; + } + + NCR5380_poll_politely2(hostdata, + BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, + BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0); + goto again; +} + static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); unsigned char *d = dst; - int result = 0;
hostdata->pdma_residual = len;
- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ | BASR_PHASE_MATCH, - BASR_DRQ | BASR_PHASE_MATCH, 0)) { - int bytes; + while (macscsi_wait_for_drq(hostdata) == 0) { + int bytes, chunk_bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE);
- bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512)); + chunk_bytes = min(hostdata->pdma_residual, 512); + bytes = mac_pdma_recv(s, d, chunk_bytes); + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
if (bytes > 0) { d += bytes; @@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, }
if (hostdata->pdma_residual == 0) - goto out; + break;
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, - BASR_ACK, 0) < 0) - scmd_printk(KERN_DEBUG, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - goto out; + if (bytes > 0) + continue;
- if (bytes == 0) - udelay(MAC_PDMA_DELAY); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error [%d/%d] (%d/%d)\n", + __func__, d - dst, len, bytes, chunk_bytes);
- if (bytes >= 0) + if (bytes == 0) continue;
- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, d - dst, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; - goto out; + if (macscsi_wait_for_drq(hostdata) <= 0) + set_host_byte(hostdata->connected, DID_ERROR); + break; }
- scmd_printk(KERN_ERR, hostdata->connected, - "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; -out: - if (macintosh_config->ident == MAC_MODEL_IIFX) - write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); - return result; + return 0; }
static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, @@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, { unsigned char *s = src; u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); - int result = 0;
hostdata->pdma_residual = len;
- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ | BASR_PHASE_MATCH, - BASR_DRQ | BASR_PHASE_MATCH, 0)) { - int bytes; + while (macscsi_wait_for_drq(hostdata) == 0) { + int bytes, chunk_bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE);
- bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512)); + chunk_bytes = min(hostdata->pdma_residual, 512); + bytes = mac_pdma_send(s, d, chunk_bytes); + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
if (bytes > 0) { s += bytes; hostdata->pdma_residual -= bytes; }
- if (hostdata->pdma_residual == 0) { - if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, - TCR_LAST_BYTE_SENT, - TCR_LAST_BYTE_SENT, - 0) < 0) { - scmd_printk(KERN_ERR, hostdata->connected, - "%s: Last Byte Sent timeout\n", __func__); - result = -1; - } - goto out; - } + if (hostdata->pdma_residual == 0) + break;
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, - BASR_ACK, 0) < 0) - scmd_printk(KERN_DEBUG, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - goto out; + if (bytes > 0) + continue;
- if (bytes == 0) - udelay(MAC_PDMA_DELAY); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error [%d/%d] (%d/%d)\n", + __func__, s - src, len, bytes, chunk_bytes);
- if (bytes >= 0) + if (bytes == 0) continue;
- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, s - src, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; - goto out; + if (macscsi_wait_for_drq(hostdata) <= 0) + set_host_byte(hostdata->connected, DID_ERROR); + break; }
- scmd_printk(KERN_ERR, hostdata->connected, - "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; -out: - if (macintosh_config->ident == MAC_MODEL_IIFX) - write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); - return result; + return 0; }
static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 4fce7cc32cb2..b0a574c534c4 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3118,7 +3118,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb1);
- if (!vpd || vpd->len < 8) { + if (!vpd || vpd->len <= 8) { rcu_read_unlock(); return; } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 868453b18c9a..2ae64cda8bc9 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2355,14 +2355,6 @@ static inline void pqi_mask_device(u8 *scsi3addr) scsi3addr[3] |= 0xc0; }
-static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) -{ - if (pqi_is_logical_device(device)) - return false; - - return (device->path_map & (device->path_map - 1)) != 0; -} - static inline bool pqi_expose_device(struct pqi_scsi_dev *device) { return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); @@ -3259,14 +3251,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) int residual_count; int xfer_count; bool device_offline; - struct pqi_scsi_dev *device;
scmd = io_request->scmd; error_info = io_request->error_info; host_byte = DID_OK; sense_data_length = 0; device_offline = false; - device = scmd->device->hostdata;
switch (error_info->service_response) { case PQI_AIO_SERV_RESPONSE_COMPLETE: @@ -3291,14 +3281,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) break; case PQI_AIO_STATUS_AIO_PATH_DISABLED: pqi_aio_path_disabled(io_request); - if (pqi_is_multipath_device(device)) { - pqi_device_remove_start(device); - host_byte = DID_NO_CONNECT; - scsi_status = SAM_STAT_CHECK_CONDITION; - } else { - scsi_status = SAM_STAT_GOOD; - io_request->status = -EAGAIN; - } + scsi_status = SAM_STAT_GOOD; + io_request->status = -EAGAIN; break; case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c index e0527b9efd05..1bbc9af1e50b 100644 --- a/drivers/soc/fsl/qe/tsa.c +++ b/drivers/soc/fsl/qe/tsa.c @@ -140,7 +140,7 @@ static inline void tsa_write32(void __iomem *addr, u32 val) iowrite32be(val, addr); }
-static inline void tsa_write8(void __iomem *addr, u32 val) +static inline void tsa_write8(void __iomem *addr, u8 val) { iowrite8(val, addr); } diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index f9fd6177118c..577f1f25ab10 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -196,9 +196,6 @@ static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) { struct qcom_smd_rpm *rpm;
- if (!rpdev->dev.of_node) - return -EINVAL; - rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); if (!rpm) return -ENOMEM; @@ -218,18 +215,38 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) of_platform_depopulate(&rpdev->dev); }
-static const struct rpmsg_device_id qcom_smd_rpm_id_table[] = { - { .name = "rpm_requests", }, - { /* sentinel */ } +static const struct of_device_id qcom_smd_rpm_of_match[] = { + { .compatible = "qcom,rpm-apq8084" }, + { .compatible = "qcom,rpm-ipq6018" }, + { .compatible = "qcom,rpm-ipq9574" }, + { .compatible = "qcom,rpm-msm8226" }, + { .compatible = "qcom,rpm-msm8909" }, + { .compatible = "qcom,rpm-msm8916" }, + { .compatible = "qcom,rpm-msm8936" }, + { .compatible = "qcom,rpm-msm8953" }, + { .compatible = "qcom,rpm-msm8974" }, + { .compatible = "qcom,rpm-msm8976" }, + { .compatible = "qcom,rpm-msm8994" }, + { .compatible = "qcom,rpm-msm8996" }, + { .compatible = "qcom,rpm-msm8998" }, + { .compatible = "qcom,rpm-sdm660" }, + { .compatible = "qcom,rpm-sm6115" }, + { .compatible = "qcom,rpm-sm6125" }, + { .compatible = "qcom,rpm-sm6375" }, + { .compatible = "qcom,rpm-qcm2290" }, + { .compatible = "qcom,rpm-qcs404" }, + {} }; -MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table); +MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
static struct rpmsg_driver qcom_smd_rpm_driver = { .probe = qcom_smd_rpm_probe, .remove = qcom_smd_rpm_remove, .callback = qcom_smd_rpm_callback, - .id_table = qcom_smd_rpm_id_table, - .drv.name = "qcom_smd_rpm", + .drv = { + .name = "qcom_smd_rpm", + .of_match_table = qcom_smd_rpm_of_match, + }, };
static int __init qcom_smd_rpm_init(void) diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c index bab4ad87aa75..d5099a3386b4 100644 --- a/drivers/soc/versatile/soc-integrator.c +++ b/drivers/soc/versatile/soc-integrator.c @@ -113,6 +113,7 @@ static int __init integrator_soc_init(void) return -ENODEV;
syscon_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(syscon_regmap)) return PTR_ERR(syscon_regmap);
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c index c6876d232d8f..cf91abe07d38 100644 --- a/drivers/soc/versatile/soc-realview.c +++ b/drivers/soc/versatile/soc-realview.c @@ -4,6 +4,7 @@ * * Author: Linus Walleij linus.walleij@linaro.org */ +#include <linux/device.h> #include <linux/init.h> #include <linux/io.h> #include <linux/slab.h> @@ -81,6 +82,13 @@ static struct attribute *realview_attrs[] = {
ATTRIBUTE_GROUPS(realview);
+static void realview_soc_socdev_release(void *data) +{ + struct soc_device *soc_dev = data; + + soc_device_unregister(soc_dev); +} + static int realview_soc_probe(struct platform_device *pdev) { struct regmap *syscon_regmap; @@ -93,7 +101,7 @@ static int realview_soc_probe(struct platform_device *pdev) if (IS_ERR(syscon_regmap)) return PTR_ERR(syscon_regmap);
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM;
@@ -106,10 +114,14 @@ static int realview_soc_probe(struct platform_device *pdev) soc_dev_attr->family = "Versatile"; soc_dev_attr->custom_attr_group = realview_groups[0]; soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) { - kfree(soc_dev_attr); + if (IS_ERR(soc_dev)) return -ENODEV; - } + + ret = devm_add_action_or_reset(&pdev->dev, realview_soc_socdev_release, + soc_dev); + if (ret) + return ret; + ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET, &realview_coreid); if (ret) diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 4cc4f32ca449..6f9e9d871677 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -375,9 +375,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, * If the QSPI controller is set in regular SPI mode, set it in * Serial Memory Mode (SMM). */ - if (aq->mr != QSPI_MR_SMM) { - atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); - aq->mr = QSPI_MR_SMM; + if (!(aq->mr & QSPI_MR_SMM)) { + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR); }
/* Clear pending interrupts */ @@ -501,7 +501,8 @@ static int atmel_qspi_setup(struct spi_device *spi) if (ret < 0) return ret;
- aq->scr = QSPI_SCR_SCBR(scbr); + aq->scr &= ~QSPI_SCR_SCBR_MASK; + aq->scr |= QSPI_SCR_SCBR(scbr); atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(ctrl->dev.parent); @@ -534,6 +535,7 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi) if (ret < 0) return ret;
+ aq->scr &= ~QSPI_SCR_DLYBS_MASK; aq->scr |= QSPI_SCR_DLYBS(cs_setup); atmel_qspi_write(aq->scr, aq, QSPI_SCR);
@@ -549,8 +551,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq) atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */ - atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); - aq->mr = QSPI_MR_SMM; + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR);
/* Enable the QSPI controller */ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); @@ -726,6 +728,7 @@ static void atmel_qspi_remove(struct platform_device *pdev) clk_unprepare(aq->pclk);
pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); }
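Both this atmel-quadspi hunk and the spi-fsl-lpspi one below add the missing pm_runtime_dont_use_autosuspend(): a pm_runtime_use_autosuspend() call made at probe time is expected to be undone before the driver goes away, otherwise autosuspend state leaks across unbind/rebind. A hedged sketch of the balanced pairing in a hypothetical driver (delay value invented):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct platform_device *pdev)
	{
		pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		return 0;
	}

	static void foo_remove(struct platform_device *pdev)
	{
		/* undo what probe set up, in reverse order */
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
	}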
diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c index 9f64afd8164e..4965bc86d7f5 100644 --- a/drivers/spi/spi-bcmbca-hsspi.c +++ b/drivers/spi/spi-bcmbca-hsspi.c @@ -546,12 +546,14 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) goto out_put_host; }
- pm_runtime_enable(&pdev->dev); + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + goto out_put_host;
ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group); if (ret) { dev_err(&pdev->dev, "couldn't register sysfs group\n"); - goto out_pm_disable; + goto out_put_host; }
/* register and we are done */ @@ -565,8 +567,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
out_sysgroup_disable: sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group); -out_pm_disable: - pm_runtime_disable(&pdev->dev); out_put_host: spi_controller_put(host); out_disable_pll_clk: diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index f7e268cf9085..180cea7d3817 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -988,6 +988,7 @@ static void fsl_lpspi_remove(struct platform_device *pdev)
fsl_lpspi_dma_exit(controller);
+ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev); pm_runtime_disable(fsl_lpspi->dev); }
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 93a9667f6bdc..bc6c086ddd43 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -57,13 +57,6 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h>
-/* - * The driver only uses one single LUT entry, that is updated on - * each call of exec_op(). Index 0 is preset at boot with a basic - * read operation, so let's use the last entry (31). - */ -#define SEQID_LUT 31 - /* Registers used by the driver */ #define FSPI_MCR0 0x00 #define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) @@ -263,9 +256,6 @@ #define FSPI_TFDR 0x180
#define FSPI_LUT_BASE 0x200 -#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) -#define FSPI_LUT_REG(idx) \ - (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
/* register map end */
@@ -341,6 +331,7 @@ struct nxp_fspi_devtype_data { unsigned int txfifo; unsigned int ahb_buf_size; unsigned int quirks; + unsigned int lut_num; bool little_endian; };
@@ -349,6 +340,7 @@ static struct nxp_fspi_devtype_data lx2160a_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ };
@@ -357,6 +349,7 @@ static struct nxp_fspi_devtype_data imx8mm_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ };
@@ -365,6 +358,7 @@ static struct nxp_fspi_devtype_data imx8qxp_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ };
@@ -373,6 +367,16 @@ static struct nxp_fspi_devtype_data imx8dxl_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = FSPI_QUIRK_USE_IP_ONLY, + .lut_num = 32, + .little_endian = true, /* little-endian */ +}; + +static struct nxp_fspi_devtype_data imx8ulp_data = { + .rxfifo = SZ_512, /* (64 * 64 bits) */ + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, + .lut_num = 16, .little_endian = true, /* little-endian */ };
@@ -544,6 +548,8 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, void __iomem *base = f->iobase; u32 lutval[4] = {}; int lutidx = 1, i; + u32 lut_offset = (f->devtype_data->lut_num - 1) * 4 * 4; + u32 target_lut_reg;
/* cmd */ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), @@ -588,8 +594,10 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
/* fill LUT */ - for (i = 0; i < ARRAY_SIZE(lutval); i++) - fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i)); + for (i = 0; i < ARRAY_SIZE(lutval); i++) { + target_lut_reg = FSPI_LUT_BASE + lut_offset + i * 4; + fspi_writel(f, lutval[i], base + target_lut_reg); + }
dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x], size: 0x%08x\n", op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes); @@ -876,7 +884,7 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) void __iomem *base = f->iobase; int seqnum = 0; int err = 0; - u32 reg; + u32 reg, seqid_lut;
reg = fspi_readl(f, base + FSPI_IPRXFCR); /* invalid RXFIFO first */ @@ -892,8 +900,9 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) * the LUT at each exec_op() call. And also specify the DATA * length, since it's has not been specified in the LUT. */ + seqid_lut = f->devtype_data->lut_num - 1; fspi_writel(f, op->data.nbytes | - (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) | + (seqid_lut << FSPI_IPCR1_SEQID_SHIFT) | (seqnum << FSPI_IPCR1_SEQNUM_SHIFT), base + FSPI_IPCR1);
@@ -1017,7 +1026,7 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) { void __iomem *base = f->iobase; int ret, i; - u32 reg; + u32 reg, seqid_lut;
/* disable and unprepare clock to avoid glitch pass to controller */ nxp_fspi_clk_disable_unprep(f); @@ -1092,11 +1101,17 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) fspi_writel(f, reg, base + FSPI_FLSHB1CR1); fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
+ /* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry. + */ + seqid_lut = f->devtype_data->lut_num - 1; /* AHB Read - Set lut sequence ID for all CS. */ - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHA1CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHA2CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHB1CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHB2CR2);
f->selected = -1;
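To make the new LUT indexing concrete, a worked example using only values visible in this patch (FSPI_LUT_BASE is 0x200 and each LUT entry holds four 32-bit words):

	u32 seqid_lut  = lut_num - 1;               /* last entry: 31 or 15 */
	u32 lut_offset = seqid_lut * 4 * 4;         /* byte offset from FSPI_LUT_BASE */
	u32 first_reg  = FSPI_LUT_BASE + lut_offset;

so for the existing controllers with lut_num = 32 the four words land at 0x3f0..0x3fc, matching what the old SEQID_LUT/FSPI_LUT_REG() macros produced, while the i.MX8ULP's 16-entry LUT puts them at 0x2f0..0x2fc.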
@@ -1291,6 +1306,7 @@ static const struct of_device_id nxp_fspi_dt_ids[] = { { .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, }, { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, }, { .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, }, + { .compatible = "nxp,imx8ulp-fspi", .data = (void *)&imx8ulp_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c index e982d3189fdc..1381563941fe 100644 --- a/drivers/spi/spi-ppc4xx.c +++ b/drivers/spi/spi-ppc4xx.c @@ -26,7 +26,6 @@ #include <linux/errno.h> #include <linux/wait.h> #include <linux/of_address.h> -#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -410,7 +409,11 @@ static int spi_ppc4xx_of_probe(struct platform_device *op) }
/* Request IRQ */ - hw->irqnum = irq_of_parse_and_map(np, 0); + ret = platform_get_irq(op, 0); + if (ret < 0) + goto free_host; + hw->irqnum = ret; + ret = request_irq(hw->irqnum, spi_ppc4xx_int, 0, "spi_ppc4xx_of", (void *)hw); if (ret) { diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index f9f40c0e9add..52cb1a3bb8c7 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -921,6 +921,48 @@ int tb_port_get_link_speed(struct tb_port *port) } }
+/** + * tb_port_get_link_generation() - Returns link generation + * @port: Lane adapter + * + * Returns link generation as number or negative errno in case of + * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2 + * links so for those always returns 2. + */ +int tb_port_get_link_generation(struct tb_port *port) +{ + int ret; + + ret = tb_port_get_link_speed(port); + if (ret < 0) + return ret; + + switch (ret) { + case 40: + return 4; + case 20: + return 3; + default: + return 2; + } +} + +static const char *width_name(enum tb_link_width width) +{ + switch (width) { + case TB_LINK_WIDTH_SINGLE: + return "symmetric, single lane"; + case TB_LINK_WIDTH_DUAL: + return "symmetric, dual lanes"; + case TB_LINK_WIDTH_ASYM_TX: + return "asymmetric, 3 transmitters, 1 receiver"; + case TB_LINK_WIDTH_ASYM_RX: + return "asymmetric, 3 receivers, 1 transmitter"; + default: + return "unknown"; + } +} + /** * tb_port_get_link_width() - Get current link width * @port: Port to check (USB4 or CIO) @@ -946,8 +988,15 @@ int tb_port_get_link_width(struct tb_port *port) LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; }
-static bool tb_port_is_width_supported(struct tb_port *port, - unsigned int width_mask) +/** + * tb_port_width_supported() - Is the given link width supported + * @port: Port to check + * @width: Widths to check (bitmask) + * + * Can be called to any lane adapter. Checks if given @width is + * supported by the hardware and returns %true if it is. + */ +bool tb_port_width_supported(struct tb_port *port, unsigned int width) { u32 phy, widths; int ret; @@ -955,20 +1004,23 @@ static bool tb_port_is_width_supported(struct tb_port *port, if (!port->cap_phy) return false;
+ if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) { + if (tb_port_get_link_generation(port) < 4 || + !usb4_port_asym_supported(port)) + return false; + } + ret = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_0, 1); if (ret) return false;
- widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> - LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; - - return widths & width_mask; -} - -static bool is_gen4_link(struct tb_port *port) -{ - return tb_port_get_link_speed(port) > 20; + /* + * The field encoding is the same as &enum tb_link_width (which is + * passed to @width). + */ + widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy); + return widths & width; }
/** @@ -998,15 +1050,23 @@ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width) switch (width) { case TB_LINK_WIDTH_SINGLE: /* Gen 4 link cannot be single */ - if (is_gen4_link(port)) + if (tb_port_get_link_generation(port) >= 4) return -EOPNOTSUPP; val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; break; + case TB_LINK_WIDTH_DUAL: + if (tb_port_get_link_generation(port) >= 4) + return usb4_port_asym_set_link_width(port, width); val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; break; + + case TB_LINK_WIDTH_ASYM_TX: + case TB_LINK_WIDTH_ASYM_RX: + return usb4_port_asym_set_link_width(port, width); + default: return -EINVAL; } @@ -1131,7 +1191,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port) /** * tb_port_wait_for_link_width() - Wait until link reaches specific width * @port: Port to wait for - * @width_mask: Expected link width mask + * @width: Expected link width (bitmask) * @timeout_msec: Timeout in ms how long to wait * * Should be used after both ends of the link have been bonded (or @@ -1140,14 +1200,15 @@ void tb_port_lane_bonding_disable(struct tb_port *port) * within the given timeout, %0 if it did. Can be passed a mask of * expected widths and succeeds if any of the widths is reached. */ -int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, +int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width, int timeout_msec) { ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); int ret;
/* Gen 4 link does not support single lane */ - if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port)) + if ((width & TB_LINK_WIDTH_SINGLE) && + tb_port_get_link_generation(port) >= 4) return -EOPNOTSUPP;
do { @@ -1160,7 +1221,7 @@ int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, */ if (ret != -EACCES) return ret; - } else if (ret & width_mask) { + } else if (ret & width) { return 0; }
@@ -1210,6 +1271,9 @@ int tb_port_update_credits(struct tb_port *port) ret = tb_port_do_update_credits(port); if (ret) return ret; + + if (!port->dual_link_port) + return 0; return tb_port_do_update_credits(port->dual_link_port); }
@@ -2799,6 +2863,38 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw) return 0; }
+/* Must be called after tb_switch_update_link_attributes() */ +static void tb_switch_link_init(struct tb_switch *sw) +{ + struct tb_port *up, *down; + bool bonded; + + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return; + + tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed); + tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width)); + + bonded = sw->link_width >= TB_LINK_WIDTH_DUAL; + + /* + * Gen 4 links come up as bonded so update the port structures + * accordingly. + */ + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + up->bonded = bonded; + if (up->dual_link_port) + up->dual_link_port->bonded = bonded; + tb_port_update_credits(up); + + down->bonded = bonded; + if (down->dual_link_port) + down->dual_link_port->bonded = bonded; + tb_port_update_credits(down); +} + /** * tb_switch_lane_bonding_enable() - Enable lane bonding * @sw: Switch to enable lane bonding @@ -2807,24 +2903,20 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw) * switch. If conditions are correct and both switches support the feature, * lanes are bonded. It is safe to call this to any switch. */ -int tb_switch_lane_bonding_enable(struct tb_switch *sw) +static int tb_switch_lane_bonding_enable(struct tb_switch *sw) { struct tb_port *up, *down; - u64 route = tb_route(sw); - unsigned int width_mask; + unsigned int width; int ret;
- if (!route) - return 0; - if (!tb_switch_lane_bonding_possible(sw)) return 0;
up = tb_upstream_port(sw); down = tb_switch_downstream_port(sw);
- if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) || - !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL)) + if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) || + !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL)) return 0;
/* @@ -2848,21 +2940,10 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw) }
/* Any of the widths are all bonded */ - width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | - TB_LINK_WIDTH_ASYM_RX; - - ret = tb_port_wait_for_link_width(down, width_mask, 100); - if (ret) { - tb_port_warn(down, "timeout enabling lane bonding\n"); - return ret; - } + width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | + TB_LINK_WIDTH_ASYM_RX;
- tb_port_update_credits(down); - tb_port_update_credits(up); - tb_switch_update_link_attributes(sw); - - tb_sw_dbg(sw, "lane bonding enabled\n"); - return ret; + return tb_port_wait_for_link_width(down, width, 100); }
/** @@ -2872,20 +2953,27 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw) * Disables lane bonding between @sw and parent. This can be called even * if lanes were not bonded originally. */ -void tb_switch_lane_bonding_disable(struct tb_switch *sw) +static int tb_switch_lane_bonding_disable(struct tb_switch *sw) { struct tb_port *up, *down; int ret;
- if (!tb_route(sw)) - return; - up = tb_upstream_port(sw); if (!up->bonded) - return; + return 0;
- down = tb_switch_downstream_port(sw); + /* + * If the link is Gen 4 there is no way to switch the link to + * two single lane links so avoid that here. Also don't bother + * if the link is not up anymore (sw is unplugged). + */ + ret = tb_port_get_link_generation(up); + if (ret < 0) + return ret; + if (ret >= 4) + return -EOPNOTSUPP;
+ down = tb_switch_downstream_port(sw); tb_port_lane_bonding_disable(up); tb_port_lane_bonding_disable(down);
@@ -2893,15 +2981,160 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw) * It is fine if we get other errors as the router might have * been unplugged. */ - ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100); - if (ret == -ETIMEDOUT) - tb_sw_warn(sw, "timeout disabling lane bonding\n"); + return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100); +} + +/* Note updating sw->link_width done in tb_switch_update_link_attributes() */ +static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width) +{ + struct tb_port *up, *down, *port; + enum tb_link_width down_width; + int ret; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + if (width == TB_LINK_WIDTH_ASYM_TX) { + down_width = TB_LINK_WIDTH_ASYM_RX; + port = down; + } else { + down_width = TB_LINK_WIDTH_ASYM_TX; + port = up; + } + + ret = tb_port_set_link_width(up, width); + if (ret) + return ret; + + ret = tb_port_set_link_width(down, down_width); + if (ret) + return ret; + + /* + * Initiate the change in the router that one of its TX lanes is + * changing to RX but do so only if there is an actual change. + */ + if (sw->link_width != width) { + ret = usb4_port_asym_start(port); + if (ret) + return ret; + + ret = tb_port_wait_for_link_width(up, width, 100); + if (ret) + return ret; + } + + return 0; +} + +/* Note updating sw->link_width done in tb_switch_update_link_attributes() */ +static int tb_switch_asym_disable(struct tb_switch *sw) +{ + struct tb_port *up, *down; + int ret; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL); + if (ret) + return ret; + + ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL); + if (ret) + return ret; + + /* + * Initiate the change in the router that has three TX lanes and + * is changing one of its TX lanes to RX but only if there is a + * change in the link width. + */ + if (sw->link_width > TB_LINK_WIDTH_DUAL) { + if (sw->link_width == TB_LINK_WIDTH_ASYM_TX) + ret = usb4_port_asym_start(up); + else + ret = usb4_port_asym_start(down); + if (ret) + return ret; + + ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100); + if (ret) + return ret; + } + + return 0; +} + +/** + * tb_switch_set_link_width() - Configure router link width + * @sw: Router to configure + * @width: The new link width + * + * Set device router link width to @width from router upstream port + * perspective. Supports also asymmetric links if the routers boths side + * of the link supports it. + * + * Does nothing for host router. + * + * Returns %0 in case of success, negative errno otherwise. 
+ */ +int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width) +{ + struct tb_port *up, *down; + int ret = 0; + + if (!tb_route(sw)) + return 0; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + switch (width) { + case TB_LINK_WIDTH_SINGLE: + ret = tb_switch_lane_bonding_disable(sw); + break; + + case TB_LINK_WIDTH_DUAL: + if (sw->link_width == TB_LINK_WIDTH_ASYM_TX || + sw->link_width == TB_LINK_WIDTH_ASYM_RX) { + ret = tb_switch_asym_disable(sw); + if (ret) + break; + } + ret = tb_switch_lane_bonding_enable(sw); + break; + + case TB_LINK_WIDTH_ASYM_TX: + case TB_LINK_WIDTH_ASYM_RX: + ret = tb_switch_asym_enable(sw, width); + break; + } + + switch (ret) { + case 0: + break; + + case -ETIMEDOUT: + tb_sw_warn(sw, "timeout changing link width\n"); + return ret; + + case -ENOTCONN: + case -EOPNOTSUPP: + case -ENODEV: + return ret; + + default: + tb_sw_dbg(sw, "failed to change link width: %d\n", ret); + return ret; + }
tb_port_update_credits(down); tb_port_update_credits(up); + tb_switch_update_link_attributes(sw);
- tb_sw_dbg(sw, "lane bonding disabled\n"); + tb_sw_dbg(sw, "link width set to %s\n", width_name(width)); + return ret; }
/** @@ -3068,6 +3301,8 @@ int tb_switch_add(struct tb_switch *sw) if (ret) return ret;
+ tb_switch_link_init(sw); + ret = tb_switch_clx_init(sw); if (ret) return ret; diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index c5e10c1d4c38..ea155547e871 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -16,8 +16,31 @@ #include "tb_regs.h" #include "tunnel.h"
-#define TB_TIMEOUT 100 /* ms */ -#define MAX_GROUPS 7 /* max Group_ID is 7 */ +#define TB_TIMEOUT 100 /* ms */ + +/* + * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver + * direction. This is 40G - 10% guard band bandwidth. + */ +#define TB_ASYM_MIN (40000 * 90 / 100) + +/* + * Threshold bandwidth (in Mb/s) that is used to switch the links to + * asymmetric and back. This is selected as 45G which means when the + * request is higher than this, we switch the link to asymmetric, and + * when it is less than this we switch it back. The 45G is selected so + * that we still have 27G (of the total 72G) for bulk PCIe traffic when + * switching back to symmetric. + */ +#define TB_ASYM_THRESHOLD 45000 + +#define MAX_GROUPS 7 /* max Group_ID is 7 */ + +static unsigned int asym_threshold = TB_ASYM_THRESHOLD; +module_param_named(asym_threshold, asym_threshold, uint, 0444); +MODULE_PARM_DESC(asym_threshold, + "threshold (Mb/s) above which a Gen 4 link is switched to asymmetric. 0 disables. (default: " + __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
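For readers skimming the new constants, a minimal stand-alone sketch of the arithmetic described in the comments above; the macro names are local to this example and only the resulting numbers correspond to TB_ASYM_MIN and TB_ASYM_THRESHOLD in the driver:

#include <stdio.h>

/* Illustrative only: arithmetic behind TB_ASYM_MIN and TB_ASYM_THRESHOLD. */
#define GEN4_LANE_MBPS	40000			/* one Gen 4 lane, in Mb/s */
#define GUARD_BAND(bw)	((bw) - (bw) / 10)	/* leave 10% guard band */

int main(void)
{
	int asym_min = GUARD_BAND(GEN4_LANE_MBPS);		/* 36000 == TB_ASYM_MIN */
	int sym_usable = GUARD_BAND(2 * GEN4_LANE_MBPS);	/* 72000 Mb/s on a symmetric link */

	printf("TB_ASYM_MIN = %d Mb/s\n", asym_min);
	printf("PCIe headroom when symmetric = %d Mb/s\n", sym_usable - 45000);	/* 27000 */
	return 0;
}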
/** * struct tb_cm - Simple Thunderbolt connection manager @@ -255,13 +278,13 @@ static int tb_enable_clx(struct tb_switch *sw) * this in the future to cover the whole topology if it turns * out to be beneficial. */ - while (sw && sw->config.depth > 1) + while (sw && tb_switch_depth(sw) > 1) sw = tb_switch_parent(sw);
if (!sw) return 0;
- if (sw->config.depth != 1) + if (tb_switch_depth(sw) != 1) return 0;
/* @@ -285,14 +308,32 @@ static int tb_enable_clx(struct tb_switch *sw) return ret == -EOPNOTSUPP ? 0 : ret; }
-/* Disables CL states up to the host router */ -static void tb_disable_clx(struct tb_switch *sw) +/** + * tb_disable_clx() - Disable CL states up to host router + * @sw: Router to start + * + * Disables CL states from @sw up to the host router. Returns true if + * any CL states were disabled. This can be used to figure out whether + * the link was set up by us or the boot firmware so we don't + * accidentally enable them if they were not enabled during discovery. + */ +static bool tb_disable_clx(struct tb_switch *sw) { + bool disabled = false; + do { - if (tb_switch_clx_disable(sw) < 0) + int ret; + + ret = tb_switch_clx_disable(sw); + if (ret > 0) + disabled = true; + else if (ret < 0) tb_sw_warn(sw, "failed to disable CL states\n"); + sw = tb_switch_parent(sw); } while (sw); + + return disabled; }
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data) @@ -553,7 +594,7 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, struct tb_switch *sw;
/* Pick the router that is deepest in the topology */ - if (dst_port->sw->config.depth > src_port->sw->config.depth) + if (tb_port_path_direction_downstream(src_port, dst_port)) sw = dst_port->sw; else sw = src_port->sw; @@ -572,133 +613,294 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); }
-static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, - struct tb_port *dst_port, int *available_up, int *available_down) -{ - int usb3_consumed_up, usb3_consumed_down, ret; - struct tb_cm *tcm = tb_priv(tb); +/** + * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link + * @tb: Domain structure + * @src_port: Source protocol adapter + * @dst_port: Destination protocol adapter + * @port: USB4 port the consumed bandwidth is calculated + * @consumed_up: Consumed upstream bandwidth (Mb/s) + * @consumed_down: Consumed downstream bandwidth (Mb/s) + * + * Calculates consumed USB3 and PCIe bandwidth at @port between path + * from @src_port to @dst_port. Does not take tunnel starting from + * @src_port and ending from @src_port into account. + */ +static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb, + struct tb_port *src_port, + struct tb_port *dst_port, + struct tb_port *port, + int *consumed_up, + int *consumed_down) +{ + int pci_consumed_up, pci_consumed_down; struct tb_tunnel *tunnel; - struct tb_port *port;
- tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n", - tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw), - dst_port->port); + *consumed_up = *consumed_down = 0;
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); if (tunnel && tunnel->src_port != src_port && tunnel->dst_port != dst_port) { - ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up, - &usb3_consumed_down); + int ret; + + ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up, + consumed_down); if (ret) return ret; - } else { - usb3_consumed_up = 0; - usb3_consumed_down = 0; }
- /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ - *available_up = *available_down = 120000; + /* + * If there is anything reserved for PCIe bulk traffic take it + * into account here too. + */ + if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) { + *consumed_up += pci_consumed_up; + *consumed_down += pci_consumed_down; + }
- /* Find the minimum available bandwidth over all links */ - tb_for_each_port_on_path(src_port, dst_port, port) { - int link_speed, link_width, up_bw, down_bw; + return 0; +}
- if (!tb_port_is_null(port)) +/** + * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link + * @tb: Domain structure + * @src_port: Source protocol adapter + * @dst_port: Destination protocol adapter + * @port: USB4 port the consumed bandwidth is calculated + * @consumed_up: Consumed upsream bandwidth (Mb/s) + * @consumed_down: Consumed downstream bandwidth (Mb/s) + * + * Calculates consumed DP bandwidth at @port between path from @src_port + * to @dst_port. Does not take tunnel starting from @src_port and ending + * from @src_port into account. + */ +static int tb_consumed_dp_bandwidth(struct tb *tb, + struct tb_port *src_port, + struct tb_port *dst_port, + struct tb_port *port, + int *consumed_up, + int *consumed_down) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + int ret; + + *consumed_up = *consumed_down = 0; + + /* + * Find all DP tunnels that cross the port and reduce + * their consumed bandwidth from the available. + */ + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + int dp_consumed_up, dp_consumed_down; + + if (tb_tunnel_is_invalid(tunnel)) + continue; + + if (!tb_tunnel_is_dp(tunnel)) continue;
- if (tb_is_upstream_port(port)) { - link_speed = port->sw->link_speed; + if (!tb_tunnel_port_on_path(tunnel, port)) + continue; + + /* + * Ignore the DP tunnel between src_port and dst_port + * because it is the same tunnel and we may be + * re-calculating estimated bandwidth. + */ + if (tunnel->src_port == src_port && + tunnel->dst_port == dst_port) + continue; + + ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up, + &dp_consumed_down); + if (ret) + return ret; + + *consumed_up += dp_consumed_up; + *consumed_down += dp_consumed_down; + } + + return 0; +} + +static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port, + struct tb_port *port) +{ + bool downstream = tb_port_path_direction_downstream(src_port, dst_port); + enum tb_link_width width; + + if (tb_is_upstream_port(port)) + width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX; + else + width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX; + + return tb_port_width_supported(port, width); +} + +/** + * tb_maximum_banwidth() - Maximum bandwidth over a single link + * @tb: Domain structure + * @src_port: Source protocol adapter + * @dst_port: Destination protocol adapter + * @port: USB4 port the total bandwidth is calculated + * @max_up: Maximum upstream bandwidth (Mb/s) + * @max_down: Maximum downstream bandwidth (Mb/s) + * @include_asym: Include bandwidth if the link is switched from + * symmetric to asymmetric + * + * Returns maximum possible bandwidth in @max_up and @max_down over a + * single link at @port. If @include_asym is set then includes the + * additional banwdith if the links are transitioned into asymmetric to + * direction from @src_port to @dst_port. + */ +static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port, struct tb_port *port, + int *max_up, int *max_down, bool include_asym) +{ + bool downstream = tb_port_path_direction_downstream(src_port, dst_port); + int link_speed, link_width, up_bw, down_bw; + + /* + * Can include asymmetric, only if it is actually supported by + * the lane adapter. + */ + if (!tb_asym_supported(src_port, dst_port, port)) + include_asym = false; + + if (tb_is_upstream_port(port)) { + link_speed = port->sw->link_speed; + /* + * sw->link_width is from upstream perspective so we use + * the opposite for downstream of the host router. + */ + if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { + up_bw = link_speed * 3 * 1000; + down_bw = link_speed * 1 * 1000; + } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { + up_bw = link_speed * 1 * 1000; + down_bw = link_speed * 3 * 1000; + } else if (include_asym) { /* - * sw->link_width is from upstream perspective - * so we use the opposite for downstream of the - * host router. + * The link is symmetric at the moment but we + * can switch it to asymmetric as needed. Report + * this bandwidth as available (even though it + * is not yet enabled). 
*/ - if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { - up_bw = link_speed * 3 * 1000; - down_bw = link_speed * 1 * 1000; - } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { + if (downstream) { up_bw = link_speed * 1 * 1000; down_bw = link_speed * 3 * 1000; } else { - up_bw = link_speed * port->sw->link_width * 1000; - down_bw = up_bw; + up_bw = link_speed * 3 * 1000; + down_bw = link_speed * 1 * 1000; } } else { - link_speed = tb_port_get_link_speed(port); - if (link_speed < 0) - return link_speed; - - link_width = tb_port_get_link_width(port); - if (link_width < 0) - return link_width; - - if (link_width == TB_LINK_WIDTH_ASYM_TX) { + up_bw = link_speed * port->sw->link_width * 1000; + down_bw = up_bw; + } + } else { + link_speed = tb_port_get_link_speed(port); + if (link_speed < 0) + return link_speed; + + link_width = tb_port_get_link_width(port); + if (link_width < 0) + return link_width; + + if (link_width == TB_LINK_WIDTH_ASYM_TX) { + up_bw = link_speed * 1 * 1000; + down_bw = link_speed * 3 * 1000; + } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { + up_bw = link_speed * 3 * 1000; + down_bw = link_speed * 1 * 1000; + } else if (include_asym) { + /* + * The link is symmetric at the moment but we + * can switch it to asymmetric as needed. Report + * this bandwidth as available (even though it + * is not yet enabled). + */ + if (downstream) { up_bw = link_speed * 1 * 1000; down_bw = link_speed * 3 * 1000; - } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { + } else { up_bw = link_speed * 3 * 1000; down_bw = link_speed * 1 * 1000; - } else { - up_bw = link_speed * link_width * 1000; - down_bw = up_bw; } + } else { + up_bw = link_speed * link_width * 1000; + down_bw = up_bw; } + }
- /* Leave 10% guard band */ - up_bw -= up_bw / 10; - down_bw -= down_bw / 10; - - tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw, - down_bw); + /* Leave 10% guard band */ + *max_up = up_bw - up_bw / 10; + *max_down = down_bw - down_bw / 10;
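A worked example of the per-link numbers computed above, assuming a Gen 4 link (link_speed == 40); this is a stand-alone sketch of the same arithmetic, not driver code, and which direction gets the three lanes depends on the port's perspective handled in the function itself:

#include <stdio.h>

static void guard_band(int up_bw, int down_bw, const char *what)
{
	/* Same 10% reduction the driver applies before reporting. */
	printf("%s: %d/%d Mb/s\n", what, up_bw - up_bw / 10, down_bw - down_bw / 10);
}

int main(void)
{
	int link_speed = 40;	/* Gb/s per lane on a Gen 4 link */

	/* Asymmetric: three lanes one way, one lane the other. */
	guard_band(link_speed * 3 * 1000, link_speed * 1 * 1000, "asym 3+1");	/* 108000/36000 */
	/* Symmetric dual lane. */
	guard_band(link_speed * 2 * 1000, link_speed * 2 * 1000, "dual");	/* 72000/72000 */
	return 0;
}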
- /* - * Find all DP tunnels that cross the port and reduce - * their consumed bandwidth from the available. - */ - list_for_each_entry(tunnel, &tcm->tunnel_list, list) { - int dp_consumed_up, dp_consumed_down; + tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down); + return 0; +}
- if (tb_tunnel_is_invalid(tunnel)) - continue; +/** + * tb_available_bandwidth() - Available bandwidth for tunneling + * @tb: Domain structure + * @src_port: Source protocol adapter + * @dst_port: Destination protocol adapter + * @available_up: Available bandwidth upstream (Mb/s) + * @available_down: Available bandwidth downstream (Mb/s) + * @include_asym: Include bandwidth if the link is switched from + * symmetric to asymmetric + * + * Calculates maximum available bandwidth for protocol tunneling between + * @src_port and @dst_port at the moment. This is minimum of maximum + * link bandwidth across all links reduced by currently consumed + * bandwidth on that link. + * + * If @include_asym is true then includes also bandwidth that can be + * added when the links are transitioned into asymmetric (but does not + * transition the links). + */ +static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port, int *available_up, + int *available_down, bool include_asym) +{ + struct tb_port *port; + int ret;
- if (!tb_tunnel_is_dp(tunnel)) - continue; + /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ + *available_up = *available_down = 120000;
- if (!tb_tunnel_port_on_path(tunnel, port)) - continue; + /* Find the minimum available bandwidth over all links */ + tb_for_each_port_on_path(src_port, dst_port, port) { + int max_up, max_down, consumed_up, consumed_down;
- /* - * Ignore the DP tunnel between src_port and - * dst_port because it is the same tunnel and we - * may be re-calculating estimated bandwidth. - */ - if (tunnel->src_port == src_port && - tunnel->dst_port == dst_port) - continue; + if (!tb_port_is_null(port)) + continue;
- ret = tb_tunnel_consumed_bandwidth(tunnel, - &dp_consumed_up, - &dp_consumed_down); - if (ret) - return ret; + ret = tb_maximum_bandwidth(tb, src_port, dst_port, port, + &max_up, &max_down, include_asym); + if (ret) + return ret;
- up_bw -= dp_consumed_up; - down_bw -= dp_consumed_down; - } + ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port, + port, &consumed_up, + &consumed_down); + if (ret) + return ret; + max_up -= consumed_up; + max_down -= consumed_down;
- /* - * If USB3 is tunneled from the host router down to the - * branch leading to port we need to take USB3 consumed - * bandwidth into account regardless whether it actually - * crosses the port. - */ - up_bw -= usb3_consumed_up; - down_bw -= usb3_consumed_down; + ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port, + &consumed_up, &consumed_down); + if (ret) + return ret; + max_up -= consumed_up; + max_down -= consumed_down;
- if (up_bw < *available_up) - *available_up = up_bw; - if (down_bw < *available_down) - *available_down = down_bw; + if (max_up < *available_up) + *available_up = max_up; + if (max_down < *available_down) + *available_down = max_down; }
if (*available_up < 0) @@ -736,7 +938,7 @@ static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, * That determines the whole USB3 bandwidth for this branch. */ ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, - &available_up, &available_down); + &available_up, &available_down, false); if (ret) { tb_warn(tb, "failed to calculate available bandwidth\n"); return; @@ -794,8 +996,8 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) return ret; }
- ret = tb_available_bandwidth(tb, down, up, &available_up, - &available_down); + ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down, + false); if (ret) goto err_reclaim;
@@ -856,6 +1058,225 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw) return 0; }
+/** + * tb_configure_asym() - Transition links to asymmetric if needed + * @tb: Domain structure + * @src_port: Source adapter to start the transition + * @dst_port: Destination adapter + * @requested_up: Additional bandwidth (Mb/s) required upstream + * @requested_down: Additional bandwidth (Mb/s) required downstream + * + * Transition links between @src_port and @dst_port into asymmetric, with + * three lanes in the direction from @src_port towards @dst_port and one lane + * in the opposite direction, if the bandwidth requirements + * (requested + currently consumed) on that link exceed @asym_threshold. + * + * Must be called with available >= requested over all links. + */ +static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port, int requested_up, + int requested_down) +{ + struct tb_switch *sw; + bool clx, downstream; + struct tb_port *up; + int ret = 0; + + if (!asym_threshold) + return 0; + + /* Disable CL states before doing any transitions */ + downstream = tb_port_path_direction_downstream(src_port, dst_port); + /* Pick up router deepest in the hierarchy */ + if (downstream) + sw = dst_port->sw; + else + sw = src_port->sw; + + clx = tb_disable_clx(sw); + + tb_for_each_upstream_port_on_path(src_port, dst_port, up) { + int consumed_up, consumed_down; + enum tb_link_width width; + + ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, + &consumed_up, &consumed_down); + if (ret) + break; + + if (downstream) { + /* + * Downstream so make sure upstream is within the 36G + * (40G - guard band 10%), and the requested is above + * what the threshold is. + */ + if (consumed_up + requested_up >= TB_ASYM_MIN) { + ret = -ENOBUFS; + break; + } + /* Does consumed + requested exceed the threshold */ + if (consumed_down + requested_down < asym_threshold) + continue; + + width = TB_LINK_WIDTH_ASYM_RX; + } else { + /* Upstream, the opposite of above */ + if (consumed_down + requested_down >= TB_ASYM_MIN) { + ret = -ENOBUFS; + break; + } + if (consumed_up + requested_up < asym_threshold) + continue; + + width = TB_LINK_WIDTH_ASYM_TX; + } + + if (up->sw->link_width == width) + continue; + + if (!tb_port_width_supported(up, width)) + continue; + + tb_sw_dbg(up->sw, "configuring asymmetric link\n"); + + /* + * Here requested + consumed > threshold so we need to + * transtion the link into asymmetric now. + */ + ret = tb_switch_set_link_width(up->sw, width); + if (ret) { + tb_sw_warn(up->sw, "failed to set link width\n"); + break; + } + } + + /* Re-enable CL states if they were previosly enabled */ + if (clx) + tb_enable_clx(sw); + + return ret; +} + +/** + * tb_configure_sym() - Transition links to symmetric if possible + * @tb: Domain structure + * @src_port: Source adapter to start the transition + * @dst_port: Destination adapter + * @requested_up: New lower bandwidth request upstream (Mb/s) + * @requested_down: New lower bandwidth request downstream (Mb/s) + * + * Goes over each link from @src_port to @dst_port and tries to + * transition the link to symmetric if the currently consumed bandwidth + * allows. 
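A stand-alone sketch of the per-link decision tb_configure_asym() makes in the downstream case; the helper name and sample numbers are hypothetical, the two limits mirror TB_ASYM_MIN and the default TB_ASYM_THRESHOLD, and unlike the sketch the driver fails with -ENOBUFS when the single-lane side would not fit:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical helper mirroring the downstream branch above. */
static bool should_switch_to_asym_rx(int consumed_up, int requested_up,
				     int consumed_down, int requested_down)
{
	const int asym_min = 36000;		/* TB_ASYM_MIN */
	const int asym_threshold = 45000;	/* TB_ASYM_THRESHOLD default */

	/* Upstream traffic must still fit on the single remaining lane. */
	if (consumed_up + requested_up >= asym_min)
		return false;
	/* Only switch when downstream demand crosses the threshold. */
	return consumed_down + requested_down >= asym_threshold;
}

int main(void)
{
	/* 20G already consumed downstream, a new stream asks for 30G more. */
	printf("switch to asym RX: %d\n",
	       should_switch_to_asym_rx(1000, 0, 20000, 30000));	/* 1 */
	return 0;
}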
+ */ +static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port, int requested_up, + int requested_down) +{ + struct tb_switch *sw; + bool clx, downstream; + struct tb_port *up; + int ret = 0; + + if (!asym_threshold) + return 0; + + /* Disable CL states before doing any transitions */ + downstream = tb_port_path_direction_downstream(src_port, dst_port); + /* Pick up router deepest in the hierarchy */ + if (downstream) + sw = dst_port->sw; + else + sw = src_port->sw; + + clx = tb_disable_clx(sw); + + tb_for_each_upstream_port_on_path(src_port, dst_port, up) { + int consumed_up, consumed_down; + + /* Already symmetric */ + if (up->sw->link_width <= TB_LINK_WIDTH_DUAL) + continue; + /* Unplugged, no need to switch */ + if (up->sw->is_unplugged) + continue; + + ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, + &consumed_up, &consumed_down); + if (ret) + break; + + if (downstream) { + /* + * Downstream so we want the consumed_down < threshold. + * Upstream traffic should be less than 36G (40G + * guard band 10%) as the link was configured asymmetric + * already. + */ + if (consumed_down + requested_down >= asym_threshold) + continue; + } else { + if (consumed_up + requested_up >= asym_threshold) + continue; + } + + if (up->sw->link_width == TB_LINK_WIDTH_DUAL) + continue; + + tb_sw_dbg(up->sw, "configuring symmetric link\n"); + + ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); + if (ret) { + tb_sw_warn(up->sw, "failed to set link width\n"); + break; + } + } + + /* Re-enable CL states if they were previosly enabled */ + if (clx) + tb_enable_clx(sw); + + return ret; +} + +static void tb_configure_link(struct tb_port *down, struct tb_port *up, + struct tb_switch *sw) +{ + struct tb *tb = sw->tb; + + /* Link the routers using both links if available */ + down->remote = up; + up->remote = down; + if (down->dual_link_port && up->dual_link_port) { + down->dual_link_port->remote = up->dual_link_port; + up->dual_link_port->remote = down->dual_link_port; + } + + /* + * Enable lane bonding if the link is currently two single lane + * links. + */ + if (sw->link_width < TB_LINK_WIDTH_DUAL) + tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL); + + /* + * Device router that comes up as symmetric link is + * connected deeper in the hierarchy, we transition the links + * above into symmetric if bandwidth allows. + */ + if (tb_switch_depth(sw) > 1 && + tb_port_get_link_generation(up) >= 4 && + up->sw->link_width == TB_LINK_WIDTH_DUAL) { + struct tb_port *host_port; + + host_port = tb_port_at(tb_route(sw), tb->root_switch); + tb_configure_sym(tb, host_port, up, 0, 0); + } + + /* Set the link configured */ + tb_switch_configure_link(sw); +} + static void tb_scan_port(struct tb_port *port);
/* @@ -964,19 +1385,9 @@ static void tb_scan_port(struct tb_port *port) goto out_rpm_put; }
- /* Link the switches using both links if available */ upstream_port = tb_upstream_port(sw); - port->remote = upstream_port; - upstream_port->remote = port; - if (port->dual_link_port && upstream_port->dual_link_port) { - port->dual_link_port->remote = upstream_port->dual_link_port; - upstream_port->dual_link_port->remote = port->dual_link_port; - } + tb_configure_link(port, upstream_port, sw);
- /* Enable lane bonding if supported */ - tb_switch_lane_bonding_enable(sw); - /* Set the link configured */ - tb_switch_configure_link(sw); /* * CL0s and CL1 are enabled and supported together. * Silently ignore CLx enabling in case CLx is not supported. @@ -1040,6 +1451,11 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) * deallocated properly. */ tb_switch_dealloc_dp_resource(src_port->sw, src_port); + /* + * If bandwidth on a link is < asym_threshold + * transition the link to symmetric. + */ + tb_configure_sym(tb, src_port, dst_port, 0, 0); /* Now we can allow the domain to runtime suspend again */ pm_runtime_mark_last_busy(&dst_port->sw->dev); pm_runtime_put_autosuspend(&dst_port->sw->dev); @@ -1092,7 +1508,8 @@ static void tb_free_unplugged_children(struct tb_switch *sw) tb_retimer_remove_all(port); tb_remove_dp_resources(port->remote->sw); tb_switch_unconfigure_link(port->remote->sw); - tb_switch_lane_bonding_disable(port->remote->sw); + tb_switch_set_link_width(port->remote->sw, + TB_LINK_WIDTH_SINGLE); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) @@ -1196,7 +1613,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
out = tunnel->dst_port; ret = tb_available_bandwidth(tb, in, out, &estimated_up, - &estimated_down); + &estimated_down, true); if (ret) { tb_port_warn(in, "failed to re-calculate estimated bandwidth\n"); @@ -1212,7 +1629,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n", estimated_up, estimated_down);
- if (in->sw->config.depth < out->sw->config.depth) + if (tb_port_path_direction_downstream(in, out)) estimated_bw = estimated_down; else estimated_bw = estimated_up; @@ -1282,53 +1699,14 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) return NULL; }
-static void tb_tunnel_dp(struct tb *tb) +static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in, + struct tb_port *out) { int available_up, available_down, ret, link_nr; struct tb_cm *tcm = tb_priv(tb); - struct tb_port *port, *in, *out; + int consumed_up, consumed_down; struct tb_tunnel *tunnel;
- if (!tb_acpi_may_tunnel_dp()) { - tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); - return; - } - - /* - * Find pair of inactive DP IN and DP OUT adapters and then - * establish a DP tunnel between them. - */ - tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); - - in = NULL; - out = NULL; - list_for_each_entry(port, &tcm->dp_resources, list) { - if (!tb_port_is_dpin(port)) - continue; - - if (tb_port_is_enabled(port)) { - tb_port_dbg(port, "DP IN in use\n"); - continue; - } - - tb_port_dbg(port, "DP IN available\n"); - - out = tb_find_dp_out(tb, port); - if (out) { - in = port; - break; - } - } - - if (!in) { - tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); - return; - } - if (!out) { - tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); - return; - } - /* * This is only applicable to links that are not bonded (so * when Thunderbolt 1 hardware is involved somewhere in the @@ -1369,7 +1747,8 @@ static void tb_tunnel_dp(struct tb *tb) goto err_detach_group; }
- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); + ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down, + true); if (ret) goto err_reclaim_usb;
@@ -1388,8 +1767,19 @@ static void tb_tunnel_dp(struct tb *tb) goto err_free; }
+ /* If reading the tunnel's consumed bandwidth fails, tear it down */ + ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down); + if (ret) + goto err_deactivate; + list_add_tail(&tunnel->list, &tcm->tunnel_list); + tb_reclaim_usb3_bandwidth(tb, in, out); + /* + * Transition the links to asymmetric if the consumption exceeds + * the threshold. + */ + tb_configure_asym(tb, in, out, consumed_up, consumed_down);
/* Update the domain with the new bandwidth estimation */ tb_recalc_estimated_bandwidth(tb); @@ -1399,8 +1789,10 @@ static void tb_tunnel_dp(struct tb *tb) * TMU mode to HiFi for CL0s to work. */ tb_increase_tmu_accuracy(tunnel); - return; + return true;
+err_deactivate: + tb_tunnel_deactivate(tunnel); err_free: tb_tunnel_free(tunnel); err_reclaim_usb: @@ -1414,6 +1806,49 @@ static void tb_tunnel_dp(struct tb *tb) pm_runtime_put_autosuspend(&out->sw->dev); pm_runtime_mark_last_busy(&in->sw->dev); pm_runtime_put_autosuspend(&in->sw->dev); + + return false; +} + +static void tb_tunnel_dp(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *port, *in, *out; + + if (!tb_acpi_may_tunnel_dp()) { + tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); + return; + } + + /* + * Find pair of inactive DP IN and DP OUT adapters and then + * establish a DP tunnel between them. + */ + tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); + + in = NULL; + out = NULL; + list_for_each_entry(port, &tcm->dp_resources, list) { + if (!tb_port_is_dpin(port)) + continue; + + if (tb_port_is_enabled(port)) { + tb_port_dbg(port, "DP IN in use\n"); + continue; + } + + in = port; + tb_port_dbg(in, "DP IN available\n"); + + out = tb_find_dp_out(tb, port); + if (out) + tb_tunnel_one_dp(tb, in, out); + else + tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n"); + } + + if (!in) + tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); }
static void tb_enter_redrive(struct tb_port *port) @@ -1748,7 +2183,8 @@ static void tb_handle_hotplug(struct work_struct *work) tb_remove_dp_resources(port->remote->sw); tb_switch_tmu_disable(port->remote->sw); tb_switch_unconfigure_link(port->remote->sw); - tb_switch_lane_bonding_disable(port->remote->sw); + tb_switch_set_link_width(port->remote->sw, + TB_LINK_WIDTH_SINGLE); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) @@ -1883,6 +2319,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || (*requested_down >= 0 && requested_down_corrected <= allocated_down)) { + /* + * If bandwidth on a link is < asym_threshold transition + * the link to symmetric. + */ + tb_configure_sym(tb, in, out, *requested_up, *requested_down); /* * If requested bandwidth is less or equal than what is * currently allocated to that tunnel we simply change @@ -1908,7 +2349,8 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, * are also in the same group but we use the same function here * that we use with the normal bandwidth allocation). */ - ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); + ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down, + true); if (ret) goto reclaim;
@@ -1917,8 +2359,23 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
if ((*requested_up >= 0 && available_up >= requested_up_corrected) || (*requested_down >= 0 && available_down >= requested_down_corrected)) { + /* + * If bandwidth on a link is >= asym_threshold + * transition the link to asymmetric. + */ + ret = tb_configure_asym(tb, in, out, *requested_up, + *requested_down); + if (ret) { + tb_configure_sym(tb, in, out, 0, 0); + return ret; + } + ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, requested_down); + if (ret) { + tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n"); + tb_configure_sym(tb, in, out, 0, 0); + } } else { ret = -ENOBUFS; } @@ -1984,7 +2441,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
out = tunnel->dst_port;
- if (in->sw->config.depth < out->sw->config.depth) { + if (tb_port_path_direction_downstream(in, out)) { requested_up = -1; requested_down = requested_bw; } else { @@ -2240,7 +2697,8 @@ static void tb_restore_children(struct tb_switch *sw) continue;
if (port->remote) { - tb_switch_lane_bonding_enable(port->remote->sw); + tb_switch_set_link_width(port->remote->sw, + port->remote->sw->link_width); tb_switch_configure_link(port->remote->sw);
tb_restore_children(port->remote->sw); diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 8a75aabb9ce8..920dac8a63e1 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -164,11 +164,6 @@ struct tb_switch_tmu { * switches) you need to have domain lock held. * * In USB4 terminology this structure represents a router. - * - * Note @link_width is not the same as whether link is bonded or not. - * For Gen 4 links the link is also bonded when it is asymmetric. The - * correct way to find out whether the link is bonded or not is to look - * @bonded field of the upstream port. */ struct tb_switch { struct device dev; @@ -868,6 +863,15 @@ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw) return tb_port_at(tb_route(sw), tb_switch_parent(sw)); }
+/** + * tb_switch_depth() - Returns depth of the connected router + * @sw: Router + */ +static inline int tb_switch_depth(const struct tb_switch *sw) +{ + return sw->config.depth; +} + static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) { return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && @@ -960,8 +964,7 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw) return !sw->config.enabled; }
-int tb_switch_lane_bonding_enable(struct tb_switch *sw); -void tb_switch_lane_bonding_disable(struct tb_switch *sw); +int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width); int tb_switch_configure_link(struct tb_switch *sw); void tb_switch_unconfigure_link(struct tb_switch *sw);
@@ -1044,6 +1047,21 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid); struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, struct tb_port *prev);
+/** + * tb_port_path_direction_downstream() - Checks if path directed downstream + * @src: Source adapter + * @dst: Destination adapter + * + * Returns %true only if the specified path from source adapter (@src) + * to destination adapter (@dst) is directed downstream. + */ +static inline bool +tb_port_path_direction_downstream(const struct tb_port *src, + const struct tb_port *dst) +{ + return src->sw->config.depth < dst->sw->config.depth; +} + static inline bool tb_port_use_credit_allocation(const struct tb_port *port) { return tb_port_is_null(port) && port->sw->credit_allocation; @@ -1061,12 +1079,29 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port) for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ (p) = tb_next_port_on_path((src), (dst), (p)))
+/** + * tb_for_each_upstream_port_on_path() - Iterate over each upstream port on path + * @src: Source port + * @dst: Destination port + * @p: Port used as iterator + * + * Walks over each upstream lane adapter on path from @src to @dst. + */ +#define tb_for_each_upstream_port_on_path(src, dst, p) \ + for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ + (p) = tb_next_port_on_path((src), (dst), (p))) \ + if (!tb_port_is_null((p)) || !tb_is_upstream_port((p))) {\ + continue; \ + } else + int tb_port_get_link_speed(struct tb_port *port); +int tb_port_get_link_generation(struct tb_port *port); int tb_port_get_link_width(struct tb_port *port); +bool tb_port_width_supported(struct tb_port *port, unsigned int width); int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width); int tb_port_lane_bonding_enable(struct tb_port *port); void tb_port_lane_bonding_disable(struct tb_port *port); -int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, +int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width, int timeout_msec); int tb_port_update_credits(struct tb_port *port);
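A hedged usage sketch of the new iterator, assuming the usual driver context (two struct tb_port pointers for the ends of a path); this fragment is illustrative and not taken from the patch:

	/* Hypothetical: count upstream lane adapters between two ports. */
	struct tb_port *p;
	int n = 0;

	tb_for_each_upstream_port_on_path(src_port, dst_port, p) {
		/*
		 * Only upstream lane (null) adapters reach the body; the
		 * filter baked into the macro skips everything else that
		 * tb_next_port_on_path() returns.
		 */
		n++;
	}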
@@ -1264,6 +1299,11 @@ int usb4_port_router_online(struct tb_port *port); int usb4_port_enumerate_retimers(struct tb_port *port); bool usb4_port_clx_supported(struct tb_port *port); int usb4_port_margining_caps(struct tb_port *port, u32 *caps); + +bool usb4_port_asym_supported(struct tb_port *port); +int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width); +int usb4_port_asym_start(struct tb_port *port); + int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, unsigned int ber_level, bool timing, bool right_high, u32 *results); diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 736e28beac11..4419e274d2b4 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -348,10 +348,14 @@ struct tb_regs_port_header { #define LANE_ADP_CS_1 0x01 #define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0) #define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc -#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4) +#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(5, 4) #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4 #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1 #define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3 +#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK GENMASK(7, 6) +#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX 0x1 +#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX 0x2 +#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL 0x0 #define LANE_ADP_CS_1_CL0S_ENABLE BIT(10) #define LANE_ADP_CS_1_CL1_ENABLE BIT(11) #define LANE_ADP_CS_1_CL2_ENABLE BIT(12) @@ -384,6 +388,8 @@ struct tb_regs_port_header { #define PORT_CS_18_WOCS BIT(16) #define PORT_CS_18_WODS BIT(17) #define PORT_CS_18_WOU4S BIT(18) +#define PORT_CS_18_CSA BIT(22) +#define PORT_CS_18_TIP BIT(24) #define PORT_CS_19 0x13 #define PORT_CS_19_DPR BIT(0) #define PORT_CS_19_PC BIT(3) @@ -391,6 +397,7 @@ struct tb_regs_port_header { #define PORT_CS_19_WOC BIT(16) #define PORT_CS_19_WOD BIT(17) #define PORT_CS_19_WOU4 BIT(18) +#define PORT_CS_19_START_ASYM BIT(24)
/* Display Port adapter registers */ #define ADP_DP_CS_0 0x00 diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index a6810fb36860..8aec678d80d3 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -21,12 +21,18 @@ #define TB_PCI_PATH_DOWN 0 #define TB_PCI_PATH_UP 1
+#define TB_PCI_PRIORITY 3 +#define TB_PCI_WEIGHT 1 + /* USB3 adapters use always HopID of 8 for both directions */ #define TB_USB3_HOPID 8
#define TB_USB3_PATH_DOWN 0 #define TB_USB3_PATH_UP 1
+#define TB_USB3_PRIORITY 3 +#define TB_USB3_WEIGHT 2 + /* DP adapters use HopID 8 for AUX and 9 for Video */ #define TB_DP_AUX_TX_HOPID 8 #define TB_DP_AUX_RX_HOPID 8 @@ -36,6 +42,12 @@ #define TB_DP_AUX_PATH_OUT 1 #define TB_DP_AUX_PATH_IN 2
+#define TB_DP_VIDEO_PRIORITY 1 +#define TB_DP_VIDEO_WEIGHT 1 + +#define TB_DP_AUX_PRIORITY 2 +#define TB_DP_AUX_WEIGHT 1 + /* Minimum number of credits needed for PCIe path */ #define TB_MIN_PCIE_CREDITS 6U /* @@ -46,6 +58,18 @@ /* Minimum number of credits for DMA path */ #define TB_MIN_DMA_CREDITS 1
+#define TB_DMA_PRIORITY 5 +#define TB_DMA_WEIGHT 1 + +/* + * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic + * according to USB4 v2 Connection Manager guide. This ends up reserving + * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into + * account. + */ +#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT) +#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT) + static unsigned int dma_credits = TB_DMA_CREDITS; module_param(dma_credits, uint, 0444); MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: " @@ -58,27 +82,6 @@ MODULE_PARM_DESC(bw_alloc_mode,
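With the weights above, the reserved amounts mentioned in the comment work out as below; a minimal sketch where the lower-case names only mirror the new defines:

#include <stdio.h>

int main(void)
{
	int tb_pci_weight = 1, tb_usb3_weight = 2;

	printf("PCIe reserve: %d Mb/s\n", 1500 * tb_pci_weight);	/* 1500 */
	printf("USB3 reserve: %d Mb/s\n", 1500 * tb_usb3_weight);	/* 3000 */
	return 0;
}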
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ - do { \ - struct tb_tunnel *__tunnel = (tunnel); \ - level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \ - tb_route(__tunnel->src_port->sw), \ - __tunnel->src_port->port, \ - tb_route(__tunnel->dst_port->sw), \ - __tunnel->dst_port->port, \ - tb_tunnel_names[__tunnel->type], \ - ## arg); \ - } while (0) - -#define tb_tunnel_WARN(tunnel, fmt, arg...) \ - __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg) -#define tb_tunnel_warn(tunnel, fmt, arg...) \ - __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg) -#define tb_tunnel_info(tunnel, fmt, arg...) \ - __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg) -#define tb_tunnel_dbg(tunnel, fmt, arg...) \ - __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg) - static inline unsigned int tb_usable_credits(const struct tb_port *port) { return port->total_credits - port->ctl_credits; @@ -156,11 +159,11 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) { + struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); int ret;
/* Only supported if both routers are at least USB4 v2 */ - if (usb4_switch_version(tunnel->src_port->sw) < 2 || - usb4_switch_version(tunnel->dst_port->sw) < 2) + if (tb_port_get_link_generation(port) < 4) return 0;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable); @@ -234,8 +237,8 @@ static int tb_pci_init_path(struct tb_path *path) path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_ALL; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 3; - path->weight = 1; + path->priority = TB_PCI_PRIORITY; + path->weight = TB_PCI_WEIGHT; path->drop_packages = 0;
tb_path_for_each_hop(path, hop) { @@ -376,6 +379,51 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, return NULL; }
+/** + * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe + * @port: Lane 0 adapter + * @reserved_up: Upstream bandwidth in Mb/s to reserve + * @reserved_down: Downstream bandwidth in Mb/s to reserve + * + * Can be called to any connected lane 0 adapter to find out how much + * bandwidth needs to be left in reserve for possible PCIe bulk traffic. + * Returns true if there is something to be reserved and writes the + * amount to @reserved_down/@reserved_up. Otherwise returns false and + * does not touch the parameters. + */ +bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up, + int *reserved_down) +{ + if (WARN_ON_ONCE(!port->remote)) + return false; + + if (!tb_acpi_may_tunnel_pcie()) + return false; + + if (tb_port_get_link_generation(port) < 4) + return false; + + /* Must have PCIe adapters */ + if (tb_is_upstream_port(port)) { + if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP)) + return false; + if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN)) + return false; + } else { + if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN)) + return false; + if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP)) + return false; + } + + *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH; + *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH; + + tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up, + *reserved_down); + return true; +} + static bool tb_dp_is_usb4(const struct tb_switch *sw) { /* Titan Ridge DP adapters need the same treatment as USB4 */ @@ -614,8 +662,9 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in_rate = tb_dp_cap_get_rate(in_dp_cap); in_lanes = tb_dp_cap_get_lanes(in_dp_cap); - tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", - in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes)); + tb_tunnel_dbg(tunnel, + "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
/* * If the tunnel bandwidth is limited (max_bw is set) then see @@ -624,10 +673,11 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) out_rate = tb_dp_cap_get_rate(out_dp_cap); out_lanes = tb_dp_cap_get_lanes(out_dp_cap); bw = tb_dp_bandwidth(out_rate, out_lanes); - tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", - out_rate, out_lanes, bw); + tb_tunnel_dbg(tunnel, + "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + out_rate, out_lanes, bw);
- if (in->sw->config.depth < out->sw->config.depth) + if (tb_port_path_direction_downstream(in, out)) max_bw = tunnel->max_down; else max_bw = tunnel->max_up; @@ -639,13 +689,14 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) out_rate, out_lanes, &new_rate, &new_lanes); if (ret) { - tb_port_info(out, "not enough bandwidth for DP tunnel\n"); + tb_tunnel_info(tunnel, "not enough bandwidth\n"); return ret; }
new_bw = tb_dp_bandwidth(new_rate, new_lanes); - tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", - new_rate, new_lanes, new_bw); + tb_tunnel_dbg(tunnel, + "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", + new_rate, new_lanes, new_bw);
/* * Set new rate and number of lanes before writing it to @@ -662,7 +713,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) */ if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) { out_dp_cap |= DP_COMMON_CAP_LTTPR_NS; - tb_port_dbg(out, "disabling LTTPR\n"); + tb_tunnel_dbg(tunnel, "disabling LTTPR\n"); }
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, @@ -712,8 +763,8 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) lanes = min(in_lanes, out_lanes); tmp = tb_dp_bandwidth(rate, lanes);
- tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate, - lanes, tmp); + tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tmp);
ret = usb4_dp_port_set_nrd(in, rate, lanes); if (ret) @@ -728,15 +779,15 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) rate = min(in_rate, out_rate); tmp = tb_dp_bandwidth(rate, lanes);
- tb_port_dbg(in, - "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n", - rate, lanes, tmp); + tb_tunnel_dbg(tunnel, + "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tmp);
for (granularity = 250; tmp / granularity > 255 && granularity <= 1000; granularity *= 2) ;
- tb_port_dbg(in, "granularity %d Mb/s\n", granularity); + tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
/* * Returns -EINVAL if granularity above is outside of the @@ -751,12 +802,12 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) * max_up/down fields. For discovery we just read what the * estimation was set to. */ - if (in->sw->config.depth < out->sw->config.depth) + if (tb_port_path_direction_downstream(in, out)) estimated_bw = tunnel->max_down; else estimated_bw = tunnel->max_up;
- tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw); + tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw); if (ret) @@ -767,7 +818,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) if (ret) return ret;
- tb_port_dbg(in, "bandwidth allocation mode enabled\n"); + tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n"); return 0; }
@@ -788,7 +839,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel) if (!usb4_dp_port_bandwidth_mode_supported(in)) return 0;
- tb_port_dbg(in, "bandwidth allocation mode supported\n"); + tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
ret = usb4_dp_port_set_cm_id(in, tb->index); if (ret) @@ -805,7 +856,7 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel) return; if (usb4_dp_port_bandwidth_mode_enabled(in)) { usb4_dp_port_set_cm_bandwidth_mode_supported(in, false); - tb_port_dbg(in, "bandwidth allocation mode disabled\n"); + tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n"); } }
@@ -921,10 +972,7 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, if (allocated_bw == max_bw) allocated_bw = ret;
- tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n", - allocated_bw); - - if (in->sw->config.depth < out->sw->config.depth) { + if (tb_port_path_direction_downstream(in, out)) { *consumed_up = 0; *consumed_down = allocated_bw; } else { @@ -959,7 +1007,7 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up if (allocated_bw == max_bw) allocated_bw = ret;
- if (in->sw->config.depth < out->sw->config.depth) { + if (tb_port_path_direction_downstream(in, out)) { *allocated_up = 0; *allocated_down = allocated_bw; } else { @@ -987,7 +1035,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, if (ret < 0) return ret;
- if (in->sw->config.depth < out->sw->config.depth) { + if (tb_port_path_direction_downstream(in, out)) { tmp = min(*alloc_down, max_bw); ret = usb4_dp_port_allocate_bandwidth(in, tmp); if (ret) @@ -1006,9 +1054,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, /* Now we can use BW mode registers to figure out the bandwidth */ /* TODO: need to handle discovery too */ tunnel->bw_mode = true; - - tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n", - tmp); return 0; }
@@ -1035,8 +1080,7 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes, *rate = tb_dp_cap_get_rate(val); *lanes = tb_dp_cap_get_lanes(val);
- tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n", - tb_dp_bandwidth(*rate, *lanes)); + tb_tunnel_dbg(tunnel, "DPRX read done\n"); return 0; } usleep_range(100, 150); @@ -1073,9 +1117,6 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
*rate = tb_dp_cap_get_rate(val); *lanes = tb_dp_cap_get_lanes(val); - - tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap, - tb_dp_bandwidth(*rate, *lanes)); return 0; }
@@ -1092,7 +1133,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, if (ret < 0) return ret;
- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { + if (tb_port_path_direction_downstream(in, tunnel->dst_port)) { *max_up = 0; *max_down = ret; } else { @@ -1150,7 +1191,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, return 0; }
- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { + if (tb_port_path_direction_downstream(in, tunnel->dst_port)) { *consumed_up = 0; *consumed_down = tb_dp_bandwidth(rate, lanes); } else { @@ -1180,8 +1221,8 @@ static void tb_dp_init_aux_path(struct tb_path *path) path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_ALL; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 2; - path->weight = 1; + path->priority = TB_DP_AUX_PRIORITY; + path->weight = TB_DP_AUX_WEIGHT;
tb_path_for_each_hop(path, hop) tb_dp_init_aux_credits(hop); @@ -1224,8 +1265,8 @@ static int tb_dp_init_video_path(struct tb_path *path) path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_NONE; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 1; - path->weight = 1; + path->priority = TB_DP_VIDEO_PRIORITY; + path->weight = TB_DP_VIDEO_WEIGHT;
tb_path_for_each_hop(path, hop) { int ret; @@ -1253,8 +1294,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) rate = tb_dp_cap_get_rate(dp_cap); lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", - rate, lanes, tb_dp_bandwidth(rate, lanes)); + tb_tunnel_dbg(tunnel, + "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes));
out = tunnel->dst_port;
@@ -1265,8 +1307,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) rate = tb_dp_cap_get_rate(dp_cap); lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", - rate, lanes, tb_dp_bandwidth(rate, lanes)); + tb_tunnel_dbg(tunnel, + "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes));
if (tb_port_read(in, &dp_cap, TB_CFG_PORT, in->cap_adap + DP_REMOTE_CAP, 1)) @@ -1275,8 +1318,8 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) rate = tb_dp_cap_get_rate(dp_cap); lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", - rate, lanes, tb_dp_bandwidth(rate, lanes)); + tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes)); }
/** @@ -1497,8 +1540,8 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits) path->ingress_fc_enable = TB_PATH_ALL; path->egress_shared_buffer = TB_PATH_NONE; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 5; - path->weight = 1; + path->priority = TB_DMA_PRIORITY; + path->weight = TB_DMA_WEIGHT; path->clear_fc = true;
/* @@ -1531,8 +1574,8 @@ static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits) path->ingress_fc_enable = TB_PATH_ALL; path->egress_shared_buffer = TB_PATH_NONE; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 5; - path->weight = 1; + path->priority = TB_DMA_PRIORITY; + path->weight = TB_DMA_WEIGHT; path->clear_fc = true;
tb_path_for_each_hop(path, hop) { @@ -1758,14 +1801,23 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate) static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, int *consumed_down) { - int pcie_enabled = tb_acpi_may_tunnel_pcie(); + struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); + int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
/* * PCIe tunneling, if enabled, affects the USB3 bandwidth so * take it into account here. */ - *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3; - *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3; + *consumed_up = tunnel->allocated_up * + (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT; + *consumed_down = tunnel->allocated_down * + (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT; + + if (tb_port_get_link_generation(port) >= 4) { + *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH); + *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH); + } + return 0; }
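A worked example of the rewritten USB3 consumed-bandwidth formula; this is a stand-alone sketch and the allocated value is hypothetical:

#include <stdio.h>

#define TB_USB3_WEIGHT			2
#define TB_PCI_WEIGHT			1
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)	/* 3000 Mb/s */

int main(void)
{
	int allocated = 900;			/* hypothetical allocated_up/down, Mb/s */
	int pcie_weight = TB_PCI_WEIGHT;	/* PCIe tunneling allowed */
	int consumed = allocated * (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;

	printf("consumed = %d Mb/s\n", consumed);	/* 900 * 3 / 2 = 1350 */

	/* On a Gen 4 (USB4 v2) link the driver additionally enforces the floor: */
	if (consumed < USB4_V2_USB3_MIN_BANDWIDTH)
		consumed = USB4_V2_USB3_MIN_BANDWIDTH;
	printf("consumed with Gen 4 floor = %d Mb/s\n", consumed);	/* 3000 */
	return 0;
}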
@@ -1871,8 +1923,8 @@ static void tb_usb3_init_path(struct tb_path *path) path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_ALL; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 3; - path->weight = 3; + path->priority = TB_USB3_PRIORITY; + path->weight = TB_USB3_WEIGHT; path->drop_packages = 0;
tb_path_for_each_hop(path, hop) @@ -2387,3 +2439,8 @@ void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel, tunnel->reclaim_available_bandwidth(tunnel, available_up, available_down); } + +const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel) +{ + return tb_tunnel_names[tunnel->type]; +} diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h index bf690f7beeee..b4cff5482112 100644 --- a/drivers/thunderbolt/tunnel.h +++ b/drivers/thunderbolt/tunnel.h @@ -80,6 +80,8 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, bool alloc_hopid); struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, struct tb_port *down); +bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up, + int *reserved_down); struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, bool alloc_hopid); struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, @@ -137,5 +139,27 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel) return tunnel->type == TB_TUNNEL_USB3; }
-#endif +const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel); + +#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ + do { \ + struct tb_tunnel *__tunnel = (tunnel); \ + level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \ + tb_route(__tunnel->src_port->sw), \ + __tunnel->src_port->port, \ + tb_route(__tunnel->dst_port->sw), \ + __tunnel->dst_port->port, \ + tb_tunnel_type_name(__tunnel), \ + ## arg); \ + } while (0)
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \ + __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg) +#define tb_tunnel_warn(tunnel, fmt, arg...) \ + __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg) +#define tb_tunnel_info(tunnel, fmt, arg...) \ + __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg) +#define tb_tunnel_dbg(tunnel, fmt, arg...) \ + __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg) + +#endif diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 3aa32d7f9f6a..8db9bd32f473 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -1494,6 +1494,112 @@ bool usb4_port_clx_supported(struct tb_port *port) return !!(val & PORT_CS_18_CPS); }
+/** + * usb4_port_asym_supported() - If the port supports asymmetric link + * @port: USB4 port + * + * Checks if the port and the cable supports asymmetric link and returns + * %true in that case. + */ +bool usb4_port_asym_supported(struct tb_port *port) +{ + u32 val; + + if (!port->cap_usb4) + return false; + + if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1)) + return false; + + return !!(val & PORT_CS_18_CSA); +} + +/** + * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric + * @port: USB4 port + * @width: Asymmetric width to configure + * + * Sets USB4 port link width to @width. Can be called for widths where + * usb4_port_asym_width_supported() returned @true. + */ +int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK; + switch (width) { + case TB_LINK_WIDTH_DUAL: + val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK, + LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL); + break; + case TB_LINK_WIDTH_ASYM_TX: + val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK, + LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX); + break; + case TB_LINK_WIDTH_ASYM_RX: + val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK, + LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX); + break; + default: + return -EINVAL; + } + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +/** + * usb4_port_asym_start() - Start symmetry change and wait for completion + * @port: USB4 port + * + * Start symmetry change of the link to asymmetric or symmetric + * (according to what was previously set in tb_port_set_link_width(). + * Wait for completion of the change. + * + * Returns %0 in case of success, %-ETIMEDOUT if case of timeout or + * a negative errno in case of a failure. + */ +int usb4_port_asym_start(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + val &= ~PORT_CS_19_START_ASYM; + val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1); + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + /* + * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4 + * port started the symmetry transition. + */ + ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19, + PORT_CS_19_START_ASYM, 0, 1000); + if (ret) + return ret; + + /* Then wait for the transtion to be completed */ + return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18, + PORT_CS_18_TIP, 0, 5000); +} + /** * usb4_port_margining_caps() - Read USB4 port marginig capabilities * @port: USB4 port @@ -2274,13 +2380,13 @@ int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, goto err_request;
/* - * Always keep 1000 Mb/s to make sure xHCI has at least some + * Always keep 900 Mb/s to make sure xHCI has at least some * bandwidth available for isochronous traffic. */ - if (consumed_up < 1000) - consumed_up = 1000; - if (consumed_down < 1000) - consumed_down = 1000; + if (consumed_up < 900) + consumed_up = 900; + if (consumed_down < 900) + consumed_down = 900;
ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up, consumed_down); diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 8f472a2080ff..4caecc3525bf 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1567,7 +1567,7 @@ static int omap8250_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, irq, omap8250_irq, 0, dev_name(&pdev->dev), priv); if (ret < 0) - return ret; + goto err;
priv->wakeirq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 2e1b1c827dfe..f798ef3c4130 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -124,7 +124,7 @@ struct qcom_geni_serial_port { dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; bool setup; - unsigned int baud; + unsigned long poll_timeout_us; unsigned long clk_rate; void *rx_buf; u32 loopback; @@ -270,22 +270,13 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport, { u32 reg; struct qcom_geni_serial_port *port; - unsigned int baud; - unsigned int fifo_bits; unsigned long timeout_us = 20000; struct qcom_geni_private_data *private_data = uport->private_data;
if (private_data->drv) { port = to_dev_port(uport); - baud = port->baud; - if (!baud) - baud = 115200; - fifo_bits = port->tx_fifo_depth * port->tx_fifo_width; - /* - * Total polling iterations based on FIFO worth of bytes to be - * sent at current baud. Add a little fluff to the wait. - */ - timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500; + if (port->poll_timeout_us) + timeout_us = port->poll_timeout_us; }
/* @@ -1223,11 +1214,11 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, unsigned long clk_rate; u32 ver, sampling_rate; unsigned int avg_bw_core; + unsigned long timeout;
qcom_geni_serial_stop_rx(uport); /* baud rate */ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000); - port->baud = baud;
sampling_rate = UART_OVERSAMPLING; /* Sampling rate is halved for IP versions >= 2.5 */ @@ -1305,9 +1296,21 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, else tx_trans_cfg |= UART_CTS_MASK;
- if (baud) + if (baud) { uart_update_timeout(uport, termios->c_cflag, baud);
+ /* + * Make sure that qcom_geni_serial_poll_bitfield() waits for + * the FIFO, two-word intermediate transfer register and shift + * register to clear. + * + * Note that uart_fifo_timeout() also adds a 20 ms margin. + */ + timeout = jiffies_to_usecs(uart_fifo_timeout(uport)); + timeout += 3 * timeout / port->tx_fifo_depth; + WRITE_ONCE(port->poll_timeout_us, timeout); + } + if (!uart_console(uport)) writel(port->loopback, uport->membase + SE_UART_LOOPBACK_CFG); diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index de220ac8ca54..5a1de6044b38 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -578,8 +578,8 @@ static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id) u32 clk_cfg;
writew(1, base + RP2_GLOBAL_CMD); - readw(base + RP2_GLOBAL_CMD); msleep(100); + readw(base + RP2_GLOBAL_CMD); writel(0, base + RP2_CLK_PRESCALER);
/* TDM clock configuration */ diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index ed8798fdf522..5d5570bebfeb 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2694,13 +2694,13 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) int ret = 0;
tport = &state->port; - mutex_lock(&tport->mutex); + + guard(mutex)(&tport->mutex);
port = uart_port_check(state); - if (!port || !(port->ops->poll_get_char && port->ops->poll_put_char)) { - ret = -1; - goto out; - } + if (!port || port->type == PORT_UNKNOWN || + !(port->ops->poll_get_char && port->ops->poll_put_char)) + return -1;
pm_state = state->pm_state; uart_change_pm(state, UART_PM_STATE_ON); @@ -2720,10 +2720,10 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) ret = uart_set_options(port, NULL, baud, parity, bits, flow); console_list_unlock(); } -out: + if (ret) uart_change_pm(state, pm_state); - mutex_unlock(&tport->mutex); + return ret; }
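The serial_core hunk above replaces the lock/goto-out pattern in uart_poll_init() with guard(mutex)() from <linux/cleanup.h>, which drops the mutex automatically on every return path, so the early returns (including the new PORT_UNKNOWN check) no longer need an unlock label. A minimal userspace sketch of the same scope-based cleanup idea, built directly on the compiler's __attribute__((cleanup)); demo_guard, demo_unlock and demo_poll_init are made-up names for illustration, not kernel code:

#include <pthread.h>
#include <stdio.h>

static void demo_unlock(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);	/* runs when the guarded scope is left */
}

#define demo_guard(m) \
	pthread_mutex_t *__guard __attribute__((cleanup(demo_unlock))) = (m); \
	pthread_mutex_lock(__guard)

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_poll_init(int have_port)
{
	demo_guard(&demo_lock);		/* locked here, unlocked on any return */

	if (!have_port)
		return -1;		/* early return still drops the lock */

	printf("initialized\n");
	return 0;
}

int main(void)
{
	return demo_poll_init(1);
}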
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 922ae1d76d90..643157a92c62 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -93,7 +93,7 @@ static const struct __ufs_qcom_bw_table { [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 }, [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 }, [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 }, - [MODE_MAX][0][0] = { 7643136, 307200 }, + [MODE_MAX][0][0] = { 7643136, 819200 }, };
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index e0e97a138666..1d18d5002ef0 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -718,7 +718,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev, seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb, cur_td->last_trb, hw_deq);
- if (seg && (pep->ep_state & EP_ENABLED)) + if (seg && (pep->ep_state & EP_ENABLED) && + !(pep->ep_state & EP_DIS_IN_RROGRESS)) cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id, cur_td, &deq_state); else @@ -736,7 +737,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev, * During disconnecting all endpoint will be disabled so we don't * have to worry about updating dequeue pointer. */ - if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) { + if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING || + pep->ep_state & EP_DIS_IN_RROGRESS) { status = -ESHUTDOWN; ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state); } diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index ceca4d839dfd..7ba760ee62e3 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -62,7 +62,9 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = { .resume_quirk = xhci_cdns3_resume_quirk, };
-static const struct xhci_plat_priv xhci_plat_cdnsp_xhci; +static const struct xhci_plat_priv xhci_plat_cdnsp_xhci = { + .quirks = XHCI_CDNS_SCTX_QUIRK, +};
static int __cdns_host_init(struct cdns *cdns) { diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 0c1b69d944ca..605fea461102 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -962,10 +962,12 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss) struct acm *acm = tty->driver_data;
ss->line = acm->minor; + mutex_lock(&acm->port.mutex); ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10; ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : jiffies_to_msecs(acm->port.closing_wait) / 10; + mutex_unlock(&acm->port.mutex); return 0; }
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index a8605b02115b..1ad8fa3f862a 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -127,6 +127,15 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) role = USB_ROLE_DEVICE; }
+ if ((IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)) && + dwc2_is_device_mode(hsotg) && + hsotg->lx_state == DWC2_L2 && + hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && + hsotg->bus_suspended && + !hsotg->params.no_clock_gating) + dwc2_gadget_exit_clock_gating(hsotg, 0); + if (role == USB_ROLE_HOST) { already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index b1e3fa54c639..54c47463c215 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2289,7 +2289,10 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); erst_base &= ERST_BASE_RSVDP; erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP; - xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); + if (xhci->quirks & XHCI_WRITE_64_HI_LO) + hi_lo_writeq(erst_base, &ir->ir_set->erst_base); + else + xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
/* Set the event ring dequeue address of this interrupter */ xhci_set_hc_event_deq(xhci, ir); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 6cee705568c2..dd02b064ecd4 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -75,6 +75,9 @@ #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 #define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
+#define PCI_DEVICE_ID_CADENCE 0x17CD +#define PCI_DEVICE_ID_CADENCE_SSP 0x0200 + static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver; @@ -539,6 +542,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; }
+ if (pdev->vendor == PCI_DEVICE_ID_CADENCE && + pdev->device == PCI_DEVICE_ID_CADENCE_SSP) + xhci->quirks |= XHCI_CDNS_SCTX_QUIRK; + /* xHC spec requires PCI devices to support D3hot and D3cold */ if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; @@ -721,8 +728,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) static void xhci_pci_remove(struct pci_dev *dev) { struct xhci_hcd *xhci; + bool set_power_d3;
xhci = hcd_to_xhci(pci_get_drvdata(dev)); + set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP;
xhci->xhc_state |= XHCI_STATE_REMOVING;
@@ -735,11 +744,11 @@ static void xhci_pci_remove(struct pci_dev *dev) xhci->shared_hcd = NULL; }
+ usb_hcd_pci_remove(dev); + /* Workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + if (set_power_d3) pci_set_power_state(dev, PCI_D3hot); - - usb_hcd_pci_remove(dev); }
/* diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 8dd85221cd92..95d8cf24b015 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1414,6 +1414,20 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_stream_ctx *ctx = &ep->stream_info->stream_ctx_array[stream_id]; deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; + + /* + * Cadence xHCI controllers store some endpoint state + * information within the Rsvd0 fields of the Stream Endpoint + * context. These fields are not cleared during the Set TR + * Dequeue Pointer command, which causes the XDMA to skip + * over the transfer ring and leads to data loss on the + * stream pipe. + * To fix this issue the driver must clear the Rsvd0 fields. + */ + if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) { + ctx->reserved[0] = 0; + ctx->reserved[1] = 0; + } } else { deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index b29fe4716f34..f5efee06ff06 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/usb/hcd.h> #include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-hi-lo.h>
/* Code sharing between pci-quirks and xhci hcd */ #include "xhci-ext-caps.h" @@ -1913,6 +1914,8 @@ struct xhci_hcd { #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) #define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) #define XHCI_ZHAOXIN_HOST BIT_ULL(46) +#define XHCI_WRITE_64_HI_LO BIT_ULL(47) +#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index c8098e9b432e..62b5a30edc42 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -107,7 +107,12 @@ static void appledisplay_complete(struct urb *urb) case ACD_BTN_BRIGHT_UP: case ACD_BTN_BRIGHT_DOWN: pdata->button_pressed = 1; - schedule_delayed_work(&pdata->work, 0); + /* + * there is a window during which no device + * is registered + */ + if (pdata->bd ) + schedule_delayed_work(&pdata->work, 0); break; case ACD_BTN_NONE: default: @@ -202,6 +207,7 @@ static int appledisplay_probe(struct usb_interface *iface, const struct usb_device_id *id) { struct backlight_properties props; + struct backlight_device *backlight; struct appledisplay *pdata; struct usb_device *udev = interface_to_usbdev(iface); struct usb_endpoint_descriptor *endpoint; @@ -272,13 +278,14 @@ static int appledisplay_probe(struct usb_interface *iface, memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = 0xff; - pdata->bd = backlight_device_register(bl_name, NULL, pdata, + backlight = backlight_device_register(bl_name, NULL, pdata, &appledisplay_bl_data, &props); - if (IS_ERR(pdata->bd)) { + if (IS_ERR(backlight)) { dev_err(&iface->dev, "Backlight registration failed\n"); - retval = PTR_ERR(pdata->bd); + retval = PTR_ERR(backlight); goto error; } + pdata->bd = backlight;
/* Try to get brightness */ brightness = appledisplay_bl_get_brightness(pdata->bd); diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index cecd7693b741..75f5a740cba3 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -88,6 +88,9 @@ static int vendor_command(struct cypress *dev, unsigned char request, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, address, data, iobuf, CYPRESS_MAX_REQSIZE, USB_CTRL_GET_TIMEOUT); + /* we must not process garbage */ + if (retval < 2) + goto err_buf;
/* store returned data (more READs to be added) */ switch (request) { @@ -107,6 +110,7 @@ static int vendor_command(struct cypress *dev, unsigned char request, break; }
+err_buf: kfree(iobuf); error: return retval; diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index c640f98d20c5..6adaaf66c14d 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -34,6 +34,8 @@ #define YUREX_BUF_SIZE 8 #define YUREX_WRITE_TIMEOUT (HZ*2)
+#define MAX_S64_STRLEN 20 /* {-}922337203685477580{7,8} */ + /* table of devices that work with this driver */ static struct usb_device_id yurex_table[] = { { USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) }, @@ -401,8 +403,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, { struct usb_yurex *dev; int len = 0; - char in_buffer[20]; - unsigned long flags; + char in_buffer[MAX_S64_STRLEN];
dev = file->private_data;
@@ -412,13 +413,15 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, return -ENODEV; }
- spin_lock_irqsave(&dev->lock, flags); - len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); - spin_unlock_irqrestore(&dev->lock, flags); - mutex_unlock(&dev->io_mutex); - - if (WARN_ON_ONCE(len >= sizeof(in_buffer))) + if (WARN_ON_ONCE(dev->bbu > S64_MAX || dev->bbu < S64_MIN)) { + mutex_unlock(&dev->io_mutex); return -EIO; + } + + spin_lock_irq(&dev->lock); + scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu); + spin_unlock_irq(&dev->lock); + mutex_unlock(&dev->io_mutex);
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); } @@ -507,8 +510,11 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, __func__, retval); goto error; } - if (set && timeout) + if (set && timeout) { + spin_lock_irq(&dev->lock); dev->bbu = c2; + spin_unlock_irq(&dev->lock); + } return timeout ? count : -EIO;
error: diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index da2c31ccc138..c29a195a0175 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -191,11 +191,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid) if (irq < 0) return;
- irq_bypass_unregister_producer(&vq->call_ctx.producer); if (!vq->call_ctx.ctx) return;
- vq->call_ctx.producer.token = vq->call_ctx.ctx; vq->call_ctx.producer.irq = irq; ret = irq_bypass_register_producer(&vq->call_ctx.producer); if (unlikely(ret)) @@ -627,6 +625,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, vq->last_avail_idx = vq_state.split.avail_index; } break; + case VHOST_SET_VRING_CALL: + if (vq->call_ctx.ctx) { + if (ops->get_status(vdpa) & + VIRTIO_CONFIG_S_DRIVER_OK) + vhost_vdpa_unsetup_vq_irq(v, idx); + vq->call_ctx.producer.token = NULL; + } + break; }
r = vhost_vring_ioctl(&v->vdev, cmd, argp); @@ -659,13 +665,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, cb.callback = vhost_vdpa_virtqueue_cb; cb.private = vq; cb.trigger = vq->call_ctx.ctx; + vq->call_ctx.producer.token = vq->call_ctx.ctx; + if (ops->get_status(vdpa) & + VIRTIO_CONFIG_S_DRIVER_OK) + vhost_vdpa_setup_vq_irq(v, idx); } else { cb.callback = NULL; cb.private = NULL; cb.trigger = NULL; } ops->set_vq_cb(vdpa, idx, &cb); - vhost_vdpa_setup_vq_irq(v, idx); break;
case VHOST_SET_VRING_NUM: @@ -1316,6 +1325,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep) for (i = 0; i < nvqs; i++) { vqs[i] = &v->vqs[i]; vqs[i]->handle_kick = handle_vq_kick; + vqs[i]->call_ctx.ctx = NULL; } vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false, vhost_vdpa_process_iotlb_msg); diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c index 406c1383cbda..1461a909e17e 100644 --- a/drivers/video/fbdev/hpfb.c +++ b/drivers/video/fbdev/hpfb.c @@ -343,6 +343,7 @@ static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent) if (hpfb_init_one(paddr, vaddr)) { if (d->scode >= DIOII_SCBASE) iounmap((void *)vaddr); + release_mem_region(d->resource.start, resource_size(&d->resource)); return -ENOMEM; } return 0; diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c index 8ac021748d16..79649b0e89e4 100644 --- a/drivers/watchdog/imx_sc_wdt.c +++ b/drivers/watchdog/imx_sc_wdt.c @@ -213,29 +213,6 @@ static int imx_sc_wdt_probe(struct platform_device *pdev) return devm_watchdog_register_device(dev, wdog); }
-static int __maybe_unused imx_sc_wdt_suspend(struct device *dev) -{ - struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev); - - if (watchdog_active(&imx_sc_wdd->wdd)) - imx_sc_wdt_stop(&imx_sc_wdd->wdd); - - return 0; -} - -static int __maybe_unused imx_sc_wdt_resume(struct device *dev) -{ - struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev); - - if (watchdog_active(&imx_sc_wdd->wdd)) - imx_sc_wdt_start(&imx_sc_wdd->wdd); - - return 0; -} - -static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops, - imx_sc_wdt_suspend, imx_sc_wdt_resume); - static const struct of_device_id imx_sc_wdt_dt_ids[] = { { .compatible = "fsl,imx-sc-wdt", }, { /* sentinel */ } @@ -247,7 +224,6 @@ static struct platform_driver imx_sc_wdt_driver = { .driver = { .name = "imx-sc-wdt", .of_match_table = imx_sc_wdt_dt_ids, - .pm = &imx_sc_wdt_pm_ops, }, }; module_platform_driver(imx_sc_wdt_driver); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 0e6c6c25d154..6d0d1c8a508b 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -78,9 +78,15 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) { unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p); unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size); + phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
next_bfn = pfn_to_bfn(xen_pfn);
+ /* If buffer is physically aligned, ensure DMA alignment. */ + if (IS_ALIGNED(p, algn) && + !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn)) + return 1; + for (i = 1; i < nr_pages; i++) if (pfn_to_bfn(++xen_pfn) != ++next_bfn) return 1; @@ -140,7 +146,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size, void *ret;
/* Align the allocation to the Xen page size */ - size = 1UL << (order + XEN_PAGE_SHIFT); + size = ALIGN(size, XEN_PAGE_SIZE);
ret = (void *)__get_free_pages(flags, get_order(size)); if (!ret) @@ -172,7 +178,7 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, int order = get_order(size);
/* Convert the size to actually allocated. */ - size = 1UL << (order + XEN_PAGE_SHIFT); + size = ALIGN(size, XEN_PAGE_SIZE);
if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) || WARN_ON_ONCE(range_straddles_page_boundary(phys, size))) diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index bda1fdbba666..ec6679a538c1 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -82,8 +82,10 @@ struct btrfs_inode { /* * Lock for counters and all fields used to determine if the inode is in * the log or not (last_trans, last_sub_trans, last_log_commit, - * logged_trans), to access/update new_delalloc_bytes and to update the - * VFS' inode number of bytes used. + * logged_trans), to access/update delalloc_bytes, new_delalloc_bytes, + * defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to + * update the VFS' inode number of bytes used. + * Also protects setting struct file::private_data. */ spinlock_t lock;
@@ -102,6 +104,14 @@ struct btrfs_inode { /* held while logging the inode in tree-log.c */ struct mutex log_mutex;
+ /* + * Counters to keep track of the number of extent items we may use due + * to delalloc and such. outstanding_extents is the number of extent + * items we think we'll end up using, and reserved_extents is the number + * of extent items we've reserved metadata for. Protected by 'lock'. + */ + unsigned outstanding_extents; + /* used to order data wrt metadata */ struct btrfs_ordered_inode_tree ordered_tree;
@@ -122,28 +132,31 @@ struct btrfs_inode { u64 generation;
/* - * transid of the trans_handle that last modified this inode + * ID of the transaction handle that last modified this inode. + * Protected by 'lock'. */ u64 last_trans;
/* - * transid that last logged this inode + * ID of the transaction that last logged this inode. + * Protected by 'lock'. */ u64 logged_trans;
/* - * log transid when this inode was last modified + * Log transaction ID when this inode was last modified. + * Protected by 'lock'. */ int last_sub_trans;
- /* a local copy of root's last_log_commit */ + /* A local copy of root's last_log_commit. Protected by 'lock'. */ int last_log_commit;
union { /* * Total number of bytes pending delalloc, used by stat to * calculate the real block usage of the file. This is used - * only for files. + * only for files. Protected by 'lock'. */ u64 delalloc_bytes; /* @@ -161,7 +174,7 @@ struct btrfs_inode { * Total number of bytes pending delalloc that fall within a file * range that is either a hole or beyond EOF (and no prealloc extent * exists in the range). This is always <= delalloc_bytes and this - * is used only for files. + * is used only for files. Protected by 'lock'. */ u64 new_delalloc_bytes; /* @@ -172,15 +185,15 @@ struct btrfs_inode { };
/* - * total number of bytes pending defrag, used by stat to check whether - * it needs COW. + * Total number of bytes pending defrag, used by stat to check whether + * it needs COW. Protected by 'lock'. */ u64 defrag_bytes;
/* - * the size of the file stored in the metadata on disk. data=ordered + * The size of the file stored in the metadata on disk. data=ordered * means the in-memory i_size might be larger than the size on disk - * because not all the blocks are written yet. + * because not all the blocks are written yet. Protected by 'lock'. */ u64 disk_i_size;
@@ -214,7 +227,7 @@ struct btrfs_inode {
/* * Number of bytes outstanding that are going to need csums. This is - * used in ENOSPC accounting. + * used in ENOSPC accounting. Protected by 'lock'. */ u64 csum_bytes;
@@ -223,14 +236,6 @@ struct btrfs_inode { /* Read-only compatibility flags, upper half of inode_item::flags */ u32 ro_flags;
- /* - * Counters to keep track of the number of extent item's we may use due - * to delalloc and such. outstanding_extents is the number of extent - * items we think we'll end up using, and reserved_extents is the number - * of extent items we've reserved metadata for. - */ - unsigned outstanding_extents; - struct btrfs_block_rsv block_rsv;
/* diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 06333a74d6c4..f7bb4c34b984 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -445,6 +445,8 @@ struct btrfs_file_private { void *filldir_buf; u64 last_index; struct extent_state *llseek_cached_state; + /* Task that allocated this structure. */ + struct task_struct *owner_task; };
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 72851adc1fee..50d795a2542c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6175,13 +6175,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) continue;
ret = btrfs_trim_free_extents(device, &group_trimmed); + + trimmed += group_trimmed; if (ret) { dev_failed++; dev_ret = ret; break; } - - trimmed += group_trimmed; } mutex_unlock(&fs_devices->device_list_mutex);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 15fd8c00f4c0..fc6c91773bc8 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3481,7 +3481,7 @@ static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence, static loff_t find_desired_extent(struct file *file, loff_t offset, int whence) { struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host); - struct btrfs_file_private *private = file->private_data; + struct btrfs_file_private *private; struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_state *cached_state = NULL; struct extent_state **delalloc_cached_state; @@ -3509,7 +3509,19 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence) inode_get_bytes(&inode->vfs_inode) == i_size) return i_size;
- if (!private) { + spin_lock(&inode->lock); + private = file->private_data; + spin_unlock(&inode->lock); + + if (private && private->owner_task != current) { + /* + * Not allocated by us, don't use it as its cached state is used + * by the task that allocated it and we want neither to + * mess with it nor to get incorrect results because it reflects an + * invalid state for the current task. + */ + private = NULL; + } else if (!private) { + private = kzalloc(sizeof(*private), GFP_KERNEL); /* * No worries if memory allocation failed. * lseek SEEK_HOLE/DATA calls to a file when there's delalloc, * so everything will still be correct. */ - file->private_data = private; + if (private) { + bool free = false; + + private->owner_task = current; + + spin_lock(&inode->lock); + if (file->private_data) + free = true; + else + file->private_data = private; + spin_unlock(&inode->lock); + + if (free) { + kfree(private); + private = NULL; + } + } }
if (private) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d5297523d497..5f0c9c3f3bbf 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -533,13 +533,11 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
range.minlen = max(range.minlen, minlen); ret = btrfs_trim_fs(fs_info, &range); - if (ret < 0) - return ret;
if (copy_to_user(arg, &range, sizeof(range))) return -EFAULT;
- return 0; + return ret; }
int __pure btrfs_is_empty_uuid(u8 *uuid) diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index 1b999c6e4193..b98d42ca5564 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -713,8 +713,14 @@ void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page, }
#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \ - bitmap_cut(dst, subpage->bitmaps, 0, \ - subpage_info->name##_offset, subpage_info->bitmap_nr_bits) +{ \ + const int bitmap_nr_bits = subpage_info->bitmap_nr_bits; \ + \ + ASSERT(bitmap_nr_bits < BITS_PER_LONG); \ + *dst = bitmap_read(subpage->bitmaps, \ + subpage_info->name##_offset, \ + bitmap_nr_bits); \ +}
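The subpage hunk above swaps bitmap_cut() for bitmap_read() and adds an ASSERT that the per-page bitmap is narrower than one word. A rough userspace sketch of that kind of bitmap field extraction, assuming (as the ASSERT does) that the field is narrower than a word and, additionally for this sketch, that it does not straddle a word boundary; read_bits() is a made-up helper, not the kernel API:

#include <limits.h>

static unsigned long read_bits(const unsigned long *map,
			       unsigned int start, unsigned int nbits)
{
	unsigned int bits_per_long = sizeof(unsigned long) * CHAR_BIT;
	unsigned int word = start / bits_per_long;	/* word holding the field */
	unsigned int off = start % bits_per_long;	/* bit offset inside it */
	unsigned long mask = (1UL << nbits) - 1;	/* nbits < bits_per_long */

	return (map[word] >> off) & mask;
}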
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, struct page *page, u64 start, u32 len) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 46c1f7498395..3a8ec33a1204 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1493,7 +1493,7 @@ static int check_extent_item(struct extent_buffer *leaf, dref_objectid > BTRFS_LAST_FREE_OBJECTID)) { extent_err(leaf, slot, "invalid data ref objectid value %llu", - dref_root); + dref_objectid); return -EUCLEAN; } if (unlikely(!IS_ALIGNED(dref_offset, diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index 4dd8a993c60a..7c6f260a3be5 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c @@ -64,9 +64,15 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object) memcpy(buf->data, fscache_get_aux(object->cookie), len);
ret = cachefiles_inject_write_error(); - if (ret == 0) - ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, - buf, sizeof(struct cachefiles_xattr) + len, 0); + if (ret == 0) { + ret = mnt_want_write_file(file); + if (ret == 0) { + ret = vfs_setxattr(&nop_mnt_idmap, dentry, + cachefiles_xattr_cache, buf, + sizeof(struct cachefiles_xattr) + len, 0); + mnt_drop_write_file(file); + } + } if (ret < 0) { trace_cachefiles_vfs_error(object, file_inode(file), ret, cachefiles_trace_setxattr_error); @@ -151,8 +157,14 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, int ret;
ret = cachefiles_inject_remove_error(); - if (ret == 0) - ret = vfs_removexattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache); + if (ret == 0) { + ret = mnt_want_write(cache->mnt); + if (ret == 0) { + ret = vfs_removexattr(&nop_mnt_idmap, dentry, + cachefiles_xattr_cache); + mnt_drop_write(cache->mnt); + } + } if (ret < 0) { trace_cachefiles_vfs_error(object, d_inode(dentry), ret, cachefiles_trace_remxattr_error); @@ -208,9 +220,15 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume) memcpy(buf->data, p, volume->vcookie->coherency_len);
ret = cachefiles_inject_write_error(); - if (ret == 0) - ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, - buf, len, 0); + if (ret == 0) { + ret = mnt_want_write(volume->cache->mnt); + if (ret == 0) { + ret = vfs_setxattr(&nop_mnt_idmap, dentry, + cachefiles_xattr_cache, + buf, len, 0); + mnt_drop_write(volume->cache->mnt); + } + } if (ret < 0) { trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret, cachefiles_trace_setxattr_error); diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 6eae3f12ad50..553af738bb3e 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -74,13 +74,7 @@ struct fscrypt_nokey_name {
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) { - if (str->len == 1 && str->name[0] == '.') - return true; - - if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') - return true; - - return false; + return is_dot_dotdot(str->name, str->len); }
/** diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 03bd55069d86..2fe0f3af1a08 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -1949,16 +1949,6 @@ int ecryptfs_encrypt_and_encode_filename( return rc; }
-static bool is_dot_dotdot(const char *name, size_t name_size) -{ - if (name_size == 1 && name[0] == '.') - return true; - else if (name_size == 2 && name[0] == '.' && name[1] == '.') - return true; - - return false; -} - /** * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext * @plaintext_name: The plaintext name diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index edc8ec7581b8..9e40bee3682f 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -205,12 +205,14 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, unsigned int m_pofs) { struct erofs_inode *vi = EROFS_I(inode); - unsigned int bsz = i_blocksize(inode); + loff_t off; char *lnk;
- /* if it cannot be handled with fast symlink scheme */ - if (vi->datalayout != EROFS_INODE_FLAT_INLINE || - inode->i_size >= bsz || inode->i_size < 0) { + m_pofs += vi->xattr_isize; + /* check if it cannot be handled with fast symlink scheme */ + if (vi->datalayout != EROFS_INODE_FLAT_INLINE || inode->i_size < 0 || + check_add_overflow(m_pofs, inode->i_size, &off) || + off > i_blocksize(inode)) { inode->i_op = &erofs_symlink_iops; return 0; } @@ -219,16 +221,6 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, if (!lnk) return -ENOMEM;
- m_pofs += vi->xattr_isize; - /* inline symlink data shouldn't cross block boundary */ - if (m_pofs + inode->i_size > bsz) { - kfree(lnk); - erofs_err(inode->i_sb, - "inline data cross block boundary @ nid %llu", - vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } memcpy(lnk, kaddr + m_pofs, inode->i_size); lnk[inode->i_size] = '\0';
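The erofs change above rejects inline symlinks whose xattr offset plus i_size either overflows or lands past the block, instead of the old post-hoc boundary check. A small userspace sketch of the same overflow-guarded bounds check, using __builtin_add_overflow(), the compiler builtin that the kernel's check_add_overflow() wraps; fits_in_block() and its parameters are made-up names for this example:

#include <stdbool.h>
#include <stdint.h>

static bool fits_in_block(uint64_t inline_off, uint64_t size, uint64_t blksz)
{
	uint64_t end;

	/* reject if inline_off + size wraps around... */
	if (__builtin_add_overflow(inline_off, size, &end))
		return false;

	/* ...or if the inline data would spill past the block */
	return end <= blksz;
}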
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index b65058d972f9..1a1e2214c581 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -514,6 +514,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, if (min_inodes < 1) min_inodes = 1; min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4; + if (min_clusters < 0) + min_clusters = 0;
/* * Start looking in the flex group where we last allocated an @@ -755,10 +757,10 @@ int ext4_mark_inode_used(struct super_block *sb, int ino) struct ext4_group_desc *gdp; ext4_group_t group; int bit; - int err = -EFSCORRUPTED; + int err;
if (ino < EXT4_FIRST_INO(sb) || ino > max_ino) - goto out; + return -EFSCORRUPTED;
group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb); @@ -860,6 +862,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino) err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh); sync_dirty_buffer(group_desc_bh); out: + brelse(inode_bitmap_bh); return err; }
@@ -1053,12 +1056,13 @@ struct inode *__ext4_new_inode(struct mnt_idmap *idmap, brelse(inode_bitmap_bh); inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); /* Skip groups with suspicious inode tables */ - if (((!(sbi->s_mount_state & EXT4_FC_REPLAY)) - && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || - IS_ERR(inode_bitmap_bh)) { + if (IS_ERR(inode_bitmap_bh)) { inode_bitmap_bh = NULL; goto next_group; } + if (!(sbi->s_mount_state & EXT4_FC_REPLAY) && + EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) + goto next_group;
repeat_in_this_group: ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index a604aa1d23ae..cb65052ee3de 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1665,24 +1665,36 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir, struct ext4_dir_entry_2 **res_dir, int *has_inline_data) { + struct ext4_xattr_ibody_find is = { + .s = { .not_found = -ENODATA, }, + }; + struct ext4_xattr_info i = { + .name_index = EXT4_XATTR_INDEX_SYSTEM, + .name = EXT4_XATTR_SYSTEM_DATA, + }; int ret; - struct ext4_iloc iloc; void *inline_start; int inline_size;
- if (ext4_get_inode_loc(dir, &iloc)) - return NULL; + ret = ext4_get_inode_loc(dir, &is.iloc); + if (ret) + return ERR_PTR(ret);
down_read(&EXT4_I(dir)->xattr_sem); + + ret = ext4_xattr_ibody_find(dir, &i, &is); + if (ret) + goto out; + if (!ext4_has_inline_data(dir)) { *has_inline_data = 0; goto out; }
- inline_start = (void *)ext4_raw_inode(&iloc)->i_block + + inline_start = (void *)ext4_raw_inode(&is.iloc)->i_block + EXT4_INLINE_DOTDOT_SIZE; inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE; - ret = ext4_search_dir(iloc.bh, inline_start, inline_size, + ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size, dir, fname, 0, res_dir); if (ret == 1) goto out_find; @@ -1692,20 +1704,23 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir, if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE) goto out;
- inline_start = ext4_get_inline_xattr_pos(dir, &iloc); + inline_start = ext4_get_inline_xattr_pos(dir, &is.iloc); inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
- ret = ext4_search_dir(iloc.bh, inline_start, inline_size, + ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size, dir, fname, 0, res_dir); if (ret == 1) goto out_find;
out: - brelse(iloc.bh); - iloc.bh = NULL; + brelse(is.iloc.bh); + if (ret < 0) + is.iloc.bh = ERR_PTR(ret); + else + is.iloc.bh = NULL; out_find: up_read(&EXT4_I(dir)->xattr_sem); - return iloc.bh; + return is.iloc.bh; }
int ext4_delete_inline_entry(handle_t *handle, diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 870397f3de55..87ba7f58216f 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3886,11 +3886,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb, /* * Clear the trimmed flag for the group so that the next * ext4_trim_fs can trim it. - * If the volume is mounted with -o discard, online discard - * is supported and the free blocks will be trimmed online. */ - if (!test_opt(sb, DISCARD)) - EXT4_MB_GRP_CLEAR_TRIMMED(db); + EXT4_MB_GRP_CLEAR_TRIMMED(db);
if (!db->bb_free_root.rb_node) { /* No more items in the per group rb tree @@ -6586,8 +6583,9 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, " group:%u block:%d count:%lu failed" " with %d", block_group, bit, count, err); - } else - EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); + } + + EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5baacb3058ab..46c4f7504979 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5205,6 +5205,18 @@ static int ext4_block_group_meta_init(struct super_block *sb, int silent) return 0; }
+/* + * It's hard to get stripe aligned blocks if stripe is not aligned with + * cluster, just disable stripe and alert user to simplify code and avoid + * stripe aligned allocation which will rarely succeed. + */ +static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + return (stripe > 0 && sbi->s_cluster_ratio > 1 && + stripe % sbi->s_cluster_ratio != 0); +} + static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) { struct ext4_super_block *es = NULL; @@ -5312,13 +5324,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) goto failed_mount3;
sbi->s_stripe = ext4_get_stripe_size(sbi); - /* - * It's hard to get stripe aligned blocks if stripe is not aligned with - * cluster, just disable stripe and alert user to simpfy code and avoid - * stripe aligned allocation which will rarely successes. - */ - if (sbi->s_stripe > 0 && sbi->s_cluster_ratio > 1 && - sbi->s_stripe % sbi->s_cluster_ratio != 0) { + if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) { ext4_msg(sb, KERN_WARNING, "stripe (%lu) is not aligned with cluster size (%u), " "stripe is disabled", @@ -6482,6 +6488,15 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
}
+ if ((ctx->spec & EXT4_SPEC_s_stripe) && + ext4_is_stripe_incompatible(sb, ctx->s_stripe)) { + ext4_msg(sb, KERN_WARNING, + "stripe (%lu) is not aligned with cluster size (%u), " + "stripe is disabled", + ctx->s_stripe, sbi->s_cluster_ratio); + ctx->s_stripe = 0; + } + /* * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause * two calls to ext4_should_dioread_nolock() to return inconsistent diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index c07fe6b840a0..f7ef69f44f3d 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -887,14 +887,15 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { +#ifdef CONFIG_F2FS_CHECK_FS struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size; - bool compressed = dn->data_blkaddr == COMPRESS_ADDR; int cluster_end = 0; + unsigned int count; int i; char *reason = "";
- if (!compressed) + if (dn->data_blkaddr != COMPRESS_ADDR) return false;
/* [..., COMPR_ADDR, ...] */ @@ -903,7 +904,7 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) goto out; }
- for (i = 1; i < cluster_size; i++) { + for (i = 1, count = 1; i < cluster_size; i++, count++) { block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node + i);
@@ -923,19 +924,42 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) goto out; } } + + f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size && + !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED)); + return false; out: f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s", dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason); set_sbi_flag(sbi, SBI_NEED_FSCK); return true; +#else + return false; +#endif }
-static int __f2fs_cluster_blocks(struct inode *inode, - unsigned int cluster_idx, bool compr) +static int __f2fs_get_cluster_blocks(struct inode *inode, + struct dnode_of_data *dn) { - struct dnode_of_data dn; unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; + int count, i; + + for (i = 0, count = 0; i < cluster_size; i++) { + block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + if (__is_valid_data_blkaddr(blkaddr)) + count++; + } + + return count; +} + +static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx, + enum cluster_check_type type) +{ + struct dnode_of_data dn; unsigned int start_idx = cluster_idx << F2FS_I(inode)->i_log_cluster_size; int ret; @@ -950,31 +974,16 @@ static int __f2fs_cluster_blocks(struct inode *inode,
if (f2fs_sanity_check_cluster(&dn)) { ret = -EFSCORRUPTED; - f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER); goto fail; }
if (dn.data_blkaddr == COMPRESS_ADDR) { - int i; - - ret = 1; - for (i = 1; i < cluster_size; i++) { - block_t blkaddr; - - blkaddr = data_blkaddr(dn.inode, - dn.node_page, dn.ofs_in_node + i); - if (compr) { - if (__is_valid_data_blkaddr(blkaddr)) - ret++; - } else { - if (blkaddr != NULL_ADDR) - ret++; - } - } - - f2fs_bug_on(F2FS_I_SB(inode), - !compr && ret != cluster_size && - !is_inode_flag_set(inode, FI_COMPRESS_RELEASED)); + if (type == CLUSTER_COMPR_BLKS) + ret = 1 + __f2fs_get_cluster_blocks(inode, &dn); + else if (type == CLUSTER_IS_COMPR) + ret = 1; + } else if (type == CLUSTER_RAW_BLKS) { + ret = __f2fs_get_cluster_blocks(inode, &dn); } fail: f2fs_put_dnode(&dn); @@ -984,15 +993,33 @@ static int __f2fs_cluster_blocks(struct inode *inode, /* return # of compressed blocks in compressed cluster */ static int f2fs_compressed_blocks(struct compress_ctx *cc) { - return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true); + return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, + CLUSTER_COMPR_BLKS); }
-/* return # of valid blocks in compressed cluster */ +/* return # of raw blocks in non-compressed cluster */ +static int f2fs_decompressed_blocks(struct inode *inode, + unsigned int cluster_idx) +{ + return __f2fs_cluster_blocks(inode, cluster_idx, + CLUSTER_RAW_BLKS); +} + +/* return whether cluster is compressed one or not */ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) { return __f2fs_cluster_blocks(inode, index >> F2FS_I(inode)->i_log_cluster_size, - false); + CLUSTER_IS_COMPR); +} + +/* return whether cluster contains non raw blocks or not */ +bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index) +{ + unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size; + + return f2fs_decompressed_blocks(inode, cluster_idx) != + F2FS_I(inode)->i_cluster_size; }
static bool cluster_may_compress(struct compress_ctx *cc) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 84fc87018180..1c59a3b2b2c3 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1614,9 +1614,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag) map->m_flags |= F2FS_MAP_NEW; } else if (is_hole) { if (f2fs_compressed_file(inode) && - f2fs_sanity_check_cluster(&dn) && - (flag != F2FS_GET_BLOCK_FIEMAP || - IS_ENABLED(CONFIG_F2FS_CHECK_FS))) { + f2fs_sanity_check_cluster(&dn)) { err = -EFSCORRUPTED; f2fs_handle_error(sbi, ERROR_CORRUPTED_CLUSTER); @@ -2623,10 +2621,13 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) struct dnode_of_data dn; struct node_info ni; bool ipu_force = false; + bool atomic_commit; int err = 0;
/* Use COW inode to make dnode_of_data for atomic write */ - if (f2fs_is_atomic_file(inode)) + atomic_commit = f2fs_is_atomic_file(inode) && + page_private_atomic(fio->page); + if (atomic_commit) set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0); else set_new_dnode(&dn, inode, NULL, NULL, 0); @@ -2730,6 +2731,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) f2fs_outplace_write_data(&dn, fio); trace_f2fs_do_write_data_page(page, OPU); set_inode_flag(inode, FI_APPEND_WRITE); + if (atomic_commit) + clear_page_private_atomic(page); out_writepage: f2fs_put_dnode(&dn); out: @@ -3700,6 +3703,9 @@ static int f2fs_write_end(struct file *file,
set_page_dirty(page);
+ if (f2fs_is_atomic_file(inode)) + set_page_private_atomic(page); + if (pos + copied > i_size_read(inode) && !f2fs_verity_in_progress(inode)) { f2fs_i_size_write(inode, pos + copied); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index c624ffff6f19..166ec8942595 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -157,7 +157,8 @@ static unsigned long dir_block_index(unsigned int level, unsigned long bidx = 0;
for (i = 0; i < level; i++) - bidx += dir_buckets(i, dir_level) * bucket_blocks(i); + bidx += mul_u32_u32(dir_buckets(i, dir_level), + bucket_blocks(i)); bidx += idx * bucket_blocks(level); return bidx; } diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 6a9a470345bf..d6fb053b6dfb 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -367,7 +367,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi, static void __drop_largest_extent(struct extent_tree *et, pgoff_t fofs, unsigned int len) { - if (fofs < et->largest.fofs + et->largest.len && + if (fofs < (pgoff_t)et->largest.fofs + et->largest.len && fofs + len > et->largest.fofs) { et->largest.len = 0; et->largest_updated = true; @@ -457,7 +457,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
if (type == EX_READ && et->largest.fofs <= pgofs && - et->largest.fofs + et->largest.len > pgofs) { + (pgoff_t)et->largest.fofs + et->largest.len > pgofs) { *ei = et->largest; ret = true; stat_inc_largest_node_hit(sbi); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 6371b295fba6..7faf9446ea5d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -282,6 +282,7 @@ enum { APPEND_INO, /* for append ino list */ UPDATE_INO, /* for update ino list */ TRANS_DIR_INO, /* for transactions dir ino list */ + XATTR_DIR_INO, /* for xattr updated dir ino list */ FLUSH_INO, /* for multiple device flushing */ MAX_INO_ENTRY, /* max. list */ }; @@ -779,7 +780,6 @@ enum { FI_NEED_IPU, /* used for ipu per file */ FI_ATOMIC_FILE, /* indicate atomic file */ FI_DATA_EXIST, /* indicate data exists */ - FI_INLINE_DOTS, /* indicate inline dot dentries */ FI_SKIP_WRITES, /* should skip data page writeback */ FI_OPU_WRITE, /* used for opu per file */ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ @@ -797,6 +797,7 @@ enum { FI_ALIGNED_WRITE, /* enable aligned write */ FI_COW_FILE, /* indicate COW file */ FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */ + FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */ FI_ATOMIC_REPLACE, /* indicate atomic replace */ FI_OPENED_FILE, /* indicate file has been opened */ FI_MAX, /* max flag, never be used */ @@ -1149,6 +1150,7 @@ enum cp_reason_type { CP_FASTBOOT_MODE, CP_SPEC_LOG_NUM, CP_RECOVER_DIR, + CP_XATTR_DIR, };
enum iostat_type { @@ -1411,7 +1413,8 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr); * bit 1 PAGE_PRIVATE_ONGOING_MIGRATION * bit 2 PAGE_PRIVATE_INLINE_INODE * bit 3 PAGE_PRIVATE_REF_RESOURCE - * bit 4- f2fs private data + * bit 4 PAGE_PRIVATE_ATOMIC_WRITE + * bit 5- f2fs private data * * Layout B: lowest bit should be 0 * page.private is a wrapped pointer. @@ -1421,6 +1424,7 @@ enum { PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */ PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */ PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */ + PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */ PAGE_PRIVATE_MAX };
@@ -2386,14 +2390,17 @@ static inline void clear_page_private_##name(struct page *page) \ PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER); PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION); +PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE); PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION); +PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE); PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); +PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
static inline unsigned long get_page_private_data(struct page *page) { @@ -2425,6 +2432,7 @@ static inline void clear_page_private_all(struct page *page) clear_page_private_reference(page); clear_page_private_gcing(page); clear_page_private_inline(page); + clear_page_private_atomic(page);
f2fs_bug_on(F2FS_P_SB(page), page_private(page)); } @@ -3028,10 +3036,8 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, return; fallthrough; case FI_DATA_EXIST: - case FI_INLINE_DOTS: case FI_PIN_FILE: case FI_COMPRESS_RELEASED: - case FI_ATOMIC_COMMITTED: f2fs_mark_inode_dirty_sync(inode, true); } } @@ -3153,8 +3159,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) set_bit(FI_INLINE_DENTRY, fi->flags); if (ri->i_inline & F2FS_DATA_EXIST) set_bit(FI_DATA_EXIST, fi->flags); - if (ri->i_inline & F2FS_INLINE_DOTS) - set_bit(FI_INLINE_DOTS, fi->flags); if (ri->i_inline & F2FS_EXTRA_ATTR) set_bit(FI_EXTRA_ATTR, fi->flags); if (ri->i_inline & F2FS_PIN_FILE) @@ -3175,8 +3179,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) ri->i_inline |= F2FS_INLINE_DENTRY; if (is_inode_flag_set(inode, FI_DATA_EXIST)) ri->i_inline |= F2FS_DATA_EXIST; - if (is_inode_flag_set(inode, FI_INLINE_DOTS)) - ri->i_inline |= F2FS_INLINE_DOTS; if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) ri->i_inline |= F2FS_EXTRA_ATTR; if (is_inode_flag_set(inode, FI_PIN_FILE)) @@ -3263,11 +3265,6 @@ static inline int f2fs_exist_data(struct inode *inode) return is_inode_flag_set(inode, FI_DATA_EXIST); }
-static inline int f2fs_has_inline_dots(struct inode *inode) -{ - return is_inode_flag_set(inode, FI_INLINE_DOTS); -} - static inline int f2fs_is_mmap_file(struct inode *inode) { return is_inode_flag_set(inode, FI_MMAP_FILE); @@ -3373,17 +3370,6 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); }
-static inline bool is_dot_dotdot(const u8 *name, size_t len) -{ - if (len == 1 && name[0] == '.') - return true; - - if (len == 2 && name[0] == '.' && name[1] == '.') - return true; - - return false; -} - static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { @@ -3499,6 +3485,8 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr); int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); +int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, + bool readonly, bool need_lock); int f2fs_precache_extents(struct inode *inode); int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); int f2fs_fileattr_set(struct mnt_idmap *idmap, @@ -4274,6 +4262,11 @@ static inline bool f2fs_meta_inode_gc_required(struct inode *inode) * compress.c */ #ifdef CONFIG_F2FS_FS_COMPRESSION +enum cluster_check_type { + CLUSTER_IS_COMPR, /* check only if compressed cluster */ + CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */ + CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */ +}; bool f2fs_is_compressed_page(struct page *page); struct page *f2fs_compress_control_page(struct page *page); int f2fs_prepare_compress_overwrite(struct inode *inode, @@ -4300,6 +4293,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc, struct writeback_control *wbc, enum iostat_type io_type); int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); +bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index); void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, pgoff_t fofs, block_t blkaddr, unsigned int llen, unsigned int c_len); @@ -4386,6 +4380,12 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino) { } #define inc_compr_inode_stat(inode) do { } while (0) +static inline int f2fs_is_compressed_cluster( + struct inode *inode, + pgoff_t index) { return 0; } +static inline bool f2fs_is_sparse_cluster( + struct inode *inode, + pgoff_t index) { return true; } static inline void f2fs_update_read_extent_tree_range_compressed( struct inode *inode, pgoff_t fofs, block_t blkaddr, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 523896200908..74fac935bd09 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -213,6 +213,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO)) cp_reason = CP_RECOVER_DIR; + else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, + XATTR_DIR_INO)) + cp_reason = CP_XATTR_DIR;
return cp_reason; } @@ -2094,10 +2097,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) struct mnt_idmap *idmap = file_mnt_idmap(filp); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct inode *pinode; loff_t isize; int ret;
+ if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + if (!inode_owner_or_capable(idmap, inode)) return -EACCES;
@@ -2143,15 +2148,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) /* Check if the inode already has a COW inode */ if (fi->cow_inode == NULL) { /* Create a COW inode for atomic write */ - pinode = f2fs_iget(inode->i_sb, fi->i_pino); - if (IS_ERR(pinode)) { - f2fs_up_write(&fi->i_gc_rwsem[WRITE]); - ret = PTR_ERR(pinode); - goto out; - } + struct dentry *dentry = file_dentry(filp); + struct inode *dir = d_inode(dentry->d_parent);
- ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode); - iput(pinode); + ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode); if (ret) { f2fs_up_write(&fi->i_gc_rwsem[WRITE]); goto out; @@ -2164,6 +2164,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) F2FS_I(fi->cow_inode)->atomic_inode = inode; } else { /* Reuse the already created COW inode */ + f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode)); + + invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1); + ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true); if (ret) { f2fs_up_write(&fi->i_gc_rwsem[WRITE]); @@ -2205,6 +2209,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) struct mnt_idmap *idmap = file_mnt_idmap(filp); int ret;
+ if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + if (!inode_owner_or_capable(idmap, inode)) return -EACCES;
@@ -2237,6 +2244,9 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp) struct mnt_idmap *idmap = file_mnt_idmap(filp); int ret;
+ if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + if (!inode_owner_or_capable(idmap, inode)) return -EACCES;
@@ -2255,34 +2265,13 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp) return ret; }
-static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) +int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, + bool readonly, bool need_lock) { - struct inode *inode = file_inode(filp); - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct super_block *sb = sbi->sb; - __u32 in; int ret = 0;
- if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (get_user(in, (__u32 __user *)arg)) - return -EFAULT; - - if (in != F2FS_GOING_DOWN_FULLSYNC) { - ret = mnt_want_write_file(filp); - if (ret) { - if (ret == -EROFS) { - ret = 0; - f2fs_stop_checkpoint(sbi, false, - STOP_CP_REASON_SHUTDOWN); - trace_f2fs_shutdown(sbi, in, ret); - } - return ret; - } - } - - switch (in) { + switch (flag) { case F2FS_GOING_DOWN_FULLSYNC: ret = freeze_bdev(sb->s_bdev); if (ret) @@ -2316,18 +2305,62 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) goto out; }
+ if (readonly) + goto out; + + /* grab sb->s_umount to avoid racing w/ remount() */ + if (need_lock) + down_read(&sbi->sb->s_umount); + f2fs_stop_gc_thread(sbi); f2fs_stop_discard_thread(sbi);
f2fs_drop_discard_cmd(sbi); clear_opt(sbi, DISCARD);
+ if (need_lock) + up_read(&sbi->sb->s_umount); + f2fs_update_time(sbi, REQ_TIME); out: - if (in != F2FS_GOING_DOWN_FULLSYNC) - mnt_drop_write_file(filp);
- trace_f2fs_shutdown(sbi, in, ret); + trace_f2fs_shutdown(sbi, flag, ret); + + return ret; +} + +static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + __u32 in; + int ret; + bool need_drop = false, readonly = false; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (get_user(in, (__u32 __user *)arg)) + return -EFAULT; + + if (in != F2FS_GOING_DOWN_FULLSYNC) { + ret = mnt_want_write_file(filp); + if (ret) { + if (ret != -EROFS) + return ret; + + /* fallback to nosync shutdown for readonly fs */ + in = F2FS_GOING_DOWN_NOSYNC; + readonly = true; + } else { + need_drop = true; + } + } + + ret = f2fs_do_shutdown(sbi, in, readonly, true); + + if (need_drop) + mnt_drop_write_file(filp);
return ret; } @@ -2640,7 +2673,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
inode_lock(inode);
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { + if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) || + f2fs_is_atomic_file(inode)) { err = -EINVAL; goto unlock_out; } @@ -2663,7 +2697,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, * block addresses are continuous. */ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) { - if (ei.fofs + ei.len >= pg_end) + if ((pgoff_t)ei.fofs + ei.len >= pg_end) goto out; }
@@ -2746,6 +2780,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, goto clear_out; }
+ f2fs_wait_on_page_writeback(page, DATA, true, true); + set_page_dirty(page); set_page_private_gcing(page); f2fs_put_page(page, 1); @@ -2869,6 +2905,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, goto out_unlock; }
+ if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) { + ret = -EINVAL; + goto out_unlock; + } + ret = -EINVAL; if (pos_in + len > src->i_size || pos_in + len < pos_in) goto out_unlock; @@ -3253,6 +3294,11 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
inode_lock(inode);
+ if (f2fs_is_atomic_file(inode)) { + ret = -EINVAL; + goto out; + } + if (!pin) { clear_inode_flag(inode, FI_PIN_FILE); f2fs_i_gc_failures_write(inode, 0); @@ -4140,6 +4186,8 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) /* It will never fail, when page has pinned above */ f2fs_bug_on(F2FS_I_SB(inode), !page);
+ f2fs_wait_on_page_writeback(page, DATA, true, true); + set_page_dirty(page); set_page_private_gcing(page); f2fs_put_page(page, 1); @@ -4154,9 +4202,8 @@ static int f2fs_ioc_decompress_file(struct file *filp) struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); - pgoff_t page_idx = 0, last_idx; - int cluster_size = fi->i_cluster_size; - int count, ret; + pgoff_t page_idx = 0, last_idx, cluster_idx; + int ret;
if (!f2fs_sb_has_compression(sbi) || F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) @@ -4189,10 +4236,15 @@ static int f2fs_ioc_decompress_file(struct file *filp) goto out;
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + last_idx >>= fi->i_log_cluster_size; + + for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) { + page_idx = cluster_idx << fi->i_log_cluster_size; + + if (!f2fs_is_compressed_cluster(inode, page_idx)) + continue;
- count = last_idx - page_idx; - while (count && count >= cluster_size) { - ret = redirty_blocks(inode, page_idx, cluster_size); + ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); if (ret < 0) break;
@@ -4202,9 +4254,6 @@ static int f2fs_ioc_decompress_file(struct file *filp) break; }
- count -= cluster_size; - page_idx += cluster_size; - cond_resched(); if (fatal_signal_pending(current)) { ret = -EINTR; @@ -4230,9 +4279,9 @@ static int f2fs_ioc_compress_file(struct file *filp) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - pgoff_t page_idx = 0, last_idx; - int cluster_size = F2FS_I(inode)->i_cluster_size; - int count, ret; + struct f2fs_inode_info *fi = F2FS_I(inode); + pgoff_t page_idx = 0, last_idx, cluster_idx; + int ret;
if (!f2fs_sb_has_compression(sbi) || F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) @@ -4264,10 +4313,15 @@ static int f2fs_ioc_compress_file(struct file *filp) set_inode_flag(inode, FI_ENABLE_COMPRESS);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + last_idx >>= fi->i_log_cluster_size; + + for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) { + page_idx = cluster_idx << fi->i_log_cluster_size;
- count = last_idx - page_idx; - while (count && count >= cluster_size) { - ret = redirty_blocks(inode, page_idx, cluster_size); + if (f2fs_is_sparse_cluster(inode, page_idx)) + continue; + + ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); if (ret < 0) break;
@@ -4277,9 +4331,6 @@ static int f2fs_ioc_compress_file(struct file *filp) break; }
- count -= cluster_size; - page_idx += cluster_size; - cond_resched(); if (fatal_signal_pending(current)) { ret = -EINTR; @@ -4538,6 +4589,10 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos, iov_iter_count(to), READ);
+ /* In LFS mode, if there is inflight dio, wait for its completion */ + if (f2fs_lfs_mode(F2FS_I_SB(inode))) + inode_dio_wait(inode); + if (f2fs_should_use_dio(inode, iocb, to)) { ret = f2fs_dio_read_iter(iocb, to); } else { diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 709b2f79872f..a3e0c9273543 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -35,6 +35,11 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync) if (f2fs_inode_dirtied(inode, sync)) return;
+ if (f2fs_is_atomic_file(inode)) { + set_inode_flag(inode, FI_ATOMIC_DIRTIED); + return; + } + mark_inode_dirty_sync(inode); }
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 7bca22e5dec4..2e08e1fdf485 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -455,63 +455,6 @@ struct dentry *f2fs_get_parent(struct dentry *child) return d_obtain_alias(f2fs_iget(child->d_sb, ino)); }
-static int __recover_dot_dentries(struct inode *dir, nid_t pino) -{ - struct f2fs_sb_info *sbi = F2FS_I_SB(dir); - struct qstr dot = QSTR_INIT(".", 1); - struct qstr dotdot = QSTR_INIT("..", 2); - struct f2fs_dir_entry *de; - struct page *page; - int err = 0; - - if (f2fs_readonly(sbi->sb)) { - f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint", - dir->i_ino, pino); - return 0; - } - - if (!S_ISDIR(dir->i_mode)) { - f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)", - dir->i_ino, dir->i_mode, pino); - set_sbi_flag(sbi, SBI_NEED_FSCK); - return -ENOTDIR; - } - - err = f2fs_dquot_initialize(dir); - if (err) - return err; - - f2fs_balance_fs(sbi, true); - - f2fs_lock_op(sbi); - - de = f2fs_find_entry(dir, &dot, &page); - if (de) { - f2fs_put_page(page, 0); - } else if (IS_ERR(page)) { - err = PTR_ERR(page); - goto out; - } else { - err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR); - if (err) - goto out; - } - - de = f2fs_find_entry(dir, &dotdot, &page); - if (de) - f2fs_put_page(page, 0); - else if (IS_ERR(page)) - err = PTR_ERR(page); - else - err = f2fs_do_add_link(dir, &dotdot, NULL, pino, S_IFDIR); -out: - if (!err) - clear_inode_flag(dir, FI_INLINE_DOTS); - - f2fs_unlock_op(sbi); - return err; -} - static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { @@ -521,7 +464,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, struct dentry *new; nid_t ino = -1; int err = 0; - unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir)); struct f2fs_filename fname;
trace_f2fs_lookup_start(dir, dentry, flags); @@ -558,17 +500,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, goto out; }
- if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) { - err = __recover_dot_dentries(dir, root_ino); - if (err) - goto out_iput; - } - - if (f2fs_has_inline_dots(inode)) { - err = __recover_dot_dentries(inode, dir->i_ino); - if (err) - goto out_iput; - } if (IS_ENCRYPTED(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && !fscrypt_has_permitted_context(dir, inode)) { diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index e3e2c0b2f495..c0ba379a6d8f 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -199,6 +199,10 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean) clear_inode_flag(inode, FI_ATOMIC_COMMITTED); clear_inode_flag(inode, FI_ATOMIC_REPLACE); clear_inode_flag(inode, FI_ATOMIC_FILE); + if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { + clear_inode_flag(inode, FI_ATOMIC_DIRTIED); + f2fs_mark_inode_dirty_sync(inode, true); + } stat_dec_atomic_inode(inode);
F2FS_I(inode)->atomic_write_task = NULL; @@ -368,6 +372,10 @@ static int __f2fs_commit_atomic_write(struct inode *inode) } else { sbi->committed_atomic_block += fi->atomic_write_cnt; set_inode_flag(inode, FI_ATOMIC_COMMITTED); + if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { + clear_inode_flag(inode, FI_ATOMIC_DIRTIED); + f2fs_mark_inode_dirty_sync(inode, true); + } }
__complete_revoke_list(inode, &revoke_list, ret ? true : false); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index e022d8233c0a..540fa1dfc77d 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2543,6 +2543,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) return err; }
+static void f2fs_shutdown(struct super_block *sb) +{ + f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false); +} + #ifdef CONFIG_QUOTA static bool f2fs_need_recovery(struct f2fs_sb_info *sbi) { @@ -3142,6 +3147,7 @@ static const struct super_operations f2fs_sops = { .unfreeze_fs = f2fs_unfreeze, .statfs = f2fs_statfs, .remount_fs = f2fs_remount, + .shutdown = f2fs_shutdown, };
#ifdef CONFIG_FS_ENCRYPTION @@ -3330,9 +3336,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, u32 segment_count = le32_to_cpu(raw_super->segment_count); u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); u64 main_end_blkaddr = main_blkaddr + - (segment_count_main << log_blocks_per_seg); + ((u64)segment_count_main << log_blocks_per_seg); u64 seg_end_blkaddr = segment0_blkaddr + - (segment_count << log_blocks_per_seg); + ((u64)segment_count << log_blocks_per_seg);
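The two (u64) casts above matter because segment_count_main and segment_count are 32-bit on-disk fields; without widening one operand first, the shift by log_blocks_per_seg is done in 32-bit arithmetic and can wrap for large filesystems. A minimal standalone sketch of the difference (hypothetical values, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t segment_count = 1u << 23;	/* hypothetical: 8M segments */
	uint32_t log_blocks_per_seg = 9;	/* 512 blocks per segment */

	/* 32-bit shift: wraps to 0 here, producing a bogus end address */
	uint64_t wrapped = segment_count << log_blocks_per_seg;
	/* widened first, as the patch does with the (u64) cast */
	uint64_t correct = (uint64_t)segment_count << log_blocks_per_seg;

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}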
if (segment0_blkaddr != cp_blkaddr) { f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", @@ -4131,12 +4137,14 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason, }
f2fs_warn(sbi, "Remounting filesystem read-only"); + /* - * Make sure updated value of ->s_mount_flags will be visible before - * ->s_flags update + * We have already set CP_ERROR_FLAG flag to stop all updates + * to filesystem, so it doesn't need to set SB_RDONLY flag here + * because the flag should be set covered w/ sb->s_umount semaphore + * via remount procedure, otherwise, it will confuse code like + * freeze_super() which will lead to deadlocks and other problems. */ - smp_wmb(); - sb->s_flags |= SB_RDONLY; }
static void f2fs_record_error_work(struct work_struct *work) diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index e47cc917d118..54ab9caaae4d 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -629,6 +629,7 @@ static int __f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, struct page *ipage, int flags) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_xattr_entry *here, *last; void *base_addr, *last_base_addr; int found, newsize; @@ -772,9 +773,18 @@ static int __f2fs_setxattr(struct inode *inode, int index, if (index == F2FS_XATTR_INDEX_ENCRYPTION && !strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT)) f2fs_set_encrypted_inode(inode); - if (S_ISDIR(inode->i_mode)) - set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+ if (!S_ISDIR(inode->i_mode)) + goto same; + /* + * In strict mode, fsync() always tries to trigger a checkpoint for all + * metadata consistency; in other modes, it triggers a checkpoint when + * the parent's xattr metadata was updated. + */ + if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) + set_sbi_flag(sbi, SBI_NEED_CP); + else + f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO); same: if (is_inode_flag_set(inode, FI_ACL_MODE)) { inode->i_mode = F2FS_I(inode)->i_acl_mode; diff --git a/fs/fcntl.c b/fs/fcntl.c index 9f606714d081..1484f062ee65 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -86,8 +86,8 @@ static int setfl(int fd, struct file * filp, unsigned int arg) return error; }
-static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, - int force) +void __f_setown(struct file *filp, struct pid *pid, enum pid_type type, + int force) { write_lock_irq(&filp->f_owner.lock); if (force || !filp->f_owner.pid) { @@ -97,19 +97,13 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
if (pid) { const struct cred *cred = current_cred(); + security_file_set_fowner(filp); filp->f_owner.uid = cred->uid; filp->f_owner.euid = cred->euid; } } write_unlock_irq(&filp->f_owner.lock); } - -void __f_setown(struct file *filp, struct pid *pid, enum pid_type type, - int force) -{ - security_file_set_fowner(filp); - f_modown(filp, pid, type, force); -} EXPORT_SYMBOL(__f_setown);
int f_setown(struct file *filp, int who, int force) @@ -145,7 +139,7 @@ EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp) { - f_modown(filp, NULL, PIDTYPE_TGID, 1); + __f_setown(filp, NULL, PIDTYPE_TGID, 1); }
pid_t f_getown(struct file *filp) diff --git a/fs/inode.c b/fs/inode.c index 776c2a049d07..9cafde77e2b0 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -757,6 +757,10 @@ void evict_inodes(struct super_block *sb) continue;
spin_lock(&inode->i_lock); + if (atomic_read(&inode->i_count)) { + spin_unlock(&inode->i_lock); + continue; + } if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { spin_unlock(&inode->i_lock); continue; diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 5713994328cb..0625d1c0d064 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap) }
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); - if (!bmp->db_numag) { + if (!bmp->db_numag || bmp->db_numag >= MAXAG) { err = -EINVAL; goto err_release_metapage; } @@ -652,7 +652,7 @@ int dbNextAG(struct inode *ipbmap) * average free space. */ for (i = 0 ; i < bmp->db_numag; i++, agpref++) { - if (agpref == bmp->db_numag) + if (agpref >= bmp->db_numag) agpref = 0;
if (atomic_read(&bmp->db_active[agpref])) diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index 82d88dcf0ea6..b30e4cf2f579 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -1360,7 +1360,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip) /* get the ag number of this iag */ agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb)); dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag; - if (agno < 0 || agno > dn_numag) + if (agno < 0 || agno > dn_numag || agno >= MAXAG) return -EIO;
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) { diff --git a/fs/namei.c b/fs/namei.c index e728ba085ebe..beffbb02a24e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2667,10 +2667,8 @@ static int lookup_one_common(struct mnt_idmap *idmap, if (!len) return -EACCES;
- if (unlikely(name[0] == '.')) { - if (len < 2 || (len == 2 && name[1] == '.')) - return -EACCES; - } + if (is_dot_dotdot(name, len)) + return -EACCES;
while (len--) { unsigned int c = *(const unsigned char *)name++; diff --git a/fs/namespace.c b/fs/namespace.c index e6c61d4997cc..b4385e2413d5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2796,8 +2796,15 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * if (!__mnt_is_readonly(mnt) && (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM); + char *buf, *mntpath; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (buf) + mntpath = d_path(mountpoint, buf, PAGE_SIZE); + else + mntpath = ERR_PTR(-ENOMEM); + if (IS_ERR(mntpath)) + mntpath = "(unknown)";
pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n", sb->s_type->name, @@ -2805,8 +2812,9 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * mntpath, &sb->s_time_max, (unsigned long long)sb->s_time_max);
- free_page((unsigned long)buf); sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; + if (buf) + free_page((unsigned long)buf); } }
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index c95c50328ced..6c4539ed2654 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1957,6 +1957,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov set_bit(ops->owner_flag_bit, &sp->so_flags); nfs4_put_state_owner(sp); status = nfs4_recovery_handle_error(clp, status); + nfs4_free_state_owners(&freeme); return (status != 0) ? status : -EAGAIN; }
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index 07bf219f9ae4..5c9f8f8404d5 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -1040,8 +1040,6 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, if (likely(ret == 0)) goto open_file;
- if (ret == -EEXIST) - goto retry; trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret); status = nfserr_jukebox; goto construction_err; @@ -1056,6 +1054,7 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, status = nfserr_jukebox; goto construction_err; } + nfsd_file_put(nf); open_retry = false; fh_put(fhp); goto retry; diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index 7a806ac13e31..8cca1329f348 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -581,6 +581,7 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr, .id = id, .type = type, }; + __be32 status = nfs_ok; __be32 *p; int ret; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); @@ -593,12 +594,16 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr, return nfserrno(ret); ret = strlen(item->name); WARN_ON_ONCE(ret > IDMAP_NAMESZ); + p = xdr_reserve_space(xdr, ret + 4); - if (!p) - return nfserr_resource; - p = xdr_encode_opaque(p, item->name, ret); + if (unlikely(!p)) { + status = nfserr_resource; + goto out_put; + } + xdr_encode_opaque(p, item->name, ret); +out_put: cache_put(&item->h, nn->idtoname_cache); - return 0; + return status; }
static bool diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 3509e73abe1f..4395577825a7 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -806,6 +806,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, ci = &cmsg->cm_u.cm_clntinfo; if (get_user(namelen, &ci->cc_name.cn_len)) return -EFAULT; + if (!namelen) { + dprintk("%s: namelen should not be zero", __func__); + return -EINVAL; + } name.data = memdup_user(&ci->cc_name.cn_id, namelen); if (IS_ERR(name.data)) return PTR_ERR(name.data); @@ -828,6 +832,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, cnm = &cmsg->cm_u.cm_name; if (get_user(namelen, &cnm->cn_len)) return -EFAULT; + if (!namelen) { + dprintk("%s: namelen should not be zero", __func__); + return -EINVAL; + } name.data = memdup_user(&cnm->cn_id, namelen); if (IS_ERR(name.data)) return PTR_ERR(name.data); diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 598f05867059..dbd27a44632f 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -350,7 +350,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || level >= NILFS_BTREE_LEVEL_MAX || (flags & NILFS_BTREE_NODE_ROOT) || - nchildren < 0 || + nchildren <= 0 || nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) { nilfs_crit(inode->i_sb, "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d", @@ -381,7 +381,8 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || level >= NILFS_BTREE_LEVEL_MAX || nchildren < 0 || - nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { + nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX || + (nchildren == 0 && level > NILFS_BTREE_LEVEL_NODE_MIN))) { nilfs_crit(inode->i_sb, "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d", inode->i_ino, level, flags, nchildren); @@ -1658,13 +1659,16 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) int nchildren, ret;
root = nilfs_btree_get_root(btree); + nchildren = nilfs_btree_node_get_nchildren(root); + if (unlikely(nchildren == 0)) + return 0; + switch (nilfs_btree_height(btree)) { case 2: bh = NULL; node = root; break; case 3: - nchildren = nilfs_btree_node_get_nchildren(root); if (nchildren > 1) return 0; ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, @@ -1673,12 +1677,12 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) if (ret < 0) return ret; node = (struct nilfs_btree_node *)bh->b_data; + nchildren = nilfs_btree_node_get_nchildren(node); break; default: return 0; }
- nchildren = nilfs_btree_node_get_nchildren(node); maxkey = nilfs_btree_node_get_key(node, nchildren - 1); nextmaxkey = (nchildren > 1) ? nilfs_btree_node_get_key(node, nchildren - 2) : 0; diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index dceb4bc76a66..2c548e8efef0 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -496,7 +496,7 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp, int err = 0;
if (work->conn->connection_type) { - if (!(fp->daccess & FILE_WRITE_DATA_LE)) { + if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) { pr_err("no right to write(%pD)\n", fp->filp); err = -EACCES; goto out; @@ -1110,9 +1110,10 @@ static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen, struct ksmbd_readdir_data *buf;
buf = container_of(ctx, struct ksmbd_readdir_data, ctx); - buf->dirent_count++; + if (!is_dot_dotdot(name, namlen)) + buf->dirent_count++;
- return buf->dirent_count <= 2; + return !buf->dirent_count; }
/** @@ -1132,7 +1133,7 @@ int ksmbd_vfs_empty_dir(struct ksmbd_file *fp) readdir_data.dirent_count = 0;
err = iterate_dir(fp->filp, &readdir_data.ctx); - if (readdir_data.dirent_count > 2) + if (readdir_data.dirent_count) err = -ENOTEMPTY; else err = 0; @@ -1161,7 +1162,7 @@ static bool __caseless_lookup(struct dir_context *ctx, const char *name, if (cmp < 0) cmp = strncasecmp((char *)buf->private, name, namlen); if (!cmp) { - memcpy((char *)buf->private, name, namlen); + memcpy((char *)buf->private, name, buf->used); buf->dirent_count = 1; return false; } @@ -1229,10 +1230,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, char *filepath; size_t path_len, remain_len;
- filepath = kstrdup(name, GFP_KERNEL); - if (!filepath) - return -ENOMEM; - + filepath = name; path_len = strlen(filepath); remain_len = path_len;
@@ -1275,10 +1273,9 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, err = -EINVAL; out2: path_put(parent_path); -out1: - kfree(filepath); }
+out1: if (!err) { err = mnt_want_write(parent_path->mnt); if (err) { diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index c0b69ffe7bdb..ec425d2834f8 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -64,6 +64,8 @@ struct cpc_desc { int cpu_id; int write_cmd_status; int write_cmd_id; + /* Lock used for RMW operations in cpc_write() */ + spinlock_t rmw_lock; struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT]; struct acpi_psd_package domain_info; struct kobject kobj; diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 3c8d2b87a9ed..729ec2453149 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -77,6 +77,10 @@ struct device; * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst * bitmap_get_value8(map, start) Get 8bit value from map at start * bitmap_set_value8(map, value, start) Set 8bit value to map at start + * bitmap_read(map, start, nbits) Read an nbits-sized value from + * map at start + * bitmap_write(map, value, start, nbits) Write an nbits-sized value to + * map at start * * Note, bitmap_zero() and bitmap_fill() operate over the region of * unsigned longs, that is, bits behind bitmap till the unsigned long @@ -613,6 +617,79 @@ static inline void bitmap_set_value8(unsigned long *map, unsigned long value, map[index] |= value << offset; }
+/** + * bitmap_read - read a value of n-bits from the memory region + * @map: address to the bitmap memory region + * @start: bit offset of the n-bit value + * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG + * + * Returns: value of @nbits bits located at the @start bit offset within the + * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return + * value is undefined. + */ +static inline unsigned long bitmap_read(const unsigned long *map, + unsigned long start, + unsigned long nbits) +{ + size_t index = BIT_WORD(start); + unsigned long offset = start % BITS_PER_LONG; + unsigned long space = BITS_PER_LONG - offset; + unsigned long value_low, value_high; + + if (unlikely(!nbits || nbits > BITS_PER_LONG)) + return 0; + + if (space >= nbits) + return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits); + + value_low = map[index] & BITMAP_FIRST_WORD_MASK(start); + value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits); + return (value_low >> offset) | (value_high << space); +} + +/** + * bitmap_write - write n-bit value within a memory region + * @map: address to the bitmap memory region + * @value: value to write, clamped to nbits + * @start: bit offset of the n-bit value + * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG. + * + * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(), + * i.e. bits beyond @nbits are ignored: + * + * for (bit = 0; bit < nbits; bit++) + * __assign_bit(start + bit, bitmap, val & BIT(bit)); + * + * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed. + */ +static inline void bitmap_write(unsigned long *map, unsigned long value, + unsigned long start, unsigned long nbits) +{ + size_t index; + unsigned long offset; + unsigned long space; + unsigned long mask; + bool fit; + + if (unlikely(!nbits || nbits > BITS_PER_LONG)) + return; + + mask = BITMAP_LAST_WORD_MASK(nbits); + value &= mask; + offset = start % BITS_PER_LONG; + space = BITS_PER_LONG - offset; + fit = space >= nbits; + index = BIT_WORD(start); + + map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start)); + map[index] |= value << offset; + if (fit) + return; + + map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits); + map[index + 1] |= (value >> space); +} + #endif /* __ASSEMBLY__ */
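For reference, the interesting case for these helpers is a value that straddles a word boundary. The sketch below re-expresses the same masking arithmetic in userspace so it can be compiled and tested on its own; it is illustrative only (nbits is assumed to be 1..BITS_PER_LONG) and is not part of the kernel header:

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* keep only the low nbits bits; caller guarantees 1 <= nbits <= BITS_PER_LONG */
static unsigned long low_mask(unsigned long nbits)
{
	return ~0UL >> (BITS_PER_LONG - nbits);
}

static unsigned long bmap_read(const unsigned long *map,
			       unsigned long start, unsigned long nbits)
{
	unsigned long index = start / BITS_PER_LONG;
	unsigned long offset = start % BITS_PER_LONG;
	unsigned long space = BITS_PER_LONG - offset;

	if (space >= nbits)
		return (map[index] >> offset) & low_mask(nbits);
	/* value is split across map[index] and map[index + 1] */
	return ((map[index] >> offset) | (map[index + 1] << space)) &
	       low_mask(nbits);
}

static void bmap_write(unsigned long *map, unsigned long value,
		       unsigned long start, unsigned long nbits)
{
	unsigned long index = start / BITS_PER_LONG;
	unsigned long offset = start % BITS_PER_LONG;
	unsigned long space = BITS_PER_LONG - offset;
	unsigned long mask = low_mask(nbits);

	value &= mask;				/* bits beyond nbits are ignored */
	map[index] &= ~(mask << offset);
	map[index] |= value << offset;
	if (space >= nbits)
		return;
	map[index + 1] &= ~(mask >> space);	/* low bits of the next word */
	map[index + 1] |= value >> space;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };
	unsigned long start = BITS_PER_LONG - 3;	/* 3 bits left in word 0 */

	bmap_write(map, 0x5a, start, 7);		/* crosses into word 1 */
	assert(bmap_read(map, start, 7) == 0x5a);
	printf("ok\n");
	return 0;
}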
#endif /* __LINUX_BITMAP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e4cd28c38b82..722102518dc9 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -675,6 +675,11 @@ enum bpf_type_flag { /* DYNPTR points to xdp_buff */ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+ /* Memory must be aligned on some architectures, used in combination with + * MEM_FIXED_SIZE. + */ + MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; @@ -711,8 +716,6 @@ enum bpf_arg_type { ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ - ARG_PTR_TO_INT, /* pointer to int */ - ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 1352a24d72ef..b9affa64b7fa 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -262,7 +262,7 @@ struct f2fs_extent { #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ -#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ +#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ #define F2FS_PIN_FILE 0x40 /* file should not be gced */ #define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 2a554e4045a9..6c3d86532e3f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2844,6 +2844,17 @@ extern bool path_is_under(const struct path *, const struct path *);
extern char *file_path(struct file *, char *, int);
+/** + * is_dot_dotdot - returns true only if @name is "." or ".." + * @name: file name to check + * @len: length of file name, in bytes + */ +static inline bool is_dot_dotdot(const char *name, size_t len) +{ + return len && unlikely(name[0] == '.') && + (len == 1 || (len == 2 && name[1] == '.')); +} + #include <linux/err.h>
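This consolidates the per-filesystem copies removed elsewhere in the patch (the f2fs helper above and the open-coded check in lookup_one_common()). A quick userspace check of the boundary cases, using the same logic, illustrative only:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* same predicate as the new fs.h helper, minus the unlikely() hint */
static bool is_dot_dotdot(const char *name, size_t len)
{
	return len && name[0] == '.' &&
	       (len == 1 || (len == 2 && name[1] == '.'));
}

int main(void)
{
	assert(is_dot_dotdot(".", 1));
	assert(is_dot_dotdot("..", 2));
	assert(!is_dot_dotdot(".a", 2));	/* hidden file, not "." */
	assert(!is_dot_dotdot("...", 3));
	assert(!is_dot_dotdot("", 0));
	return 0;
}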
/* needed for stackable file system support */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 830b925c2d00..b6a4d6471b4a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1732,8 +1732,8 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) unsigned int pid_bit;
pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG)); - if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->access_pids[1])) { - __set_bit(pid_bit, &vma->numab_state->access_pids[1]); + if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) { + __set_bit(pid_bit, &vma->numab_state->pids_active[1]); } } #else /* !CONFIG_NUMA_BALANCING */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ba25777ec0a7..43c19d85dfe7 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -551,9 +551,36 @@ struct vma_lock { };
struct vma_numab_state { + /* + * Initialised as time in 'jiffies' after which VMA + * should be scanned. Delays first scan of new VMA by at + * least sysctl_numa_balancing_scan_delay: + */ unsigned long next_scan; - unsigned long next_pid_reset; - unsigned long access_pids[2]; + + /* + * Time in jiffies when pids_active[] is reset to + * detect phase change behaviour: + */ + unsigned long pids_active_reset; + + /* + * Approximate tracking of PIDs that trapped a NUMA hinting + * fault. May produce false positives due to hash collisions. + * + * [0] Previous PID tracking + * [1] Current PID tracking + * + * Window moves after next_pid_reset has expired approximately + * every VMA_PID_RESET_PERIOD jiffies: + */ + unsigned long pids_active[2]; + + /* + * MM scan sequence ID when the VMA was last completely scanned. + * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq + */ + int prev_scan_seq; };
/* diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index c09cdcc99471..189140bf11fc 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -40,7 +40,7 @@ struct sbitmap_word { /** * @swap_lock: serializes simultaneous updates of ->word and ->cleared */ - spinlock_t swap_lock; + raw_spinlock_t swap_lock; } ____cacheline_aligned_in_smp;
/** diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h index 3988762efe15..b69afb8630db 100644 --- a/include/linux/sched/numa_balancing.h +++ b/include/linux/sched/numa_balancing.h @@ -15,6 +15,16 @@ #define TNF_FAULT_LOCAL 0x08 #define TNF_MIGRATE_FAIL 0x10
+enum numa_vmaskip_reason { + NUMAB_SKIP_UNSUITABLE, + NUMAB_SKIP_SHARED_RO, + NUMAB_SKIP_INACCESSIBLE, + NUMAB_SKIP_SCAN_DELAY, + NUMAB_SKIP_PID_INACTIVE, + NUMAB_SKIP_IGNORE_PID, + NUMAB_SKIP_SEQ_COMPLETED, +}; + #ifdef CONFIG_NUMA_BALANCING extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 9f08a584d707..0b9f1e598e3a 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -76,8 +76,23 @@ struct usbnet { # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 # define EVENT_NO_IP_ALIGN 13 +/* This one is special, as it indicates that the device is going away + * there are cyclic dependencies between tasklet, timer and bh + * that must be broken + */ +# define EVENT_UNPLUG 31 };
+static inline bool usbnet_going_away(struct usbnet *ubn) +{ + return test_bit(EVENT_UNPLUG, &ubn->flags); +} + +static inline void usbnet_mark_going_away(struct usbnet *ubn) +{ + set_bit(EVENT_UNPLUG, &ubn->flags); +} + static inline struct usb_driver *driver_of(struct usb_interface *intf) { return to_usb_driver(intf->dev.driver); diff --git a/include/linux/xarray.h b/include/linux/xarray.h index cb571dfcf4b1..d9d479334c9e 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1548,6 +1548,7 @@ void xas_create_range(struct xa_state *);
#ifdef CONFIG_XARRAY_MULTI int xa_get_order(struct xarray *, unsigned long index); +int xas_get_order(struct xa_state *xas); void xas_split(struct xa_state *, void *entry, unsigned int order); void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t); #else @@ -1556,6 +1557,11 @@ static inline int xa_get_order(struct xarray *xa, unsigned long index) return 0; }
+static inline int xas_get_order(struct xa_state *xas) +{ + return 0; +} + static inline void xas_split(struct xa_state *xas, void *entry, unsigned int order) { diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 29f1549ee111..0f50c0cefcb7 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -2224,8 +2224,8 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, bool mgmt_connected); void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); -void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status); +void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, + u8 status); void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); diff --git a/include/net/ip.h b/include/net/ip.h index 6f1ff4846451..7db5912e0c5f 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -786,6 +786,8 @@ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) }
bool icmp_global_allow(void); +void icmp_global_consume(void); + extern int sysctl_icmp_msgs_per_sec; extern int sysctl_icmp_msgs_burst;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a39bd4169f29..47ade676565d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -936,8 +936,9 @@ enum mac80211_tx_info_flags { * of their QoS TID or other priority field values. * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally * for sequence number assignment - * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted - * due to scanning, not in normal operation on the interface. + * @IEEE80211_TX_CTRL_DONT_USE_RATE_MASK: Don't use rate mask for this frame + * which is transmitted due to scanning or offchannel TX, not in normal + * operation on the interface. * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this * frame should be transmitted on the specific link. This really is * only relevant for frames that do not have data present, and is @@ -958,7 +959,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_NO_SEQNO = BIT(7), IEEE80211_TX_CTRL_DONT_REORDER = BIT(8), IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9), - IEEE80211_TX_CTRL_SCAN_TX = BIT(10), + IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = BIT(10), IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000, };
diff --git a/include/net/tcp.h b/include/net/tcp.h index c206ffaa8ed7..b3917af309e0 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2230,9 +2230,26 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk) { const struct sk_buff *skb = tcp_rtx_queue_head(sk); u32 rto = inet_csk(sk)->icsk_rto; - u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; + if (likely(skb)) { + u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto); + + return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; + } else { + WARN_ONCE(1, + "rtx queue empty: " + "out:%u sacked:%u lost:%u retrans:%u " + "tlp_high_seq:%u sk_state:%u ca_state:%u " + "advmss:%u mss_cache:%u pmtu:%u\n", + tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out, + tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out, + tcp_sk(sk)->tlp_high_seq, sk->sk_state, + inet_csk(sk)->icsk_ca_state, + tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache, + inet_csk(sk)->icsk_pmtu_cookie); + return jiffies_to_usecs(rto); + } + }
/* diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h index be58d870505a..f97f386e5a55 100644 --- a/include/sound/tas2781.h +++ b/include/sound/tas2781.h @@ -78,11 +78,6 @@ struct tasdevice { bool is_loaderr; };
-struct tasdevice_irqinfo { - int irq_gpio; - int irq; -}; - struct calidata { unsigned char *data; unsigned long total_sz; @@ -90,7 +85,6 @@ struct calidata {
struct tasdevice_priv { struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS]; - struct tasdevice_irqinfo irq_info; struct tasdevice_rca rcabin; struct calidata cali_data; struct tasdevice_fw *fmw; @@ -101,7 +95,6 @@ struct tasdevice_priv { struct tm tm;
enum device_catlog_id catlog_id; - const char *acpi_subsystem_id; unsigned char cal_binaryname[TASDEVICE_MAX_CHANNELS][64]; unsigned char crc8_lkp_tbl[CRC8_TABLE_SIZE]; unsigned char coef_binaryname[64]; @@ -112,6 +105,7 @@ struct tasdevice_priv { unsigned int chip_id; unsigned int sysclk;
+ int irq; int cur_prog; int cur_conf; int fw_state; diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 793f82cc1515..b6ffae01a8cd 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE); { CP_NODE_NEED_CP, "node needs cp" }, \ { CP_FASTBOOT_MODE, "fastboot mode" }, \ { CP_SPEC_LOG_NUM, "log type is 2" }, \ - { CP_RECOVER_DIR, "dir needs recovery" }) + { CP_RECOVER_DIR, "dir needs recovery" }, \ + { CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \ __print_symbolic(type, \ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index fbb99a61f714..010ba1b7cb0e 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -664,6 +664,58 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa, TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu) );
+#ifdef CONFIG_NUMA_BALANCING +#define NUMAB_SKIP_REASON \ + EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \ + EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \ + EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \ + EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \ + EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \ + EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \ + EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" ) + +/* Redefine for export. */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +NUMAB_SKIP_REASON + +/* Redefine for symbolic printing. */ +#undef EM +#undef EMe +#define EM(a, b) { a, b }, +#define EMe(a, b) { a, b } + +TRACE_EVENT(sched_skip_vma_numa, + + TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma, + enum numa_vmaskip_reason reason), + + TP_ARGS(mm, vma, reason), + + TP_STRUCT__entry( + __field(unsigned long, numa_scan_offset) + __field(unsigned long, vm_start) + __field(unsigned long, vm_end) + __field(enum numa_vmaskip_reason, reason) + ), + + TP_fast_assign( + __entry->numa_scan_offset = mm->numa_scan_offset; + __entry->vm_start = vma->vm_start; + __entry->vm_end = vma->vm_end; + __entry->reason = reason; + ), + + TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s", + __entry->numa_scan_offset, + __entry->vm_start, + __entry->vm_end, + __print_symbolic(__entry->reason, NUMAB_SKIP_REASON)) +); +#endif /* CONFIG_NUMA_BALANCING */
/* * Tracepoint for waking a polling cpu without an IPI. diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index 98c9cfb98306..a1e31723c9ed 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/rculist_nulls.h> #include <linux/cpu.h> +#include <linux/cpuset.h> #include <linux/task_work.h> #include <linux/audit.h> #include <linux/mmu_context.h> @@ -1169,7 +1170,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL)) goto err; - cpumask_copy(wq->cpu_mask, cpu_possible_mask); + cpuset_cpus_allowed(data->task, wq->cpu_mask); wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; wq->acct[IO_WQ_ACCT_UNBOUND].max_workers = task_rlimit(current, RLIMIT_NPROC); @@ -1324,17 +1325,29 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask) { + cpumask_var_t allowed_mask; + int ret = 0; + if (!tctx || !tctx->io_wq) return -EINVAL;
+ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL)) + return -ENOMEM; + rcu_read_lock(); - if (mask) - cpumask_copy(tctx->io_wq->cpu_mask, mask); - else - cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask); + cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask); + if (mask) { + if (cpumask_subset(mask, allowed_mask)) + cpumask_copy(tctx->io_wq->cpu_mask, mask); + else + ret = -EINVAL; + } else { + cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask); + } rcu_read_unlock();
- return 0; + free_cpumask_var(allowed_mask); + return ret; }
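The rule being enforced here is simply that a requested affinity mask must be a subset of the CPUs the task's cpuset allows; otherwise the request is rejected with -EINVAL. With plain word-sized masks the check looks like this (illustrative only; the kernel operates on cpumask_var_t via cpumask_subset()/cpumask_copy()):

#include <assert.h>
#include <stdio.h>

/* "req" is acceptable only if every CPU bit set in it is also in "allowed" */
static int mask_is_subset(unsigned long req, unsigned long allowed)
{
	return (req & ~allowed) == 0;
}

int main(void)
{
	unsigned long allowed = 0x0f;	/* hypothetical cpuset: CPUs 0-3 */

	assert(mask_is_subset(0x05, allowed));	/* CPUs 0 and 2: accepted */
	assert(!mask_is_subset(0x30, allowed));	/* CPUs 4 and 5: rejected */
	printf("ok\n");
	return 0;
}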
/* diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 68504709f75c..7b0a100e1139 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2514,7 +2514,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, return 1; if (unlikely(!llist_empty(&ctx->work_llist))) return 1; - if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) + if (unlikely(task_work_pending(current))) return 1; if (unlikely(task_sigpending(current))) return -EINTR; @@ -2610,9 +2610,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, * If we got woken because of task_work being processed, run it * now rather than let the caller do another wait loop. */ - io_run_task_work(); if (!llist_empty(&ctx->work_llist)) io_run_local_work(ctx, nr_wait); + io_run_task_work();
/* * Non-local task_work will be run on exit to userspace, but diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 350436e55aaf..cdf8b567cb94 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/audit.h> #include <linux/security.h> +#include <linux/cpuset.h> #include <linux/io_uring.h>
#include <uapi/linux/io_uring.h> @@ -401,11 +402,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, return 0;
if (p->flags & IORING_SETUP_SQ_AFF) { + cpumask_var_t allowed_mask; int cpu = p->sq_thread_cpu;
ret = -EINVAL; if (cpu >= nr_cpu_ids || !cpu_online(cpu)) goto err_sqpoll; + ret = -ENOMEM; + if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL)) + goto err_sqpoll; + ret = -EINVAL; + cpuset_cpus_allowed(current, allowed_mask); + if (!cpumask_test_cpu(cpu, allowed_mask)) { + free_cpumask_var(allowed_mask); + goto err_sqpoll; + } + free_cpumask_var(allowed_mask); sqd->sq_cpu = cpu; } else { sqd->sq_cpu = -1; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index fbf9721ba21b..e0e4d4f490e8 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8421,6 +8421,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, struct bpf_core_cand_list cands = {}; struct bpf_core_relo_res targ_res; struct bpf_core_spec *specs; + const struct btf_type *type; int err;
/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" @@ -8430,6 +8431,13 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, if (!specs) return -ENOMEM;
+ type = btf_type_by_id(ctx->btf, relo->type_id); + if (!type) { + bpf_log(ctx->log, "relo #%u: bad type id %u\n", + relo_idx, relo->type_id); + return -EINVAL; + } + if (need_cands) { struct bpf_cand_cache *cc; int i; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 9ab6be965305..3dba5bb294d8 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -516,11 +516,12 @@ static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, }
BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, - long *, res) + s64 *, res) { long long _res; int err;
+ *res = 0; err = __bpf_strtoll(buf, buf_len, flags, &_res); if (err < 0) return err; @@ -537,16 +538,18 @@ const struct bpf_func_proto bpf_strtol_proto = { .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(s64), };
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, - unsigned long *, res) + u64 *, res) { unsigned long long _res; bool is_negative; int err;
+ *res = 0; err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); if (err < 0) return err; @@ -565,7 +568,8 @@ const struct bpf_func_proto bpf_strtoul_proto = { .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(u64), };
BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 65df92f5b192..b1933d074f05 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -5647,6 +5647,7 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) { + *res = 0; if (flags) return -EINVAL;
@@ -5667,7 +5668,8 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { .arg1_type = ARG_PTR_TO_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(u64), };
static const struct bpf_func_proto * diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9d5699942273..834394faf2af 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7987,6 +7987,12 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type) type == ARG_CONST_SIZE_OR_ZERO; }
+static bool arg_type_is_raw_mem(enum bpf_arg_type type) +{ + return base_type(type) == ARG_PTR_TO_MEM && + type & MEM_UNINIT; +} + static bool arg_type_is_release(enum bpf_arg_type type) { return type & OBJ_RELEASE; @@ -7997,16 +8003,6 @@ static bool arg_type_is_dynptr(enum bpf_arg_type type) return base_type(type) == ARG_PTR_TO_DYNPTR; }
-static int int_ptr_type_to_size(enum bpf_arg_type type) -{ - if (type == ARG_PTR_TO_INT) - return sizeof(u32); - else if (type == ARG_PTR_TO_LONG) - return sizeof(u64); - - return -EINVAL; -} - static int resolve_map_arg_type(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_arg_type *arg_type) @@ -8079,16 +8075,6 @@ static const struct bpf_reg_types mem_types = { }, };
-static const struct bpf_reg_types int_ptr_types = { - .types = { - PTR_TO_STACK, - PTR_TO_PACKET, - PTR_TO_PACKET_META, - PTR_TO_MAP_KEY, - PTR_TO_MAP_VALUE, - }, -}; - static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE, @@ -8143,8 +8129,6 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, [ARG_PTR_TO_MEM] = &mem_types, [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types, - [ARG_PTR_TO_INT] = &int_ptr_types, - [ARG_PTR_TO_LONG] = &int_ptr_types, [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, [ARG_PTR_TO_FUNC] = &func_ptr_types, [ARG_PTR_TO_STACK] = &stack_ptr_types, @@ -8651,9 +8635,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, */ meta->raw_mode = arg_type & MEM_UNINIT; if (arg_type & MEM_FIXED_SIZE) { - err = check_helper_mem_access(env, regno, - fn->arg_size[arg], false, - meta); + err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta); + if (err) + return err; + if (arg_type & MEM_ALIGNED) + err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); } break; case ARG_CONST_SIZE: @@ -8678,17 +8664,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, if (err) return err; break; - case ARG_PTR_TO_INT: - case ARG_PTR_TO_LONG: - { - int size = int_ptr_type_to_size(arg_type); - - err = check_helper_mem_access(env, regno, size, false, meta); - if (err) - return err; - err = check_ptr_alignment(env, reg, 0, size, true); - break; - } case ARG_PTR_TO_CONST_STR: { struct bpf_map *map = reg->map_ptr; @@ -9040,15 +9015,15 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn) { int count = 0;
- if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg1_type)) count++; - if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg2_type)) count++; - if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg3_type)) count++; - if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg4_type)) count++; - if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg5_type)) count++;
/* We only support one arg being in raw mode at the moment, diff --git a/kernel/kthread.c b/kernel/kthread.c index 290cbc845225..ac03bc55e17c 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -844,8 +844,16 @@ int kthread_worker_fn(void *worker_ptr) * event only cares about the address. */ trace_sched_kthread_work_execute_end(work, func); - } else if (!freezing(current)) + } else if (!freezing(current)) { schedule(); + } else { + /* + * Handle the case where the current remains + * TASK_INTERRUPTIBLE. try_to_freeze() expects + * the current to be TASK_RUNNING. + */ + __set_current_state(TASK_RUNNING); + }
try_to_freeze(); cond_resched(); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 151bd3de5936..3468d8230e5f 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -6184,25 +6184,27 @@ static struct pending_free *get_pending_free(void) static void free_zapped_rcu(struct rcu_head *cb);
/* - * Schedule an RCU callback if no RCU callback is pending. Must be called with - * the graph lock held. - */ -static void call_rcu_zapped(struct pending_free *pf) +* See if we need to queue an RCU callback, must be called with +* the lockdep lock held, returns false if either we don't have +* any pending free or the callback is already scheduled. +* Otherwise, a call_rcu() must follow this function call. +*/ +static bool prepare_call_rcu_zapped(struct pending_free *pf) { WARN_ON_ONCE(inside_selftest());
if (list_empty(&pf->zapped)) - return; + return false;
if (delayed_free.scheduled) - return; + return false;
delayed_free.scheduled = true;
WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf); delayed_free.index ^= 1;
- call_rcu(&delayed_free.rcu_head, free_zapped_rcu); + return true; }
/* The caller must hold the graph lock. May be called from RCU context. */ @@ -6228,6 +6230,7 @@ static void free_zapped_rcu(struct rcu_head *ch) { struct pending_free *pf; unsigned long flags; + bool need_callback;
if (WARN_ON_ONCE(ch != &delayed_free.rcu_head)) return; @@ -6239,14 +6242,18 @@ static void free_zapped_rcu(struct rcu_head *ch) pf = delayed_free.pf + (delayed_free.index ^ 1); __free_zapped_classes(pf); delayed_free.scheduled = false; + need_callback = + prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index); + lockdep_unlock(); + raw_local_irq_restore(flags);
/* - * If there's anything on the open list, close and start a new callback. - */ - call_rcu_zapped(delayed_free.pf + delayed_free.index); + * If there's pending free and its callback has not been scheduled, + * queue an RCU callback. + */ + if (need_callback) + call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
- lockdep_unlock(); - raw_local_irq_restore(flags); }
/* @@ -6286,6 +6293,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size) { struct pending_free *pf; unsigned long flags; + bool need_callback;
init_data_structures_once();
@@ -6293,10 +6301,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size) lockdep_lock(); pf = get_pending_free(); __lockdep_free_key_range(pf, start, size); - call_rcu_zapped(pf); + need_callback = prepare_call_rcu_zapped(pf); lockdep_unlock(); raw_local_irq_restore(flags); - + if (need_callback) + call_rcu(&delayed_free.rcu_head, free_zapped_rcu); /* * Wait for any possible iterators from look_up_lock_class() to pass * before continuing to free the memory they refer to. @@ -6390,6 +6399,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock) struct pending_free *pf; unsigned long flags; int locked; + bool need_callback = false;
raw_local_irq_save(flags); locked = graph_lock(); @@ -6398,11 +6408,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
pf = get_pending_free(); __lockdep_reset_lock(pf, lock); - call_rcu_zapped(pf); + need_callback = prepare_call_rcu_zapped(pf);
graph_unlock(); out_irq: raw_local_irq_restore(flags); + if (need_callback) + call_rcu(&delayed_free.rcu_head, free_zapped_rcu); }
/* @@ -6446,6 +6458,7 @@ void lockdep_unregister_key(struct lock_class_key *key) struct pending_free *pf; unsigned long flags; bool found = false; + bool need_callback = false;
might_sleep();
@@ -6466,11 +6479,14 @@ void lockdep_unregister_key(struct lock_class_key *key) if (found) { pf = get_pending_free(); __lockdep_free_key_range(pf, key, 1); - call_rcu_zapped(pf); + need_callback = prepare_call_rcu_zapped(pf); } lockdep_unlock(); raw_local_irq_restore(flags);
+ if (need_callback) + call_rcu(&delayed_free.rcu_head, free_zapped_rcu); + /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ synchronize_rcu(); } diff --git a/kernel/module/Makefile b/kernel/module/Makefile index a10b2b9a6fdf..50ffcc413b54 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -5,7 +5,7 @@
# These are called from save_stack_trace() on slub debug path, # and produce insane amounts of uninteresting coverage. -KCOV_INSTRUMENT_module.o := n +KCOV_INSTRUMENT_main.o := n
obj-y += main.o obj-y += strict_rwx.o diff --git a/kernel/padata.c b/kernel/padata.c index 29545dd6dd53..9bf77b58ee08 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -404,7 +404,8 @@ void padata_do_serial(struct padata_priv *padata) /* Sort in ascending order of sequence number. */ list_for_each_prev(pos, &reorder->list) { cur = list_entry(pos, struct padata_priv, list); - if (cur->seq_nr < padata->seq_nr) + /* Compare by difference to consider integer wrap around */ + if ((signed int)(cur->seq_nr - padata->seq_nr) < 0) break; } list_add(&padata->list, pos); @@ -511,9 +512,12 @@ void __init padata_do_multithreaded(struct padata_mt_job *job) * thread function. Load balance large jobs between threads by * increasing the number of chunks, guarantee at least the minimum * chunk size from the caller, and honor the caller's alignment. + * Ensure chunk_size is at least 1 to prevent divide-by-0 + * panic in padata_mt_helper(). */ ps.chunk_size = job->size / (ps.nworks * load_balance_factor); ps.chunk_size = max(ps.chunk_size, job->min_chunk); + ps.chunk_size = max(ps.chunk_size, 1ul); ps.chunk_size = roundup(ps.chunk_size, job->align);
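Two small hardening points in the padata hunk above are worth calling out: the seq_nr comparison now uses a signed difference so ordering stays correct when the sequence counter wraps around, and chunk_size is clamped to at least 1 so the division noted in the comment cannot hit zero. A standalone illustration of the wrap-safe comparison (values are made up):

#include <assert.h>
#include <stdio.h>

/* wrap-safe "a was queued before b", as in the padata reorder list walk */
static int seq_before(unsigned int a, unsigned int b)
{
	/* relies on the usual two's-complement wrap of unsigned subtraction */
	return (int)(a - b) < 0;
}

int main(void)
{
	unsigned int old = 0xfffffffeu;	/* just before the counter wraps */
	unsigned int new = 0x00000001u;	/* just after the wrap */

	assert((old < new) == 0);	/* a plain compare orders these wrongly */
	assert(seq_before(old, new));	/* the signed difference does not */
	assert(!seq_before(new, old));
	printf("ok\n");
	return 0;
}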
/* diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 30b34f215ca3..e019f166daa6 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -220,7 +220,10 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp, raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); if (needwake) { trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); - wake_up_process(rdp_gp->nocb_gp_kthread); + if (cpu_is_offline(raw_smp_processor_id())) + swake_up_one_online(&rdp_gp->nocb_gp_wq); + else + wake_up_process(rdp_gp->nocb_gp_kthread); }
return needwake; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b2e1009e5706..5eb4807bad20 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -533,7 +533,7 @@ static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
static int se_is_idle(struct sched_entity *se) { - return 0; + return task_has_idle_policy(task_of(se)); }
#endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -3188,7 +3188,7 @@ static void reset_ptenuma_scan(struct task_struct *p) p->mm->numa_scan_offset = 0; }
-static bool vma_is_accessed(struct vm_area_struct *vma) +static bool vma_is_accessed(struct mm_struct *mm, struct vm_area_struct *vma) { unsigned long pids; /* @@ -3200,8 +3200,29 @@ static bool vma_is_accessed(struct vm_area_struct *vma) if (READ_ONCE(current->mm->numa_scan_seq) < 2) return true;
- pids = vma->numab_state->access_pids[0] | vma->numab_state->access_pids[1]; - return test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids); + pids = vma->numab_state->pids_active[0] | vma->numab_state->pids_active[1]; + if (test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids)) + return true; + + /* + * Complete a scan that has already started regardless of PID access, or + * some VMAs may never be scanned in multi-threaded applications: + */ + if (mm->numa_scan_offset > vma->vm_start) { + trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_IGNORE_PID); + return true; + } + + /* + * This vma has not been accessed for a while, and if the number + * of threads in the same process is low, which means no other + * threads can help scan this vma, force a vma scan. + */ + if (READ_ONCE(mm->numa_scan_seq) > + (vma->numab_state->prev_scan_seq + get_nr_threads(current))) + return true; + + return false; }
#define VMA_PID_RESET_PERIOD (4 * sysctl_numa_balancing_scan_delay) @@ -3221,6 +3242,8 @@ static void task_numa_work(struct callback_head *work) unsigned long nr_pte_updates = 0; long pages, virtpages; struct vma_iterator vmi; + bool vma_pids_skipped; + bool vma_pids_forced = false;
SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
@@ -3263,7 +3286,6 @@ static void task_numa_work(struct callback_head *work) */ p->node_stamp += 2 * TICK_NSEC;
- start = mm->numa_scan_offset; pages = sysctl_numa_balancing_scan_size; pages <<= 20 - PAGE_SHIFT; /* MB in pages */ virtpages = pages * 8; /* Scan up to this much virtual space */ @@ -3273,6 +3295,16 @@ static void task_numa_work(struct callback_head *work)
if (!mmap_read_trylock(mm)) return; + + /* + * VMAs are skipped if the current PID has not trapped a fault within + * the VMA recently. Allow scanning to be forced if there is no + * suitable VMA remaining. + */ + vma_pids_skipped = false; + +retry_pids: + start = mm->numa_scan_offset; vma_iter_init(&vmi, mm, start); vma = vma_next(&vmi); if (!vma) { @@ -3285,6 +3317,7 @@ static void task_numa_work(struct callback_head *work) do { if (!vma_migratable(vma) || !vma_policy_mof(vma) || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { + trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_UNSUITABLE); continue; }
@@ -3295,15 +3328,19 @@ static void task_numa_work(struct callback_head *work) * as migrating the pages will be of marginal benefit. */ if (!vma->vm_mm || - (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) + (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) { + trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SHARED_RO); continue; + }
/* * Skip inaccessible VMAs to avoid any confusion between * PROT_NONE and NUMA hinting ptes */ - if (!vma_is_accessible(vma)) + if (!vma_is_accessible(vma)) { + trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_INACCESSIBLE); continue; + }
/* Initialise new per-VMA NUMAB state. */ if (!vma->numab_state) { @@ -3316,8 +3353,15 @@ static void task_numa_work(struct callback_head *work) msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
/* Reset happens after 4 times scan delay of scan start */ - vma->numab_state->next_pid_reset = vma->numab_state->next_scan + + vma->numab_state->pids_active_reset = vma->numab_state->next_scan + msecs_to_jiffies(VMA_PID_RESET_PERIOD); + + /* + * Ensure prev_scan_seq does not match numa_scan_seq, + * to prevent VMAs being skipped prematurely on the + * first scan: + */ + vma->numab_state->prev_scan_seq = mm->numa_scan_seq - 1; }
/* @@ -3325,23 +3369,35 @@ static void task_numa_work(struct callback_head *work) * delay the scan for new VMAs. */ if (mm->numa_scan_seq && time_before(jiffies, - vma->numab_state->next_scan)) + vma->numab_state->next_scan)) { + trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SCAN_DELAY); continue; + }
- /* Do not scan the VMA if task has not accessed */ - if (!vma_is_accessed(vma)) + /* RESET access PIDs regularly for old VMAs. */ + if (mm->numa_scan_seq && + time_after(jiffies, vma->numab_state->pids_active_reset)) { + vma->numab_state->pids_active_reset = vma->numab_state->pids_active_reset + + msecs_to_jiffies(VMA_PID_RESET_PERIOD); + vma->numab_state->pids_active[0] = READ_ONCE(vma->numab_state->pids_active[1]); + vma->numab_state->pids_active[1] = 0; + } + + /* Do not rescan VMAs twice within the same sequence. */ + if (vma->numab_state->prev_scan_seq == mm->numa_scan_seq) { + mm->numa_scan_offset = vma->vm_end; + trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_SEQ_COMPLETED); continue; + }
/* - * RESET access PIDs regularly for old VMAs. Resetting after checking - * vma for recent access to avoid clearing PID info before access.. + * Do not scan the VMA if task has not accessed it, unless no other + * VMA candidate exists. */ - if (mm->numa_scan_seq && - time_after(jiffies, vma->numab_state->next_pid_reset)) { - vma->numab_state->next_pid_reset = vma->numab_state->next_pid_reset + - msecs_to_jiffies(VMA_PID_RESET_PERIOD); - vma->numab_state->access_pids[0] = READ_ONCE(vma->numab_state->access_pids[1]); - vma->numab_state->access_pids[1] = 0; + if (!vma_pids_forced && !vma_is_accessed(mm, vma)) { + vma_pids_skipped = true; + trace_sched_skip_vma_numa(mm, vma, NUMAB_SKIP_PID_INACTIVE); + continue; }
do { @@ -3368,8 +3424,28 @@ static void task_numa_work(struct callback_head *work)
cond_resched(); } while (end != vma->vm_end); + + /* VMA scan is complete, do not scan until next sequence. */ + vma->numab_state->prev_scan_seq = mm->numa_scan_seq; + + /* + * Only force scan within one VMA at a time, to limit the + * cost of scanning a potentially uninteresting VMA. + */ + if (vma_pids_forced) + break; } for_each_vma(vmi, vma);
+ /* + * If no VMAs are remaining and VMAs were skipped due to the PID + * not accessing the VMA previously, then force a scan to ensure + * forward progress: + */ + if (!vma && !vma_pids_forced && vma_pids_skipped) { + vma_pids_forced = true; + goto retry_pids; + } + out: /* * It is possible to reach the end of the VMA list but the last few @@ -8209,16 +8285,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ if (test_tsk_need_resched(curr)) return;
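Editorial note: the NUMA scanning changes above implement a "skip, then force" retry. VMAs are skipped while the per-PID filter says they are uninteresting, and if a whole pass produces nothing usable the filter is dropped for one forced pass so the scan still makes forward progress. A stand-alone sketch of that control flow (item_active[] and the limits are invented for illustration, not scheduler code):

#include <stdbool.h>
#include <stdio.h>

#define NR_ITEMS 4

static bool item_active[NR_ITEMS];      /* all false: nothing looks interesting */

int main(void)
{
	bool skipped, forced = false;
	int i, scanned = 0;

retry:
	skipped = false;
	for (i = 0; i < NR_ITEMS; i++) {
		if (!forced && !item_active[i]) {
			skipped = true;     /* remember we passed something over */
			continue;
		}
		scanned++;
		if (forced)
			break;              /* force at most one item per pass */
	}

	/* Nothing useful found, but candidates were skipped: force progress. */
	if (!scanned && !forced && skipped) {
		forced = true;
		goto retry;
	}

	printf("scanned %d item(s), forced=%d\n", scanned, forced);
	return 0;
}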
- /* Idle tasks are by definition preempted by non-idle tasks. */ - if (unlikely(task_has_idle_policy(curr)) && - likely(!task_has_idle_policy(p))) - goto preempt; - - /* - * Batch and idle tasks do not preempt non-idle tasks (their preemption - * is driven by the tick): - */ - if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) + if (!sched_feat(WAKEUP_PREEMPTION)) return;
find_matching_se(&se, &pse); @@ -8228,7 +8295,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ pse_is_idle = se_is_idle(pse);
/* - * Preempt an idle group in favor of a non-idle group (and don't preempt + * Preempt an idle entity in favor of a non-idle entity (and don't preempt * in the inverse case). */ if (cse_is_idle && !pse_is_idle) @@ -8236,9 +8303,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ if (cse_is_idle != pse_is_idle) return;
+ /* + * BATCH and IDLE tasks do not preempt others. + */ + if (unlikely(p->policy != SCHED_NORMAL)) + return; + cfs_rq = cfs_rq_of(se); update_curr(cfs_rq); - /* * XXX pick_eevdf(cfs_rq) != se ? */ diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index cc29bf49f715..eca858bde804 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1220,7 +1220,8 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_LONG, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u64), };
BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) @@ -1236,7 +1237,8 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = { .func = get_func_ret, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_LONG, + .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg2_size = sizeof(u64), };
BPF_CALL_1(get_func_arg_cnt, void *, ctx) @@ -3283,17 +3285,20 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr uprobes[i].ref_ctr_offset, &uprobes[i].consumer); if (err) { - bpf_uprobe_unregister(&path, uprobes, i); - goto error_free; + link->cnt = i; + goto error_unregister; } }
err = bpf_link_prime(&link->link, &link_primer); if (err) - goto error_free; + goto error_unregister;
return bpf_link_settle(&link_primer);
+error_unregister: + bpf_uprobe_unregister(&path, uprobes, link->cnt); + error_free: kvfree(uprobes); kfree(link); diff --git a/lib/debugobjects.c b/lib/debugobjects.c index c90834a209b5..9d401355d560 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -141,13 +141,14 @@ static void fill_pool(void) * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical * sections. */ - while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { + while (READ_ONCE(obj_nr_tofree) && + READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { raw_spin_lock_irqsave(&pool_lock, flags); /* * Recheck with the lock held as the worker thread might have * won the race and freed the global free list already. */ - while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { + while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) { obj = hlist_entry(obj_to_free.first, typeof(*obj), node); hlist_del(&obj->node); WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 9307bf17a817..1d5e15748692 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -65,7 +65,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map, { unsigned long mask, word_mask;
- guard(spinlock_irqsave)(&map->swap_lock); + guard(raw_spinlock_irqsave)(&map->swap_lock);
if (!map->cleared) { if (depth == 0) @@ -136,7 +136,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, }
for (i = 0; i < sb->map_nr; i++) - spin_lock_init(&sb->map[i].swap_lock); + raw_spin_lock_init(&sb->map[i].swap_lock);
return 0; } diff --git a/lib/test_xarray.c b/lib/test_xarray.c index e77d4856442c..542926da61a3 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1756,6 +1756,97 @@ static noinline void check_get_order(struct xarray *xa) } }
+static noinline void check_xas_get_order(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; + unsigned int order; + unsigned long i, j; + + for (order = 0; order < max_order; order++) { + for (i = 0; i < 10; i++) { + xas_set_order(&xas, i << order, order); + do { + xas_lock(&xas); + xas_store(&xas, xa_mk_value(i)); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + for (j = i << order; j < (i + 1) << order; j++) { + xas_set_order(&xas, j, 0); + rcu_read_lock(); + xas_load(&xas); + XA_BUG_ON(xa, xas_get_order(&xas) != order); + rcu_read_unlock(); + } + + xas_lock(&xas); + xas_set_order(&xas, i << order, order); + xas_store(&xas, NULL); + xas_unlock(&xas); + } + } +} + +static noinline void check_xas_conflict_get_order(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + + void *entry; + int only_once; + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; + unsigned int order; + unsigned long i, j, k; + + for (order = 0; order < max_order; order++) { + for (i = 0; i < 10; i++) { + xas_set_order(&xas, i << order, order); + do { + xas_lock(&xas); + xas_store(&xas, xa_mk_value(i)); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + /* + * Ensure xas_get_order works with xas_for_each_conflict. + */ + j = i << order; + for (k = 0; k < order; k++) { + only_once = 0; + xas_set_order(&xas, j + (1 << k), k); + xas_lock(&xas); + xas_for_each_conflict(&xas, entry) { + XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, xas_get_order(&xas) != order); + only_once++; + } + XA_BUG_ON(xa, only_once != 1); + xas_unlock(&xas); + } + + if (order < max_order - 1) { + only_once = 0; + xas_set_order(&xas, (i & ~1UL) << order, order + 1); + xas_lock(&xas); + xas_for_each_conflict(&xas, entry) { + XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, xas_get_order(&xas) != order); + only_once++; + } + XA_BUG_ON(xa, only_once != 1); + xas_unlock(&xas); + } + + xas_set_order(&xas, i << order, order); + xas_lock(&xas); + xas_store(&xas, NULL); + xas_unlock(&xas); + } + } +} + + static noinline void check_destroy(struct xarray *xa) { unsigned long index; @@ -1805,6 +1896,8 @@ static int xarray_checks(void) check_reserve(&xa0); check_multi_store(&array); check_get_order(&array); + check_xas_get_order(&array); + check_xas_conflict_get_order(&array); check_xa_alloc(); check_find(&array); check_find_entry(&array); diff --git a/lib/xarray.c b/lib/xarray.c index 39f07bfc4dcc..da79128ad754 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1750,39 +1750,52 @@ void *xa_store_range(struct xarray *xa, unsigned long first, EXPORT_SYMBOL(xa_store_range);
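Editorial note: the xarray hunks add xas_get_order() plus tests for it; per the kerneldoc that follows, the helper is only valid after xas_load() and with the RCU read lock (or xa_lock) held. A caller-side sketch modeled directly on the rewritten xa_get_order() wrapper below (report_entry_order() is an illustrative name, not an existing kernel function):

#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Look up @index in @xa and report the entry's order, or -1 if empty. */
static int report_entry_order(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;
	int order = -1;

	rcu_read_lock();
	entry = xas_load(&xas);
	if (entry)
		order = xas_get_order(&xas);
	rcu_read_unlock();

	return order;
}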
/** - * xa_get_order() - Get the order of an entry. - * @xa: XArray. - * @index: Index of the entry. + * xas_get_order() - Get the order of an entry. + * @xas: XArray operation state. + * + * Called after xas_load, the xas should not be in an error state. * * Return: A number between 0 and 63 indicating the order of the entry. */ -int xa_get_order(struct xarray *xa, unsigned long index) +int xas_get_order(struct xa_state *xas) { - XA_STATE(xas, xa, index); - void *entry; int order = 0;
- rcu_read_lock(); - entry = xas_load(&xas); - - if (!entry) - goto unlock; - - if (!xas.xa_node) - goto unlock; + if (!xas->xa_node) + return 0;
for (;;) { - unsigned int slot = xas.xa_offset + (1 << order); + unsigned int slot = xas->xa_offset + (1 << order);
if (slot >= XA_CHUNK_SIZE) break; - if (!xa_is_sibling(xas.xa_node->slots[slot])) + if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot))) break; order++; }
- order += xas.xa_node->shift; -unlock: + order += xas->xa_node->shift; + return order; +} +EXPORT_SYMBOL_GPL(xas_get_order); + +/** + * xa_get_order() - Get the order of an entry. + * @xa: XArray. + * @index: Index of the entry. + * + * Return: A number between 0 and 63 indicating the order of the entry. + */ +int xa_get_order(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + int order = 0; + void *entry; + + rcu_read_lock(); + entry = xas_load(&xas); + if (entry) + order = xas_get_order(&xas); rcu_read_unlock();
return order; diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c index 88a2c35e1b59..5627b00fca29 100644 --- a/lib/xz/xz_crc32.c +++ b/lib/xz/xz_crc32.c @@ -29,7 +29,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
XZ_EXTERN void xz_crc32_init(void) { - const uint32_t poly = CRC32_POLY_LE; + const uint32_t poly = 0xEDB88320;
uint32_t i; uint32_t j; diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h index bf1e94ec7873..d9fd49b45fd7 100644 --- a/lib/xz/xz_private.h +++ b/lib/xz/xz_private.h @@ -105,10 +105,6 @@ # endif #endif
-#ifndef CRC32_POLY_LE -#define CRC32_POLY_LE 0xedb88320 -#endif - /* * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used * before calling xz_dec_lzma2_run(). diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index cf8a9fc5c9d1..530f01fedd35 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -126,6 +126,7 @@ static int __damon_va_three_regions(struct mm_struct *mm, * If this is too slow, it can be optimised to examine the maple * tree gaps. */ + rcu_read_lock(); for_each_vma(vmi, vma) { unsigned long gap;
@@ -146,6 +147,7 @@ static int __damon_va_three_regions(struct mm_struct *mm, next: prev = vma; } + rcu_read_unlock();
if (!sz_range(&second_gap) || !sz_range(&first_gap)) return -EINVAL; diff --git a/mm/filemap.c b/mm/filemap.c index 2662c416e7fa..e6c112f3a211 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -846,6 +846,8 @@ noinline int __filemap_add_folio(struct address_space *mapping, { XA_STATE(xas, &mapping->i_pages, index); int huge = folio_test_hugetlb(folio); + void *alloced_shadow = NULL; + int alloced_order = 0; bool charged = false; long nr = 1;
@@ -868,13 +870,10 @@ noinline int __filemap_add_folio(struct address_space *mapping, folio->mapping = mapping; folio->index = xas.xa_index;
- do { - unsigned int order = xa_get_order(xas.xa, xas.xa_index); + for (;;) { + int order = -1, split_order = 0; void *entry, *old = NULL;
- if (order > folio_order(folio)) - xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), - order, gfp); xas_lock_irq(&xas); xas_for_each_conflict(&xas, entry) { old = entry; @@ -882,19 +881,33 @@ noinline int __filemap_add_folio(struct address_space *mapping, xas_set_err(&xas, -EEXIST); goto unlock; } + /* + * If a larger entry exists, + * it will be the first and only entry iterated. + */ + if (order == -1) + order = xas_get_order(&xas); + } + + /* entry may have changed before we re-acquire the lock */ + if (alloced_order && (old != alloced_shadow || order != alloced_order)) { + xas_destroy(&xas); + alloced_order = 0; }
if (old) { - if (shadowp) - *shadowp = old; - /* entry may have been split before we acquired lock */ - order = xa_get_order(xas.xa, xas.xa_index); - if (order > folio_order(folio)) { + if (order > 0 && order > folio_order(folio)) { /* How to handle large swap entries? */ BUG_ON(shmem_mapping(mapping)); + if (!alloced_order) { + split_order = order; + goto unlock; + } xas_split(&xas, old, order); xas_reset(&xas); } + if (shadowp) + *shadowp = old; }
xas_store(&xas, folio); @@ -910,9 +923,24 @@ noinline int __filemap_add_folio(struct address_space *mapping, __lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr); } + unlock: xas_unlock_irq(&xas); - } while (xas_nomem(&xas, gfp)); + + /* split needed, alloc here and retry. */ + if (split_order) { + xas_split_alloc(&xas, old, split_order, gfp); + if (xas_error(&xas)) + goto error; + alloced_shadow = old; + alloced_order = split_order; + xas_reset(&xas); + continue; + } + + if (!xas_nomem(&xas, gfp)) + break; + }
if (xas_error(&xas)) goto error; diff --git a/mm/mmap.c b/mm/mmap.c index 84e49e8bf010..6530e9cac458 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3025,8 +3025,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, flags |= MAP_LOCKED;
file = get_file(vma->vm_file); + ret = security_mmap_file(vma->vm_file, prot, flags); + if (ret) + goto out_fput; ret = do_mmap(vma->vm_file, start, size, prot, flags, 0, pgoff, &populate, NULL); +out_fput: fput(file); out: mmap_write_unlock(mm); diff --git a/mm/util.c b/mm/util.c index 2d5a309c4e54..08d494896552 100644 --- a/mm/util.c +++ b/mm/util.c @@ -434,7 +434,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) if (gap + pad > gap) gap += pad;
- if (gap < MIN_GAP) + if (gap < MIN_GAP && MIN_GAP < MAX_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index d8a01eb016ad..ed95a87ef9ab 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -107,8 +107,7 @@ void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status) * where a timeout + cancel does indicate an actual failure. */ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, status); + mgmt_connect_failed(hdev, conn, status);
/* The connection attempt was doing scan for new RPA, and is * in scan phase. If params are not associated with any other @@ -1233,8 +1232,7 @@ void hci_conn_failed(struct hci_conn *conn, u8 status) hci_le_conn_failed(conn, status); break; case ACL_LINK: - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, status); + mgmt_connect_failed(hdev, conn, status); break; }
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index af7817a7c585..75515a1d2923 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -5391,7 +5391,10 @@ int hci_stop_discovery_sync(struct hci_dev *hdev) if (!e) return 0;
- return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); + /* Ignore cancel errors since they should not interfere with stopping + * the discovery. + */ + hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); }
return 0; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 4ae9029b5785..149aff29e564 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -9724,13 +9724,18 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, mgmt_pending_remove(cmd); }
-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status) +void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status) { struct mgmt_ev_connect_failed ev;
- bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_bdaddr(link_type, addr_type); + if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { + mgmt_device_disconnected(hdev, &conn->dst, conn->type, + conn->dst_type, status, true); + return; + } + + bacpy(&ev.addr.bdaddr, &conn->dst); + ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type); ev.status = mgmt_status(status);
mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); diff --git a/net/can/bcm.c b/net/can/bcm.c index 00208ee13e57..a1f5db0fd5d4 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1429,8 +1429,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg, /* remove device reference, if this is our bound device */ if (bo->bound && bo->ifindex == dev->ifindex) { #if IS_ENABLED(CONFIG_PROC_FS) - if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) + if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) { remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir); + bo->bcm_proc_read = NULL; + } #endif bo->bound = 0; bo->ifindex = 0; diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 4be73de5033c..319f47df3330 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -1179,10 +1179,10 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer) break; case -ENETDOWN: /* In this case we should get a netdev_event(), all active - * sessions will be cleared by - * j1939_cancel_all_active_sessions(). So handle this as an - * error, but let j1939_cancel_all_active_sessions() do the - * cleanup including propagation of the error to user space. + * sessions will be cleared by j1939_cancel_active_session(). + * So handle this as an error, but let + * j1939_cancel_active_session() do the cleanup including + * propagation of the error to user space. */ break; case -EOVERFLOW: diff --git a/net/core/filter.c b/net/core/filter.c index be313928d272..8bfd46a070c1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6222,20 +6222,25 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, int ret = BPF_MTU_CHK_RET_FRAG_NEEDED; struct net_device *dev = skb->dev; int skb_len, dev_len; - int mtu; + int mtu = 0;
- if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) - return -EINVAL; + if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) { + ret = -EINVAL; + goto out; + }
- if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) - return -EINVAL; + if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) { + ret = -EINVAL; + goto out; + }
dev = __dev_via_ifindex(dev, ifindex); - if (unlikely(!dev)) - return -ENODEV; + if (unlikely(!dev)) { + ret = -ENODEV; + goto out; + }
mtu = READ_ONCE(dev->mtu); - dev_len = mtu + dev->hard_header_len;
/* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ @@ -6253,15 +6258,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, */ if (skb_is_gso(skb)) { ret = BPF_MTU_CHK_RET_SUCCESS; - if (flags & BPF_MTU_CHK_SEGS && !skb_gso_validate_network_len(skb, mtu)) ret = BPF_MTU_CHK_RET_SEGS_TOOBIG; } out: - /* BPF verifier guarantees valid pointer */ *mtu_len = mtu; - return ret; }
@@ -6271,19 +6273,21 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, struct net_device *dev = xdp->rxq->dev; int xdp_len = xdp->data_end - xdp->data; int ret = BPF_MTU_CHK_RET_SUCCESS; - int mtu, dev_len; + int mtu = 0, dev_len;
/* XDP variant doesn't support multi-buffer segment check (yet) */ - if (unlikely(flags)) - return -EINVAL; + if (unlikely(flags)) { + ret = -EINVAL; + goto out; + }
dev = __dev_via_ifindex(dev, ifindex); - if (unlikely(!dev)) - return -ENODEV; + if (unlikely(!dev)) { + ret = -ENODEV; + goto out; + }
mtu = READ_ONCE(dev->mtu); - - /* Add L2-header as dev MTU is L3 size */ dev_len = mtu + dev->hard_header_len;
/* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ @@ -6293,10 +6297,8 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, xdp_len += len_diff; /* minus result pass check */ if (xdp_len > dev_len) ret = BPF_MTU_CHK_RET_FRAG_NEEDED; - - /* BPF verifier guarantees valid pointer */ +out: *mtu_len = mtu; - return ret; }
@@ -6306,7 +6308,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_INT, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u32), .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6317,7 +6320,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_INT, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u32), .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; diff --git a/net/core/sock_map.c b/net/core/sock_map.c index a37143d181f9..2afac40bb83c 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -1171,6 +1171,7 @@ static void sock_hash_free(struct bpf_map *map) sock_put(elem->sk); sock_hash_free_elem(htab, elem); } + cond_resched(); }
/* wait for psock readers accessing its map link */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 3b221643206d..9dffdd876fef 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -222,57 +222,59 @@ int sysctl_icmp_msgs_per_sec __read_mostly = 1000; int sysctl_icmp_msgs_burst __read_mostly = 50;
static struct { - spinlock_t lock; - u32 credit; + atomic_t credit; u32 stamp; -} icmp_global = { - .lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock), -}; +} icmp_global;
/** * icmp_global_allow - Are we allowed to send one more ICMP message ? * * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec. * Returns false if we reached the limit and can not send another packet. - * Note: called with BH disabled + * Works in tandem with icmp_global_consume(). */ bool icmp_global_allow(void) { - u32 credit, delta, incr = 0, now = (u32)jiffies; - bool rc = false; + u32 delta, now, oldstamp; + int incr, new, old;
- /* Check if token bucket is empty and cannot be refilled - * without taking the spinlock. The READ_ONCE() are paired - * with the following WRITE_ONCE() in this same function. + /* Note: many cpus could find this condition true. + * Then later icmp_global_consume() could consume more credits, + * this is an acceptable race. */ - if (!READ_ONCE(icmp_global.credit)) { - delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ); - if (delta < HZ / 50) - return false; - } + if (atomic_read(&icmp_global.credit) > 0) + return true;
- spin_lock(&icmp_global.lock); - delta = min_t(u32, now - icmp_global.stamp, HZ); - if (delta >= HZ / 50) { - incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ; - if (incr) - WRITE_ONCE(icmp_global.stamp, now); - } - credit = min_t(u32, icmp_global.credit + incr, - READ_ONCE(sysctl_icmp_msgs_burst)); - if (credit) { - /* We want to use a credit of one in average, but need to randomize - * it for security reasons. - */ - credit = max_t(int, credit - get_random_u32_below(3), 0); - rc = true; + now = jiffies; + oldstamp = READ_ONCE(icmp_global.stamp); + delta = min_t(u32, now - oldstamp, HZ); + if (delta < HZ / 50) + return false; + + incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ; + if (!incr) + return false; + + if (cmpxchg(&icmp_global.stamp, oldstamp, now) == oldstamp) { + old = atomic_read(&icmp_global.credit); + do { + new = min(old + incr, READ_ONCE(sysctl_icmp_msgs_burst)); + } while (!atomic_try_cmpxchg(&icmp_global.credit, &old, new)); } - WRITE_ONCE(icmp_global.credit, credit); - spin_unlock(&icmp_global.lock); - return rc; + return true; } EXPORT_SYMBOL(icmp_global_allow);
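Editorial note: the rewritten icmp_global_allow() replaces the global spinlock with a lock-free token bucket: a fast path that only reads the credit count, a cmpxchg on the timestamp so a single CPU performs each refill, and a cmpxchg loop that tops the credit up to the burst limit. A userspace sketch of the same idea using C11 atomics, assuming made-up constants and a fake tick source rather than the kernel's jiffies and sysctls:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BURST     50          /* stand-in for sysctl_icmp_msgs_burst   */
#define RATE      1000        /* stand-in for sysctl_icmp_msgs_per_sec */
#define HZ_TICKS  1000        /* pretend the clock ticks at 1 kHz      */

static atomic_int  credit;
static atomic_uint stamp;     /* last refill time, in ticks */

static bool bucket_allow(unsigned int now)
{
	unsigned int oldstamp, delta;
	int oldc, newc, incr;

	if (atomic_load(&credit) > 0)
		return true;

	oldstamp = atomic_load(&stamp);
	delta = now - oldstamp;
	if (delta > HZ_TICKS)
		delta = HZ_TICKS;
	if (delta < HZ_TICKS / 50)
		return false;

	incr = RATE * delta / HZ_TICKS;
	if (!incr)
		return false;

	/* Only the CPU that wins the stamp update refills the bucket. */
	if (atomic_compare_exchange_strong(&stamp, &oldstamp, now)) {
		oldc = atomic_load(&credit);
		do {
			newc = oldc + incr;
			if (newc > BURST)
				newc = BURST;
		} while (!atomic_compare_exchange_weak(&credit, &oldc, newc));
	}
	return true;
}

int main(void)
{
	printf("allowed at t=100: %d\n", bucket_allow(100));
	return 0;
}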
+void icmp_global_consume(void) +{ + int credits = get_random_u32_below(3); + + /* Note: this might make icmp_global.credit negative. */ + if (credits) + atomic_sub(credits, &icmp_global.credit); +} +EXPORT_SYMBOL(icmp_global_consume); + static bool icmpv4_mask_allow(struct net *net, int type, int code) { if (type > NR_ICMP_TYPES) @@ -289,14 +291,16 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code) return false; }
-static bool icmpv4_global_allow(struct net *net, int type, int code) +static bool icmpv4_global_allow(struct net *net, int type, int code, + bool *apply_ratelimit) { if (icmpv4_mask_allow(net, type, code)) return true;
- if (icmp_global_allow()) + if (icmp_global_allow()) { + *apply_ratelimit = true; return true; - + } __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL); return false; } @@ -306,15 +310,16 @@ static bool icmpv4_global_allow(struct net *net, int type, int code) */
static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, - struct flowi4 *fl4, int type, int code) + struct flowi4 *fl4, int type, int code, + bool apply_ratelimit) { struct dst_entry *dst = &rt->dst; struct inet_peer *peer; bool rc = true; int vif;
- if (icmpv4_mask_allow(net, type, code)) - goto out; + if (!apply_ratelimit) + return true;
/* No rate limit on loopback */ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) @@ -329,6 +334,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, out: if (!rc) __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST); + else + icmp_global_consume(); return rc; }
@@ -400,6 +407,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) struct ipcm_cookie ipc; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); + bool apply_ratelimit = false; struct flowi4 fl4; struct sock *sk; struct inet_sock *inet; @@ -411,11 +419,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb)) return;
- /* Needed by both icmp_global_allow and icmp_xmit_lock */ + /* Needed by both icmpv4_global_allow and icmp_xmit_lock */ local_bh_disable();
- /* global icmp_msgs_per_sec */ - if (!icmpv4_global_allow(net, type, code)) + /* is global icmp_msgs_per_sec exhausted ? */ + if (!icmpv4_global_allow(net, type, code, &apply_ratelimit)) goto out_bh_enable;
sk = icmp_xmit_lock(net); @@ -448,7 +456,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) goto out_unlock; - if (icmpv4_xrlim_allow(net, rt, &fl4, type, code)) + if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit)) icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt); ip_rt_put(rt); out_unlock: @@ -592,6 +600,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); + bool apply_ratelimit = false; struct ipcm_cookie ipc; struct flowi4 fl4; __be32 saddr; @@ -673,7 +682,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, } }
- /* Needed by both icmp_global_allow and icmp_xmit_lock */ + /* Needed by both icmpv4_global_allow and icmp_xmit_lock */ local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless @@ -681,7 +690,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow) */ if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) && - !icmpv4_global_allow(net, type, code)) + !icmpv4_global_allow(net, type, code, &apply_ratelimit)) goto out_bh_enable;
sk = icmp_xmit_lock(net); @@ -740,7 +749,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, goto out_unlock;
/* peer icmp_ratelimit */ - if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code)) + if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit)) goto ende;
/* RFC says return as much as we can without exceeding 576 bytes. */ diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 08d4b7132d4c..1c9c686d9522 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -323,6 +323,7 @@ config IPV6_RPL_LWTUNNEL bool "IPv6: RPL Source Routing Header support" depends on IPV6 select LWTUNNEL + select DST_CACHE help Support for RFC6554 RPL Source Routing Header using the lightweight tunnels mechanism. diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 93a594a901d1..a790294d3104 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -175,14 +175,16 @@ static bool icmpv6_mask_allow(struct net *net, int type) return false; }
-static bool icmpv6_global_allow(struct net *net, int type) +static bool icmpv6_global_allow(struct net *net, int type, + bool *apply_ratelimit) { if (icmpv6_mask_allow(net, type)) return true;
- if (icmp_global_allow()) + if (icmp_global_allow()) { + *apply_ratelimit = true; return true; - + } __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL); return false; } @@ -191,13 +193,13 @@ static bool icmpv6_global_allow(struct net *net, int type) * Check the ICMP output rate limit */ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, - struct flowi6 *fl6) + struct flowi6 *fl6, bool apply_ratelimit) { struct net *net = sock_net(sk); struct dst_entry *dst; bool res = false;
- if (icmpv6_mask_allow(net, type)) + if (!apply_ratelimit) return true;
/* @@ -228,6 +230,8 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, if (!res) __ICMP6_INC_STATS(net, ip6_dst_idev(dst), ICMP6_MIB_RATELIMITHOST); + else + icmp_global_consume(); dst_release(dst); return res; } @@ -452,6 +456,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, struct net *net; struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; + bool apply_ratelimit = false; struct dst_entry *dst; struct icmp6hdr tmp_hdr; struct flowi6 fl6; @@ -533,11 +538,12 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, return; }
- /* Needed by both icmp_global_allow and icmpv6_xmit_lock */ + /* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */ local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit */ - if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type)) + if (!(skb->dev->flags & IFF_LOOPBACK) && + !icmpv6_global_allow(net, type, &apply_ratelimit)) goto out_bh_enable;
mip6_addr_swap(skb, parm); @@ -575,7 +581,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
np = inet6_sk(sk);
- if (!icmpv6_xrlim_allow(sk, type, &fl6)) + if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit)) goto out;
tmp_hdr.icmp6_type = type; @@ -717,6 +723,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb) struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; struct icmp6hdr *icmph = icmp6_hdr(skb); + bool apply_ratelimit = false; struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; @@ -781,8 +788,9 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb) goto out;
/* Check the ratelimit */ - if ((!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY)) || - !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6)) + if ((!(skb->dev->flags & IFF_LOOPBACK) && + !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY, &apply_ratelimit)) || + !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6, apply_ratelimit)) goto out_dst_release;
idev = __in6_dev_get(skb->dev); diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 71d692728230..690d1c047691 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -223,33 +223,23 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, const struct tcphdr *oth, unsigned int otcplen) { struct tcphdr *tcph; - int needs_ack;
skb_reset_transport_header(nskb); - tcph = skb_put(nskb, sizeof(struct tcphdr)); + tcph = skb_put_zero(nskb, sizeof(struct tcphdr)); /* Truncate to length (no data) */ tcph->doff = sizeof(struct tcphdr)/4; tcph->source = oth->dest; tcph->dest = oth->source;
if (oth->ack) { - needs_ack = 0; tcph->seq = oth->ack_seq; - tcph->ack_seq = 0; } else { - needs_ack = 1; tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + otcplen - (oth->doff<<2)); - tcph->seq = 0; + tcph->ack = 1; }
- /* Reset flags */ - ((u_int8_t *)tcph)[13] = 0; tcph->rst = 1; - tcph->ack = needs_ack; - tcph->window = 0; - tcph->urg_ptr = 0; - tcph->check = 0;
/* Adjust TCP checksum */ tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0299886dbeb9..a9104c4c1c02 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -175,7 +175,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev) struct net_device *rt_dev = rt->dst.dev; bool handled = false;
- if (rt_idev->dev == dev) { + if (rt_idev && rt_idev->dev == dev) { rt->rt6i_idev = in6_dev_get(blackhole_netdev); in6_dev_put(rt_idev); handled = true; diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index 2c83b7586422..db3c19a42e1c 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -263,10 +263,8 @@ static int rpl_input(struct sk_buff *skb) rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
err = rpl_do_srh(skb, rlwt); - if (unlikely(err)) { - kfree_skb(skb); - return err; - } + if (unlikely(err)) + goto drop;
local_bh_disable(); dst = dst_cache_get(&rlwt->cache); @@ -286,9 +284,13 @@ static int rpl_input(struct sk_buff *skb)
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); if (unlikely(err)) - return err; + goto drop;
return dst_input(skb); + +drop: + kfree_skb(skb); + return err; }
static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index a7c39e895b1e..fae701248f05 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -466,6 +466,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do { struct ieee80211_local *local = sdata->local; unsigned long flags; + struct sk_buff_head freeq; struct sk_buff *skb, *tmp; u32 hw_reconf_flags = 0; int i, flushed; @@ -652,18 +653,32 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do skb_queue_purge(&sdata->status_queue); }
+ /* + * Since ieee80211_free_txskb() may issue __dev_queue_xmit() + * which should be called with interrupts enabled, reclamation + * is done in two phases: + */ + __skb_queue_head_init(&freeq); + + /* unlink from local queues... */ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_walk_safe(&local->pending[i], skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (info->control.vif == &sdata->vif) { __skb_unlink(skb, &local->pending[i]); - ieee80211_free_txskb(&local->hw, skb); + __skb_queue_tail(&freeq, skb); } } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ /* ... and perform actual reclamation with interrupts enabled. */ + skb_queue_walk_safe(&freeq, skb, tmp) { + __skb_unlink(skb, &freeq); + ieee80211_free_txskb(&local->hw, skb); + } + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ieee80211_txq_remove_vlan(local, sdata);
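Editorial note: the mac80211 fix above splits reclamation in two phases, as its comment says: skbs are only unlinked onto a private queue while the IRQ-disabled spinlock is held, and the actual freeing, which may re-enter the stack, happens after the lock is released. A stand-alone sketch of that unlink-under-the-lock, free-outside-it pattern (the list and names are illustrative, not mac80211 code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int owner;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending_head;       /* protected by queue_lock */

/* Phase 1: unlink under the lock; phase 2: free with the lock dropped. */
static void reclaim_for_owner(int owner)
{
	struct node *freeq = NULL, **pp, *n;

	pthread_mutex_lock(&queue_lock);
	pp = &pending_head;
	while ((n = *pp) != NULL) {
		if (n->owner == owner) {
			*pp = n->next;          /* unlink ...                */
			n->next = freeq;        /* ... onto a private list   */
			freeq = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&queue_lock);

	while (freeq) {                         /* heavier work, no lock held */
		n = freeq;
		freeq = n->next;
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->owner = i & 1;
		n->next = pending_head;
		pending_head = n;
	}
	reclaim_for_owner(1);
	return 0;
}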
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 5bedd9cef414..2517a5521a57 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -940,6 +940,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, }
IEEE80211_SKB_CB(skb)->flags = flags; + IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
skb->dev = sdata->dev;
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 3cf252418bd3..78e7ac6c0af0 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -890,7 +890,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif, if (ieee80211_is_tx_data(skb)) rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
- if (!(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) + if (!(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) mask = sdata->rc_rateidx_mask[info->band];
if (dest[0].idx < 0) diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 3d68db738cde..b58d061333c5 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -636,7 +636,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); } IEEE80211_SKB_CB(skb)->flags |= tx_flags; - IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_SCAN_TX; + IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK; ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); } } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 415e951e4138..45a093d3f1fa 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -706,7 +706,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) txrc.skb = tx->skb; txrc.reported_rate.idx = -1;
- if (unlikely(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) { + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) { txrc.rate_idx_mask = ~0; } else { txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 4dab45039f34..282e9644f6fd 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -381,7 +381,7 @@ static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) #define ctnetlink_dump_secctx(a, b) (0) #endif
-#ifdef CONFIG_NF_CONNTRACK_LABELS +#ifdef CONFIG_NF_CONNTRACK_EVENTS static inline int ctnetlink_label_size(const struct nf_conn *ct) { struct nf_conn_labels *labels = nf_ct_labels_find(ct); @@ -390,6 +390,7 @@ static inline int ctnetlink_label_size(const struct nf_conn *ct) return 0; return nla_total_size(sizeof(labels->bits)); } +#endif
static int ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct) @@ -410,10 +411,6 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
return 0; } -#else -#define ctnetlink_dump_labels(a, b) (0) -#define ctnetlink_label_size(a) (0) -#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index da5684e3fd08..da7fd3919ce4 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1778,7 +1778,7 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family, if (!hook_list) hook_list = &basechain->hook_list;
- list_for_each_entry(hook, hook_list, list) { + list_for_each_entry_rcu(hook, hook_list, list) { if (!first) first = hook;
@@ -4462,7 +4462,7 @@ int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) return -ERANGE;
ms *= NSEC_PER_MSEC; - *result = nsecs_to_jiffies64(ms); + *result = nsecs_to_jiffies64(ms) ? : !!ms; return 0; }
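Editorial note: the nf_msecs_to_jiffies64() change keeps a small but nonzero timeout from being rounded down to zero jiffies; the GNU "?:" form falls back to !!ms when the conversion truncates to 0. A small stand-alone illustration of that clamp (the 10 ms tick is a made-up stand-in for the jiffy length):

#include <stdint.h>
#include <stdio.h>

/* Convert milliseconds to 10 ms "ticks", but never round a nonzero
 * request down to zero, mirroring "nsecs_to_jiffies64(ms) ? : !!ms".
 */
static uint64_t ms_to_ticks(uint64_t ms)
{
	uint64_t ticks = ms / 10;

	return ticks ? ticks : !!ms;    /* GNU "?:" omits the middle operand */
}

int main(void)
{
	printf("%llu %llu %llu\n",
	       (unsigned long long)ms_to_ticks(0),    /* 0 */
	       (unsigned long long)ms_to_ticks(3),    /* 1, not 0 */
	       (unsigned long long)ms_to_ticks(25));  /* 2 */
	return 0;
}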
@@ -6691,17 +6691,23 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, return err; } else if (set->flags & NFT_SET_TIMEOUT && !(flags & NFT_SET_ELEM_INTERVAL_END)) { - timeout = READ_ONCE(set->timeout); + timeout = set->timeout; }
expiration = 0; if (nla[NFTA_SET_ELEM_EXPIRATION] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) return -EINVAL; + if (timeout == 0) + return -EOPNOTSUPP; + err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_EXPIRATION], &expiration); if (err) return err; + + if (expiration > timeout) + return -ERANGE; }
if (nla[NFTA_SET_ELEM_EXPR]) { @@ -6792,7 +6798,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (err < 0) goto err_parse_key_end;
- if (timeout != READ_ONCE(set->timeout)) { + if (timeout != set->timeout) { err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT); if (err < 0) goto err_parse_key_end; @@ -8937,7 +8943,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) flowtable->data.type->setup(&flowtable->data, hook->ops.dev, FLOW_BLOCK_UNBIND); list_del_rcu(&hook->list); - kfree(hook); + kfree_rcu(hook, rcu); } kfree(flowtable->name); module_put(flowtable->data.type->owner); diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c index 41ece61eb57a..00c51cf693f3 100644 --- a/net/qrtr/af_qrtr.c +++ b/net/qrtr/af_qrtr.c @@ -884,7 +884,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
mutex_lock(&qrtr_node_lock); list_for_each_entry(node, &qrtr_all_nodes, item) { - skbn = skb_clone(skb, GFP_KERNEL); + skbn = pskb_copy(skb, GFP_KERNEL); if (!skbn) break; skb_set_owner_w(skbn, skb->sk); diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 593846d25214..114fef65f92e 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -320,8 +320,8 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, { struct tipc_msg *hdr, *_hdr; struct sk_buff_head tmpq; + u16 cong_link_cnt = 0; struct sk_buff *_skb; - u16 cong_link_cnt; int rc = 0;
/* Is a cluster supporting with new capabilities ? */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 2b2dc46dc701..4ce23762b1c9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9679,7 +9679,8 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, return ERR_PTR(-ENOMEM);
if (n_ssids) - request->ssids = (void *)&request->channels[n_channels]; + request->ssids = (void *)request + + struct_size(request, channels, n_channels); request->n_ssids = n_ssids; if (ie_len) { if (n_ssids) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 4d88e797ae49..4fc6279750ea 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -3209,8 +3209,8 @@ int cfg80211_wext_siwscan(struct net_device *dev, n_channels = ieee80211_get_num_supported_channels(wiphy); }
- creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + - n_channels * sizeof(void *), + creq = kzalloc(struct_size(creq, channels, n_channels) + + sizeof(struct cfg80211_ssid), GFP_ATOMIC); if (!creq) return -ENOMEM; @@ -3218,7 +3218,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, creq->wiphy = wiphy; creq->wdev = dev->ieee80211_ptr; /* SSIDs come after channels */ - creq->ssids = (void *)&creq->channels[n_channels]; + creq->ssids = (void *)creq + struct_size(creq, channels, n_channels); creq->n_channels = n_channels; creq->n_ssids = 1; creq->scan_start = jiffies; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 72d78dbc55ff..591cda99d72f 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -115,7 +115,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) n_channels = i; } request->n_channels = n_channels; - request->ssids = (void *)&request->channels[n_channels]; + request->ssids = (void *)request + + struct_size(request, channels, n_channels); request->n_ssids = 1;
memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 4ccf4236031c..3fa16412db15 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -166,6 +166,10 @@ BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic endif endif
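Editorial note: the cfg80211 hunks above switch from &request->channels[n_channels] to (void *)request + struct_size(request, channels, n_channels), so the trailing SSID array is located from the allocation base with the same expression that sizes the allocation, rather than by indexing past the flexible channels[] array. A userspace sketch of carving a second array out of one allocation behind a flexible array member this way (struct scan_req and scan_req_alloc() are invented for illustration; the kernel's struct_size() additionally checks for overflow):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ssid { char name[32]; };

struct scan_req {
	int n_channels;
	int n_ssids;
	struct ssid *ssids;       /* points into the same allocation */
	void *channels[];         /* flexible trailing array */
};

/* Single allocation: struct + channel pointers + SSID array, with the
 * SSIDs placed immediately after the channel array.
 */
static struct scan_req *scan_req_alloc(int n_channels, int n_ssids)
{
	size_t chan_end = sizeof(struct scan_req) +
			  (size_t)n_channels * sizeof(void *);
	struct scan_req *req = calloc(1, chan_end +
				      (size_t)n_ssids * sizeof(struct ssid));

	if (!req)
		return NULL;
	req->n_channels = n_channels;
	req->n_ssids = n_ssids;
	/* SSIDs come after the channels, addressed from the struct base. */
	req->ssids = (struct ssid *)((char *)req + chan_end);
	return req;
}

int main(void)
{
	struct scan_req *req = scan_req_alloc(4, 1);

	if (!req)
		return 1;
	strcpy(req->ssids[0].name, "example");
	printf("%s\n", req->ssids[0].name);
	free(req);
	return 0;
}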
+ifeq ($(ARCH), x86) +BPF_EXTRA_CFLAGS += -fcf-protection +endif + TPROGS_CFLAGS += -Wall -O2 TPROGS_CFLAGS += -Wmissing-prototypes TPROGS_CFLAGS += -Wstrict-prototypes @@ -394,7 +398,7 @@ $(obj)/%.o: $(src)/%.c -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ - -fno-asynchronous-unwind-tables -fcf-protection \ + -fno-asynchronous-unwind-tables \ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \ $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \ diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c index cfaf1d0e6a5f..35933ae53b92 100644 --- a/security/bpf/hooks.c +++ b/security/bpf/hooks.c @@ -24,7 +24,6 @@ static int __init bpf_lsm_init(void)
struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = { .lbs_inode = sizeof(struct bpf_storage_blob), - .lbs_task = sizeof(struct bpf_storage_blob), };
DEFINE_LSM(bpf) = { diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index e22aad7604e8..5dd1e164f9b1 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -932,7 +932,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, } if (rc >= 0) { old_cat = skp->smk_netlabel.attr.mls.cat; - skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat; + rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat); skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; synchronize_rcu(); netlbl_catmap_free(old_cat); diff --git a/sound/pci/hda/cs35l41_hda_spi.c b/sound/pci/hda/cs35l41_hda_spi.c index eb287aa5f782..d95954ce55d8 100644 --- a/sound/pci/hda/cs35l41_hda_spi.c +++ b/sound/pci/hda/cs35l41_hda_spi.c @@ -38,6 +38,7 @@ static const struct spi_device_id cs35l41_hda_spi_id[] = { { "cs35l41-hda", 0 }, {} }; +MODULE_DEVICE_TABLE(spi, cs35l41_hda_spi_id);
static const struct acpi_device_id cs35l41_acpi_hda_match[] = { { "CSC3551", 0 }, diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c index e5bb1fed26a0..980e6104c2f3 100644 --- a/sound/pci/hda/tas2781_hda_i2c.c +++ b/sound/pci/hda/tas2781_hda_i2c.c @@ -95,9 +95,7 @@ static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data) static int tas2781_read_acpi(struct tasdevice_priv *p, const char *hid) { struct acpi_device *adev; - struct device *physdev; LIST_HEAD(resources); - const char *sub; int ret;
adev = acpi_dev_get_first_match_dev(hid, NULL, -1); @@ -113,18 +111,8 @@ static int tas2781_read_acpi(struct tasdevice_priv *p, const char *hid)
acpi_dev_free_resource_list(&resources); strscpy(p->dev_name, hid, sizeof(p->dev_name)); - physdev = get_device(acpi_get_first_physical_node(adev)); acpi_dev_put(adev);
- /* No side-effect to the playback even if subsystem_id is NULL*/ - sub = acpi_get_subsystem_id(ACPI_HANDLE(physdev)); - if (IS_ERR(sub)) - sub = NULL; - - p->acpi_subsystem_id = sub; - - put_device(physdev); - return 0;
err: @@ -722,7 +710,7 @@ static int tas2781_hda_i2c_probe(struct i2c_client *clt) } else return -ENODEV;
- tas_hda->priv->irq_info.irq = clt->irq; + tas_hda->priv->irq = clt->irq; ret = tas2781_read_acpi(tas_hda->priv, device_name); if (ret) return dev_err_probe(tas_hda->dev, ret, diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index e3aca9c785a0..aa163ec40862 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -2903,8 +2903,10 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682) }
if (dev->of_node) { - devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw); + if (ret) + return ret; } else { ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw, init.name, diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c index 68ac5ea50396..92c647d439ec 100644 --- a/sound/soc/codecs/rt5682s.c +++ b/sound/soc/codecs/rt5682s.c @@ -2828,7 +2828,9 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component) }
if (dev->of_node) { - devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw); + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw); + if (ret) + return ret; } else { ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw, init.name, dev_name(dev)); diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c index 5d0e5348b361..0444cf90c511 100644 --- a/sound/soc/codecs/tas2781-comlib.c +++ b/sound/soc/codecs/tas2781-comlib.c @@ -14,7 +14,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/regmap.h> #include <linux/slab.h> @@ -406,9 +405,6 @@ EXPORT_SYMBOL_GPL(tasdevice_dsp_remove);
void tasdevice_remove(struct tasdevice_priv *tas_priv) { - if (gpio_is_valid(tas_priv->irq_info.irq_gpio)) - gpio_free(tas_priv->irq_info.irq_gpio); - kfree(tas_priv->acpi_subsystem_id); mutex_destroy(&tas_priv->codec_lock); } EXPORT_SYMBOL_GPL(tasdevice_remove); diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c index 3639dcd0bbb2..629e2195a890 100644 --- a/sound/soc/codecs/tas2781-fmwlib.c +++ b/sound/soc/codecs/tas2781-fmwlib.c @@ -13,7 +13,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/regmap.h> #include <linux/slab.h> diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c index a9d179e30773..43775c194445 100644 --- a/sound/soc/codecs/tas2781-i2c.c +++ b/sound/soc/codecs/tas2781-i2c.c @@ -21,7 +21,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> +#include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/regmap.h> #include <linux/slab.h> @@ -616,7 +616,7 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) { struct i2c_client *client = (struct i2c_client *)tas_priv->client; unsigned int dev_addrs[TASDEVICE_MAX_CHANNELS]; - int rc, i, ndev = 0; + int i, ndev = 0;
if (tas_priv->isacpi) { ndev = device_property_read_u32_array(&client->dev, @@ -631,64 +631,34 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) "ti,audio-slots", dev_addrs, ndev); }
- tas_priv->irq_info.irq_gpio = + tas_priv->irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(&client->dev), 0); - } else { + } else if (IS_ENABLED(CONFIG_OF)) { struct device_node *np = tas_priv->dev->of_node; -#ifdef CONFIG_OF - const __be32 *reg, *reg_end; - int len, sw, aw; - - aw = of_n_addr_cells(np); - sw = of_n_size_cells(np); - if (sw == 0) { - reg = (const __be32 *)of_get_property(np, - "reg", &len); - reg_end = reg + len/sizeof(*reg); - ndev = 0; - do { - dev_addrs[ndev] = of_read_number(reg, aw); - reg += aw; - ndev++; - } while (reg < reg_end); - } else { - ndev = 1; - dev_addrs[0] = client->addr; + u64 addr; + + for (i = 0; i < TASDEVICE_MAX_CHANNELS; i++) { + if (of_property_read_reg(np, i, &addr, NULL)) + break; + dev_addrs[ndev++] = addr; } -#else + + tas_priv->irq = of_irq_get(np, 0); + } else { ndev = 1; dev_addrs[0] = client->addr; -#endif - tas_priv->irq_info.irq_gpio = of_irq_get(np, 0); } tas_priv->ndev = ndev; for (i = 0; i < ndev; i++) tas_priv->tasdevice[i].dev_addr = dev_addrs[i];
tas_priv->reset = devm_gpiod_get_optional(&client->dev, - "reset-gpios", GPIOD_OUT_HIGH); + "reset", GPIOD_OUT_HIGH); if (IS_ERR(tas_priv->reset)) dev_err(tas_priv->dev, "%s Can't get reset GPIO\n", __func__);
strcpy(tas_priv->dev_name, tasdevice_id[tas_priv->chip_id].name); - - if (gpio_is_valid(tas_priv->irq_info.irq_gpio)) { - rc = gpio_request(tas_priv->irq_info.irq_gpio, - "AUDEV-IRQ"); - if (!rc) { - gpio_direction_input( - tas_priv->irq_info.irq_gpio); - - tas_priv->irq_info.irq = - gpio_to_irq(tas_priv->irq_info.irq_gpio); - } else - dev_err(tas_priv->dev, "%s: GPIO %d request error\n", - __func__, tas_priv->irq_info.irq_gpio); - } else - dev_err(tas_priv->dev, - "Looking up irq-gpio property failed %d\n", - tas_priv->irq_info.irq_gpio); }
static int tasdevice_i2c_probe(struct i2c_client *i2c) diff --git a/sound/soc/loongson/loongson_card.c b/sound/soc/loongson/loongson_card.c index 406ee8db1a3c..8cc54aedd002 100644 --- a/sound/soc/loongson/loongson_card.c +++ b/sound/soc/loongson/loongson_card.c @@ -127,8 +127,8 @@ static int loongson_card_parse_of(struct loongson_card_data *data) codec = of_get_child_by_name(dev->of_node, "codec"); if (!codec) { dev_err(dev, "audio-codec property missing or invalid\n"); - ret = -EINVAL; - goto err; + of_node_put(cpu); + return -EINVAL; }
for (i = 0; i < card->num_links; i++) { diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile index d8288936c912..c4f1f1735af6 100644 --- a/tools/bpf/runqslower/Makefile +++ b/tools/bpf/runqslower/Makefile @@ -15,6 +15,7 @@ INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi) CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS) CFLAGS += $(EXTRA_CFLAGS) LDFLAGS += $(EXTRA_LDFLAGS) +LDLIBS += -lelf -lz
# Try to detect best kernel BTF source KERNEL_REL := $(shell uname -r) @@ -51,7 +52,7 @@ clean: libbpf_hdrs: $(BPFOBJ)
$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) - $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 92973420c0a5..0f1e5787b4ed 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -378,7 +378,7 @@ static void hists__find_annotations(struct hists *hists, /* skip missing symbols */ nd = rb_next(nd); } else if (use_browser == 1) { - key = hist_entry__tui_annotate(he, evsel, NULL, &annotate_opts); + key = hist_entry__tui_annotate(he, evsel, NULL);
switch (key) { case -1: diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index eb3ef5c24b66..8aba05665467 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -2200,6 +2200,7 @@ int cmd_inject(int argc, const char **argv) .finished_init = perf_event__repipe_op2_synth, .compressed = perf_event__repipe_op4_synth, .auxtrace = perf_event__repipe_auxtrace, + .dont_split_sample_group = true, }, .input_name = "-", .samples = LIST_HEAD_INIT(inject.samples), diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index 51499c20da01..865f321d729b 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -372,6 +372,7 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem) rep_argv[i] = argv[j];
ret = cmd_report(i, rep_argv); + free(new_sort_order); free(rep_argv); return ret; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 2a8889c6d7f9..cd2f3f1a7563 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -540,8 +540,7 @@ static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report * evlist__for_each_entry(evlist, pos) { ret = report__browse_block_hists(&rep->block_reports[i++].hist, rep->min_percent, pos, - &rep->session->header.env, - &annotate_opts); + &rep->session->header.env); if (ret != 0) return ret; } @@ -563,6 +562,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c struct hists *hists = evsel__hists(pos); const char *evname = evsel__name(pos);
+ i++; if (symbol_conf.event_group && !evsel__is_group_leader(pos)) continue;
@@ -572,9 +572,8 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
if (rep->total_cycles_mode) { - report__browse_block_hists(&rep->block_reports[i++].hist, - rep->min_percent, pos, - NULL, NULL); + report__browse_block_hists(&rep->block_reports[i - 1].hist, + rep->min_percent, pos, NULL); continue; }
@@ -669,7 +668,7 @@ static int report__browse_hists(struct report *rep) }
ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent, - &session->header.env, true, &annotate_opts); + &session->header.env, true); /* * Usually "ret" is the last pressed key, and we only * care if the key notifies us to switch data file. diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index f21a655dd7f9..42185da0f000 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -2633,9 +2633,12 @@ static int timehist_sched_change_event(struct perf_tool *tool, * - previous sched event is out of window - we are done * - sample time is beyond window user cares about - reset it * to close out stats for time window interest + * - If tprev is 0, that is, sched_in event for current task is + * not recorded, cannot determine whether sched_in event is + * within time window interest - ignore it */ if (ptime->end) { - if (tprev > ptime->end) + if (!tprev || tprev > ptime->end) goto out;
if (t > ptime->end) @@ -3068,7 +3071,8 @@ static int perf_sched__timehist(struct perf_sched *sched)
if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { pr_err("Invalid time string\n"); - return -EINVAL; + err = -EINVAL; + goto out; }
if (timehist_check_attr(sched, evlist) != 0) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 6ac17763de0e..1c1ec444d501 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -646,8 +646,7 @@ static void *display_thread_tui(void *arg) }
ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent, - &top->session->header.env, !top->record_opts.overwrite, - &annotate_opts); + &top->session->header.env, !top->record_opts.overwrite); if (ret == K_RELOAD) { top->zero = true; goto repeat; diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index d9f9fa254a71..20f24d104da8 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -27,7 +27,6 @@ struct annotate_browser { struct rb_node *curr_hot; struct annotation_line *selection; struct arch *arch; - struct annotation_options *opts; bool searching_backwards; char search_bf[128]; }; @@ -97,7 +96,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int struct annotation_write_ops ops = { .first_line = row == 0, .current_entry = is_current_entry, - .change_color = (!notes->options->hide_src_code && + .change_color = (!annotate_opts.hide_src_code && (!is_current_entry || (browser->use_navkeypressed && !browser->navkeypressed))), @@ -128,7 +127,7 @@ static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
while (pos && pos->al.offset == -1) { pos = list_prev_entry(pos, al.node); - if (!ab->opts->hide_src_code) + if (!annotate_opts.hide_src_code) diff++; }
@@ -195,7 +194,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) return; }
- if (notes->options->hide_src_code) { + if (annotate_opts.hide_src_code) { from = cursor->al.idx_asm; to = target->idx_asm; } else { @@ -224,7 +223,7 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser) int ret = ui_browser__list_head_refresh(browser); int pcnt_width = annotation__pcnt_width(notes);
- if (notes->options->jump_arrows) + if (annotate_opts.jump_arrows) annotate_browser__draw_current_jump(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL); @@ -258,7 +257,7 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser, parent = *p; l = rb_entry(parent, struct annotation_line, rb_node);
- if (disasm__cmp(al, l, browser->opts->percent_type) < 0) + if (disasm__cmp(al, l, annotate_opts.percent_type) < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -294,11 +293,10 @@ static void annotate_browser__set_top(struct annotate_browser *browser, static void annotate_browser__set_rb_top(struct annotate_browser *browser, struct rb_node *nd) { - struct annotation *notes = browser__annotation(&browser->b); struct annotation_line * pos = rb_entry(nd, struct annotation_line, rb_node); u32 idx = pos->idx;
- if (notes->options->hide_src_code) + if (annotate_opts.hide_src_code) idx = pos->idx_asm; annotate_browser__set_top(browser, pos, idx); browser->curr_hot = nd; @@ -331,7 +329,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser, double percent;
percent = annotation_data__percent(&pos->al.data[i], - browser->opts->percent_type); + annotate_opts.percent_type);
if (max_percent < percent) max_percent = percent; @@ -380,12 +378,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) browser->b.seek(&browser->b, offset, SEEK_CUR); al = list_entry(browser->b.top, struct annotation_line, node);
- if (notes->options->hide_src_code) { + if (annotate_opts.hide_src_code) { if (al->idx_asm < offset) offset = al->idx;
- browser->b.nr_entries = notes->nr_entries; - notes->options->hide_src_code = false; + browser->b.nr_entries = notes->src->nr_entries; + annotate_opts.hide_src_code = false; browser->b.seek(&browser->b, -offset, SEEK_CUR); browser->b.top_idx = al->idx - offset; browser->b.index = al->idx; @@ -402,8 +400,8 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) if (al->idx_asm < offset) offset = al->idx_asm;
- browser->b.nr_entries = notes->nr_asm_entries; - notes->options->hide_src_code = true; + browser->b.nr_entries = notes->src->nr_asm_entries; + annotate_opts.hide_src_code = true; browser->b.seek(&browser->b, -offset, SEEK_CUR); browser->b.top_idx = al->idx_asm - offset; browser->b.index = al->idx_asm; @@ -435,7 +433,7 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser) { struct annotation *notes = browser__annotation(browser); ui_browser__reset_index(browser); - browser->nr_entries = notes->nr_asm_entries; + browser->nr_entries = notes->src->nr_asm_entries; }
static int sym_title(struct symbol *sym, struct map *map, char *title, @@ -483,8 +481,8 @@ static bool annotate_browser__callq(struct annotate_browser *browser, target_ms.map = ms->map; target_ms.sym = dl->ops.target.sym; annotation__unlock(notes); - symbol__tui_annotate(&target_ms, evsel, hbt, browser->opts); - sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type); + symbol__tui_annotate(&target_ms, evsel, hbt); + sym_title(ms->sym, ms->map, title, sizeof(title), annotate_opts.percent_type); ui_browser__show_title(&browser->b, title); return true; } @@ -659,7 +657,6 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help) { - struct annotate_browser *ab = container_of(browser, struct annotate_browser, b); struct map_symbol *ms = browser->priv; struct symbol *sym = ms->sym; char symbol_dso[SYM_TITLE_MAX_SIZE]; @@ -667,7 +664,7 @@ static int annotate_browser__show(struct ui_browser *browser, char *title, const if (ui_browser__show(browser, title, help) < 0) return -1;
- sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type); + sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), annotate_opts.percent_type);
ui_browser__gotorc_title(browser, 0, 0); ui_browser__set_color(browser, HE_COLORSET_ROOT); @@ -809,7 +806,7 @@ static int annotate_browser__run(struct annotate_browser *browser, annotate_browser__show(&browser->b, title, help); continue; case 'k': - notes->options->show_linenr = !notes->options->show_linenr; + annotate_opts.show_linenr = !annotate_opts.show_linenr; continue; case 'l': annotate_browser__show_full_location (&browser->b); @@ -822,18 +819,18 @@ static int annotate_browser__run(struct annotate_browser *browser, ui_helpline__puts(help); continue; case 'o': - notes->options->use_offset = !notes->options->use_offset; + annotate_opts.use_offset = !annotate_opts.use_offset; annotation__update_column_widths(notes); continue; case 'O': - if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL) - notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL; + if (++annotate_opts.offset_level > ANNOTATION__MAX_OFFSET_LEVEL) + annotate_opts.offset_level = ANNOTATION__MIN_OFFSET_LEVEL; continue; case 'j': - notes->options->jump_arrows = !notes->options->jump_arrows; + annotate_opts.jump_arrows = !annotate_opts.jump_arrows; continue; case 'J': - notes->options->show_nr_jumps = !notes->options->show_nr_jumps; + annotate_opts.show_nr_jumps = !annotate_opts.show_nr_jumps; annotation__update_column_widths(notes); continue; case '/': @@ -860,7 +857,7 @@ static int annotate_browser__run(struct annotate_browser *browser, browser->b.height, browser->b.index, browser->b.top_idx, - notes->nr_asm_entries); + notes->src->nr_asm_entries); } continue; case K_ENTER: @@ -897,15 +894,15 @@ static int annotate_browser__run(struct annotate_browser *browser, annotation__update_column_widths(notes); continue; case 'c': - if (notes->options->show_minmax_cycle) - notes->options->show_minmax_cycle = false; + if (annotate_opts.show_minmax_cycle) + annotate_opts.show_minmax_cycle = false; else - notes->options->show_minmax_cycle = true; + annotate_opts.show_minmax_cycle = true; annotation__update_column_widths(notes); continue; case 'p': case 'b': - switch_percent_type(browser->opts, key == 'b'); + switch_percent_type(&annotate_opts, key == 'b'); hists__scnprintf_title(hists, title, sizeof(title)); annotate_browser__show(&browser->b, title, help); continue; @@ -932,26 +929,23 @@ static int annotate_browser__run(struct annotate_browser *browser, }
int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *opts) + struct hist_browser_timer *hbt) { - return symbol__tui_annotate(ms, evsel, hbt, opts); + return symbol__tui_annotate(ms, evsel, hbt); }
int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *opts) + struct hist_browser_timer *hbt) { /* reset abort key so that it can get Ctrl-C as a key */ SLang_reset_tty(); SLang_init_tty(0, 0, 0);
- return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts); + return map_symbol__tui_annotate(&he->ms, evsel, hbt); }
int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *opts) + struct hist_browser_timer *hbt) { struct symbol *sym = ms->sym; struct annotation *notes = symbol__annotation(sym); @@ -965,7 +959,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, .priv = ms, .use_navkeypressed = true, }, - .opts = opts, }; struct dso *dso; int ret = -1, err; @@ -991,12 +984,12 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
ui_helpline__push("Press ESC to exit");
- browser.b.width = notes->max_line_len; - browser.b.nr_entries = notes->nr_entries; + browser.b.width = notes->src->max_line_len; + browser.b.nr_entries = notes->src->nr_entries; browser.b.entries = &notes->src->source, browser.b.width += 18; /* Percentage */
- if (notes->options->hide_src_code) + if (annotate_opts.hide_src_code) ui_browser__init_asm_mode(&browser.b);
ret = annotate_browser__run(&browser, evsel, hbt); diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 70db5a717905..bb59d27642cc 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -2250,8 +2250,7 @@ struct hist_browser *hist_browser__new(struct hists *hists) static struct hist_browser * perf_evsel_browser__new(struct evsel *evsel, struct hist_browser_timer *hbt, - struct perf_env *env, - struct annotation_options *annotation_opts) + struct perf_env *env) { struct hist_browser *browser = hist_browser__new(evsel__hists(evsel));
@@ -2259,7 +2258,6 @@ perf_evsel_browser__new(struct evsel *evsel, browser->hbt = hbt; browser->env = env; browser->title = hists_browser__scnprintf_title; - browser->annotation_opts = annotation_opts; } return browser; } @@ -2432,8 +2430,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act) struct hist_entry *he; int err;
- if (!browser->annotation_opts->objdump_path && - perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path)) + if (!annotate_opts.objdump_path && + perf_env__lookup_objdump(browser->env, &annotate_opts.objdump_path)) return 0;
notes = symbol__annotation(act->ms.sym); @@ -2445,8 +2443,7 @@ do_annotate(struct hist_browser *browser, struct popup_action *act) else evsel = hists_to_evsel(browser->hists);
- err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt, - browser->annotation_opts); + err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt); he = hist_browser__selected_entry(browser); /* * offer option to annotate the other branch source or target @@ -2943,11 +2940,10 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *helpline, bool left_exits, struct hist_browser_timer *hbt, float min_pcnt, - struct perf_env *env, bool warn_lost_event, - struct annotation_options *annotation_opts) + struct perf_env *env, bool warn_lost_event) { struct hists *hists = evsel__hists(evsel); - struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts); + struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env); struct branch_info *bi = NULL; #define MAX_OPTIONS 16 char *options[MAX_OPTIONS]; @@ -3398,7 +3394,6 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h struct evsel_menu { struct ui_browser b; struct evsel *selection; - struct annotation_options *annotation_opts; bool lost_events, lost_events_warned; float min_pcnt; struct perf_env *env; @@ -3499,8 +3494,7 @@ static int perf_evsel_menu__run(struct evsel_menu *menu, hbt->timer(hbt->arg); key = evsel__hists_browse(pos, nr_events, help, true, hbt, menu->min_pcnt, menu->env, - warn_lost_event, - menu->annotation_opts); + warn_lost_event); ui_browser__show_title(&menu->b, title); switch (key) { case K_TAB: @@ -3557,7 +3551,7 @@ static bool filter_group_entries(struct ui_browser *browser __maybe_unused,
static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, const char *help, struct hist_browser_timer *hbt, float min_pcnt, struct perf_env *env, - bool warn_lost_event, struct annotation_options *annotation_opts) + bool warn_lost_event) { struct evsel *pos; struct evsel_menu menu = { @@ -3572,7 +3566,6 @@ static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, con }, .min_pcnt = min_pcnt, .env = env, - .annotation_opts = annotation_opts, };
ui_helpline__push("Press ESC to exit"); @@ -3607,8 +3600,7 @@ static bool evlist__single_entry(struct evlist *evlist) }
int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt, - float min_pcnt, struct perf_env *env, bool warn_lost_event, - struct annotation_options *annotation_opts) + float min_pcnt, struct perf_env *env, bool warn_lost_event) { int nr_entries = evlist->core.nr_entries;
@@ -3617,7 +3609,7 @@ single_entry: { struct evsel *first = evlist__first(evlist);
return evsel__hists_browse(first, nr_entries, help, false, hbt, min_pcnt, - env, warn_lost_event, annotation_opts); + env, warn_lost_event); } }
@@ -3635,7 +3627,7 @@ single_entry: { }
return __evlist__tui_browse_hists(evlist, nr_entries, help, hbt, min_pcnt, env, - warn_lost_event, annotation_opts); + warn_lost_event); }
static int block_hists_browser__title(struct hist_browser *browser, char *bf, @@ -3654,8 +3646,7 @@ static int block_hists_browser__title(struct hist_browser *browser, char *bf, }
int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, - float min_percent, struct perf_env *env, - struct annotation_options *annotation_opts) + float min_percent, struct perf_env *env) { struct hists *hists = &bh->block_hists; struct hist_browser *browser; @@ -3672,7 +3663,6 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, browser->title = block_hists_browser__title; browser->min_pcnt = min_percent; browser->env = env; - browser->annotation_opts = annotation_opts;
/* reset abort key so that it can get Ctrl-C as a key */ SLang_reset_tty(); diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h index 1e938d9ffa5e..de46f6c56b0e 100644 --- a/tools/perf/ui/browsers/hists.h +++ b/tools/perf/ui/browsers/hists.h @@ -4,7 +4,6 @@
#include "ui/browser.h"
-struct annotation_options; struct evsel;
struct hist_browser { @@ -15,7 +14,6 @@ struct hist_browser { struct hist_browser_timer *hbt; struct pstack *pstack; struct perf_env *env; - struct annotation_options *annotation_opts; struct evsel *block_evsel; int print_seq; bool show_dso; diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 83da2bceb595..6dfe11cbf30e 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -813,7 +813,6 @@ static __maybe_unused void annotated_source__delete(struct annotated_source *src if (src == NULL) return; zfree(&src->histograms); - zfree(&src->cycles_hist); free(src); }
@@ -848,18 +847,6 @@ static int annotated_source__alloc_histograms(struct annotated_source *src, return src->histograms ? 0 : -1; }
-/* The cycles histogram is lazily allocated. */ -static int symbol__alloc_hist_cycles(struct symbol *sym) -{ - struct annotation *notes = symbol__annotation(sym); - const size_t size = symbol__size(sym); - - notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist)); - if (notes->src->cycles_hist == NULL) - return -1; - return 0; -} - void symbol__annotate_zero_histograms(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); @@ -868,9 +855,10 @@ void symbol__annotate_zero_histograms(struct symbol *sym) if (notes->src != NULL) { memset(notes->src->histograms, 0, notes->src->nr_histograms * notes->src->sizeof_sym_hist); - if (notes->src->cycles_hist) - memset(notes->src->cycles_hist, 0, - symbol__size(sym) * sizeof(struct cyc_hist)); + } + if (notes->branch && notes->branch->cycles_hist) { + memset(notes->branch->cycles_hist, 0, + symbol__size(sym) * sizeof(struct cyc_hist)); } annotation__unlock(notes); } @@ -961,23 +949,33 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms, return 0; }
+static struct annotated_branch *annotation__get_branch(struct annotation *notes) +{ + if (notes == NULL) + return NULL; + + if (notes->branch == NULL) + notes->branch = zalloc(sizeof(*notes->branch)); + + return notes->branch; +} + static struct cyc_hist *symbol__cycles_hist(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); + struct annotated_branch *branch;
- if (notes->src == NULL) { - notes->src = annotated_source__new(); - if (notes->src == NULL) - return NULL; - goto alloc_cycles_hist; - } + branch = annotation__get_branch(notes); + if (branch == NULL) + return NULL;
- if (!notes->src->cycles_hist) { -alloc_cycles_hist: - symbol__alloc_hist_cycles(sym); + if (branch->cycles_hist == NULL) { + const size_t size = symbol__size(sym); + + branch->cycles_hist = calloc(size, sizeof(struct cyc_hist)); }
- return notes->src->cycles_hist; + return branch->cycles_hist; }
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists) @@ -1086,6 +1084,14 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 return n_insn; }
+static void annotated_branch__delete(struct annotated_branch *branch) +{ + if (branch) { + zfree(&branch->cycles_hist); + free(branch); + } +} + static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) { unsigned n_insn; @@ -1094,6 +1100,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
n_insn = annotation__count_insn(notes, start, end); if (n_insn && ch->num && ch->cycles) { + struct annotated_branch *branch; float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
/* Hide data when there are too many overlaps. */ @@ -1109,10 +1116,11 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 } }
- if (cover_insn) { - notes->hit_cycles += ch->cycles; - notes->hit_insn += n_insn * ch->num; - notes->cover_insn += cover_insn; + branch = annotation__get_branch(notes); + if (cover_insn && branch) { + branch->hit_cycles += ch->cycles; + branch->hit_insn += n_insn * ch->num; + branch->cover_insn += cover_insn; } } } @@ -1122,19 +1130,19 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) int err = 0; s64 offset;
- if (!notes->src || !notes->src->cycles_hist) + if (!notes->branch || !notes->branch->cycles_hist) return 0;
- notes->total_insn = annotation__count_insn(notes, 0, size - 1); - notes->hit_cycles = 0; - notes->hit_insn = 0; - notes->cover_insn = 0; + notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1); + notes->branch->hit_cycles = 0; + notes->branch->hit_insn = 0; + notes->branch->cover_insn = 0;
annotation__lock(notes); for (offset = size - 1; offset >= 0; --offset) { struct cyc_hist *ch;
- ch = &notes->src->cycles_hist[offset]; + ch = &notes->branch->cycles_hist[offset]; if (ch && ch->cycles) { struct annotation_line *al;
@@ -1153,13 +1161,12 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) al->cycles->max = ch->cycles_max; al->cycles->min = ch->cycles_min; } - notes->have_cycles = true; } }
if (err) { while (++offset < (s64)size) { - struct cyc_hist *ch = &notes->src->cycles_hist[offset]; + struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
if (ch && ch->cycles) { struct annotation_line *al = notes->offsets[offset]; @@ -1325,6 +1332,7 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r void annotation__exit(struct annotation *notes) { annotated_source__delete(notes->src); + annotated_branch__delete(notes->branch); }
static struct sharded_mutex *sharded_mutex; @@ -2817,19 +2825,20 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) void annotation__set_offsets(struct annotation *notes, s64 size) { struct annotation_line *al; + struct annotated_source *src = notes->src;
- notes->max_line_len = 0; - notes->nr_entries = 0; - notes->nr_asm_entries = 0; + src->max_line_len = 0; + src->nr_entries = 0; + src->nr_asm_entries = 0;
- list_for_each_entry(al, ¬es->src->source, node) { + list_for_each_entry(al, &src->source, node) { size_t line_len = strlen(al->line);
- if (notes->max_line_len < line_len) - notes->max_line_len = line_len; - al->idx = notes->nr_entries++; + if (src->max_line_len < line_len) + src->max_line_len = line_len; + al->idx = src->nr_entries++; if (al->offset != -1) { - al->idx_asm = notes->nr_asm_entries++; + al->idx_asm = src->nr_asm_entries++; /* * FIXME: short term bandaid to cope with assembly * routines that comes with labels in the same column @@ -3074,13 +3083,14 @@ static void disasm_line__write(struct disasm_line *dl, struct annotation *notes, static void ipc_coverage_string(char *bf, int size, struct annotation *notes) { double ipc = 0.0, coverage = 0.0; + struct annotated_branch *branch = annotation__get_branch(notes);
- if (notes->hit_cycles) - ipc = notes->hit_insn / ((double)notes->hit_cycles); + if (branch && branch->hit_cycles) + ipc = branch->hit_insn / ((double)branch->hit_cycles);
- if (notes->total_insn) { - coverage = notes->cover_insn * 100.0 / - ((double)notes->total_insn); + if (branch && branch->total_insn) { + coverage = branch->cover_insn * 100.0 / + ((double)branch->total_insn); }
scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)", @@ -3105,7 +3115,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati int printed;
if (first_line && (al->offset == -1 || percent_max == 0.0)) { - if (notes->have_cycles && al->cycles) { + if (notes->branch && al->cycles) { if (al->cycles->ipc == 0.0 && al->cycles->avg == 0) show_title = true; } else @@ -3142,7 +3152,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati } }
- if (notes->have_cycles) { + if (notes->branch) { if (al->cycles && al->cycles->ipc) obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc); else if (!show_title) diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 0fa72eb559ac..b79614c44a24 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -269,27 +269,30 @@ struct cyc_hist { * returns. */ struct annotated_source { - struct list_head source; - int nr_histograms; - size_t sizeof_sym_hist; - struct cyc_hist *cycles_hist; - struct sym_hist *histograms; + struct list_head source; + size_t sizeof_sym_hist; + struct sym_hist *histograms; + int nr_histograms; + int nr_entries; + int nr_asm_entries; + u16 max_line_len; };
-struct LOCKABLE annotation { - u64 max_coverage; - u64 start; +struct annotated_branch { u64 hit_cycles; u64 hit_insn; unsigned int total_insn; unsigned int cover_insn; + struct cyc_hist *cycles_hist; +}; + +struct LOCKABLE annotation { + u64 max_coverage; + u64 start; struct annotation_options *options; struct annotation_line **offsets; int nr_events; int max_jump_sources; - int nr_entries; - int nr_asm_entries; - u16 max_line_len; struct { u8 addr; u8 jumps; @@ -298,8 +301,8 @@ struct LOCKABLE annotation { u8 max_addr; u8 max_ins_name; } widths; - bool have_cycles; struct annotated_source *src; + struct annotated_branch *branch; };
static inline void annotation__init(struct annotation *notes __maybe_unused) @@ -313,10 +316,10 @@ bool annotation__trylock(struct annotation *notes) EXCLUSIVE_TRYLOCK_FUNCTION(tr
static inline int annotation__cycles_width(struct annotation *notes) { - if (notes->have_cycles && notes->options->show_minmax_cycle) + if (notes->branch && notes->options->show_minmax_cycle) return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH;
- return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; + return notes->branch ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; }
static inline int annotation__pcnt_width(struct annotation *notes) @@ -409,13 +412,11 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel);
#ifdef HAVE_SLANG_SUPPORT int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *opts); + struct hist_browser_timer *hbt); #else static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused, struct evsel *evsel __maybe_unused, - struct hist_browser_timer *hbt __maybe_unused, - struct annotation_options *opts __maybe_unused) + struct hist_browser_timer *hbt __maybe_unused) { return 0; } diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c index 591fc1edd385..dec910989701 100644 --- a/tools/perf/util/block-info.c +++ b/tools/perf/util/block-info.c @@ -129,9 +129,9 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh, al.sym = he->ms.sym;
notes = symbol__annotation(he->ms.sym); - if (!notes || !notes->src || !notes->src->cycles_hist) + if (!notes || !notes->branch || !notes->branch->cycles_hist) return 0; - ch = notes->src->cycles_hist; + ch = notes->branch->cycles_hist; for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) { if (ch[i].num_aggr) { struct block_info *bi; @@ -464,8 +464,7 @@ void block_info__free_report(struct block_report *reps, int nr_reps) }
int report__browse_block_hists(struct block_hist *bh, float min_percent, - struct evsel *evsel, struct perf_env *env, - struct annotation_options *annotation_opts) + struct evsel *evsel, struct perf_env *env) { int ret;
@@ -477,8 +476,7 @@ int report__browse_block_hists(struct block_hist *bh, float min_percent, return 0; case 1: symbol_conf.report_individual_block = true; - ret = block_hists_tui_browse(bh, evsel, min_percent, - env, annotation_opts); + ret = block_hists_tui_browse(bh, evsel, min_percent, env); return ret; default: return -1; diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h index 42e9dcc4cf0a..96f53e89795e 100644 --- a/tools/perf/util/block-info.h +++ b/tools/perf/util/block-info.h @@ -78,8 +78,7 @@ struct block_report *block_info__create_report(struct evlist *evlist, void block_info__free_report(struct block_report *reps, int nr_reps);
int report__browse_block_hists(struct block_hist *bh, float min_percent, - struct evsel *evsel, struct perf_env *env, - struct annotation_options *annotation_opts); + struct evsel *evsel, struct perf_env *env);
float block_info__total_cycles_percent(struct hist_entry *he);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index afc9f1c7f4dc..5d0db96609df 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -457,7 +457,6 @@ struct hist_browser_timer { int refresh; };
-struct annotation_options; struct res_sample;
enum rstype { @@ -473,16 +472,13 @@ struct block_hist; void attr_to_script(char *buf, struct perf_event_attr *attr);
int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *annotation_opts); + struct hist_browser_timer *hbt);
int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *annotation_opts); + struct hist_browser_timer *hbt);
int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt, - float min_pcnt, struct perf_env *env, bool warn_lost_event, - struct annotation_options *annotation_options); + float min_pcnt, struct perf_env *env, bool warn_lost_event);
int script_browse(const char *script_opt, struct evsel *evsel);
@@ -492,8 +488,7 @@ int res_sample_browse(struct res_sample *res_samples, int num_res, void res_sample_init(void);
int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, - float min_percent, struct perf_env *env, - struct annotation_options *annotation_opts); + float min_percent, struct perf_env *env); #else static inline int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused, @@ -501,23 +496,20 @@ int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused, struct hist_browser_timer *hbt __maybe_unused, float min_pcnt __maybe_unused, struct perf_env *env __maybe_unused, - bool warn_lost_event __maybe_unused, - struct annotation_options *annotation_options __maybe_unused) + bool warn_lost_event __maybe_unused) { return 0; } static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused, struct evsel *evsel __maybe_unused, - struct hist_browser_timer *hbt __maybe_unused, - struct annotation_options *annotation_options __maybe_unused) + struct hist_browser_timer *hbt __maybe_unused) { return 0; }
static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused, struct evsel *evsel __maybe_unused, - struct hist_browser_timer *hbt __maybe_unused, - struct annotation_options *annotation_opts __maybe_unused) + struct hist_browser_timer *hbt __maybe_unused) { return 0; } @@ -541,8 +533,7 @@ static inline void res_sample_init(void) {} static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused, struct evsel *evsel __maybe_unused, float min_percent __maybe_unused, - struct perf_env *env __maybe_unused, - struct annotation_options *annotation_opts __maybe_unused) + struct perf_env *env __maybe_unused) { return 0; } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index c6afba7ab1a5..277b2cbd5186 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1500,6 +1500,9 @@ static int deliver_sample_group(struct evlist *evlist, int ret = -EINVAL; struct sample_read_value *v = sample->read.group.values;
+ if (tool->dont_split_sample_group) + return deliver_sample_value(evlist, tool, event, sample, v, machine); + sample_read_group__for_each(v, sample->read.group.nr, read_format) { ret = deliver_sample_value(evlist, tool, event, sample, v, machine); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 6ab8147a3f87..b80349ca2199 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -583,21 +583,21 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, {
struct symbol *sym = he->ms.sym; - struct annotation *notes; + struct annotated_branch *branch; double ipc = 0.0, coverage = 0.0; char tmp[64];
if (!sym) return repsep_snprintf(bf, size, "%-*s", width, "-");
- notes = symbol__annotation(sym); + branch = symbol__annotation(sym)->branch;
- if (notes->hit_cycles) - ipc = notes->hit_insn / ((double)notes->hit_cycles); + if (branch && branch->hit_cycles) + ipc = branch->hit_insn / ((double)branch->hit_cycles);
- if (notes->total_insn) { - coverage = notes->cover_insn * 100.0 / - ((double)notes->total_insn); + if (branch && branch->total_insn) { + coverage = branch->cover_insn * 100.0 / + ((double)branch->total_insn); }
snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 0abe35388ab1..f98ade7f9fba 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -1207,7 +1207,8 @@ static void print_metric_headers(struct perf_stat_config *config,
/* Print metrics headers only */ evlist__for_each_entry(evlist, counter) { - if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) + if (!config->iostat_run && + config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) continue;
os.evsel = counter; diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c index 302443921681..1b91ccd4d523 100644 --- a/tools/perf/util/time-utils.c +++ b/tools/perf/util/time-utils.c @@ -20,7 +20,7 @@ int parse_nsec_time(const char *str, u64 *ptime) u64 time_sec, time_nsec; char *end;
- time_sec = strtoul(str, &end, 10); + time_sec = strtoull(str, &end, 10); if (*end != '.' && *end != '\0') return -1;
@@ -38,7 +38,7 @@ int parse_nsec_time(const char *str, u64 *ptime) for (i = strlen(nsec_buf); i < 9; i++) nsec_buf[i] = '0';
- time_nsec = strtoul(nsec_buf, &end, 10); + time_nsec = strtoull(nsec_buf, &end, 10); if (*end != '\0') return -1; } else diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h index c957fb849ac6..62bbc9cec151 100644 --- a/tools/perf/util/tool.h +++ b/tools/perf/util/tool.h @@ -85,6 +85,7 @@ struct perf_tool { bool namespace_events; bool cgroup_events; bool no_warn; + bool dont_split_sample_group; enum show_feature_header show_feat_hdr; };
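The strtoul()-to-strtoull() switch in time-utils.c above matters because parse_nsec_time() stores into a u64: on 32-bit targets unsigned long is only 32 bits wide, so large second counts would be truncated. A hedged userspace sketch of the same parsing idea (hypothetical helper, not the perf code):

/* Hypothetical sketch: parse "sec.nsec" into a 64-bit nanosecond value.
 * strtoull() keeps the result from being limited to the 32-bit range of
 * unsigned long on 32-bit platforms. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_sec_nsec(const char *str, uint64_t *out)
{
	char *end;
	uint64_t sec, nsec = 0;

	sec = strtoull(str, &end, 10);
	if (*end != '.' && *end != '\0')
		return -1;

	if (*end == '.') {
		char buf[10] = "000000000";	/* right-pad to 9 digits */
		size_t len = strlen(end + 1);

		if (len > 9)
			return -1;
		memcpy(buf, end + 1, len);
		nsec = strtoull(buf, &end, 10);
		if (*end != '\0')
			return -1;
	}

	*out = sec * 1000000000ULL + nsec;
	return 0;
}

int main(void)
{
	uint64_t ns;

	if (!parse_sec_nsec("4294967296.5", &ns))	/* seconds > 32 bits */
		printf("%llu ns\n", (unsigned long long)ns);
	return 0;
}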
diff --git a/tools/power/cpupower/lib/powercap.c b/tools/power/cpupower/lib/powercap.c index a7a59c6bacda..94a0c69e55ef 100644 --- a/tools/power/cpupower/lib/powercap.c +++ b/tools/power/cpupower/lib/powercap.c @@ -77,6 +77,14 @@ int powercap_get_enabled(int *mode) return sysfs_get_enabled(path, mode); }
+/* + * TODO: implement function. Returns dummy 0 for now. + */ +int powercap_set_enabled(int mode) +{ + return 0; +} + /* * Hardcoded, because rapl is the only powercap implementation - * this needs to get more generic if more powercap implementations diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile index 8f5febaf1a9a..edb3613513b8 100644 --- a/tools/testing/selftests/arm64/signal/Makefile +++ b/tools/testing/selftests/arm64/signal/Makefile @@ -23,7 +23,7 @@ $(TEST_GEN_PROGS): $(PROGS) # Common test-unit targets to build common-layout test-cases executables # Needs secondary expansion to properly include the testcase c-file in pre-reqs COMMON_SOURCES := test_signals.c test_signals_utils.c testcases/testcases.c \ - signals.S + signals.S sve_helpers.c COMMON_HEADERS := test_signals.h test_signals_utils.h testcases/testcases.h
.SECONDEXPANSION: diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.c b/tools/testing/selftests/arm64/signal/sve_helpers.c new file mode 100644 index 000000000000..0acc121af306 --- /dev/null +++ b/tools/testing/selftests/arm64/signal/sve_helpers.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 ARM Limited + * + * Common helper functions for SVE and SME functionality. + */ + +#include <stdbool.h> +#include <kselftest.h> +#include <asm/sigcontext.h> +#include <sys/prctl.h> + +unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls; + +int sve_fill_vls(bool use_sme, int min_vls) +{ + int vq, vl; + int pr_set_vl = use_sme ? PR_SME_SET_VL : PR_SVE_SET_VL; + int len_mask = use_sme ? PR_SME_VL_LEN_MASK : PR_SVE_VL_LEN_MASK; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(pr_set_vl, vq * 16); + if (vl == -1) + return KSFT_FAIL; + + vl &= len_mask; + + /* + * Unlike SVE, SME does not require the minimum vector length + * to be implemented, or the VLs to be consecutive, so any call + * to the prctl might return the single implemented VL, which + * might be larger than 16. So to avoid this loop never + * terminating, bail out here when we find a higher VL than + * we asked for. + * See the ARM ARM, DDI 0487K.a, B1.4.2: I_QQRNR and I_NWYBP. + */ + if (vq < sve_vq_from_vl(vl)) + break; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + if (nvls < min_vls) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return KSFT_SKIP; + } + + return KSFT_PASS; +} diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.h b/tools/testing/selftests/arm64/signal/sve_helpers.h new file mode 100644 index 000000000000..50948ce471cc --- /dev/null +++ b/tools/testing/selftests/arm64/signal/sve_helpers.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 ARM Limited + * + * Common helper functions for SVE and SME functionality. + */ + +#ifndef __SVE_HELPERS_H__ +#define __SVE_HELPERS_H__ + +#include <stdbool.h> + +#define VLS_USE_SVE false +#define VLS_USE_SME true + +extern unsigned int vls[]; +extern unsigned int nvls; + +int sve_fill_vls(bool use_sme, int min_vls); + +#endif diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c index ebd5815b54bb..dfd6a2badf9f 100644 --- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c +++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c @@ -6,44 +6,28 @@ * handler, this is not supported and is expected to segfault. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
struct fake_sigframe sf; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 2);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SVE_SET_VL, vq * 16); - if (vl == -1) - return false; + if (!res) + return true;
- vl &= PR_SME_VL_LEN_MASK; + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least two VLs */ - if (nvls < 2) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } - - return true; + return false; }
static int fake_sigreturn_ssve_change_vl(struct tdescr *td, @@ -51,30 +35,30 @@ static int fake_sigreturn_ssve_change_vl(struct tdescr *td, { size_t resv_sz, offset; struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); - struct sve_context *sve; + struct za_context *za;
/* Get a signal context with a SME ZA frame in it */ if (!get_current_context(td, &sf.uc, sizeof(sf.uc))) return 1;
resv_sz = GET_SF_RESV_SIZE(sf); - head = get_header(head, SVE_MAGIC, resv_sz, &offset); + head = get_header(head, ZA_MAGIC, resv_sz, &offset); if (!head) { - fprintf(stderr, "No SVE context\n"); + fprintf(stderr, "No ZA context\n"); return 1; }
- if (head->size != sizeof(struct sve_context)) { + if (head->size != sizeof(struct za_context)) { fprintf(stderr, "Register data present, aborting\n"); return 1; }
- sve = (struct sve_context *)head; + za = (struct za_context *)head;
/* No changes are supported; init left us at minimum VL so go to max */ fprintf(stderr, "Attempting to change VL from %d to %d\n", - sve->vl, vls[0]); - sve->vl = vls[0]; + za->vl, vls[0]); + za->vl = vls[0];
fake_sigreturn(&sf, sizeof(sf), 0);
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c index e2a452190511..e1ccf8f85a70 100644 --- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c +++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c @@ -12,40 +12,22 @@ #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
struct fake_sigframe sf; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sve_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SVE, 2);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SVE_SET_VL, vq * 16); - if (vl == -1) - return false; + if (!res) + return true;
- vl &= PR_SVE_VL_LEN_MASK; - - /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least two VLs */ - if (nvls < 2) { - fprintf(stderr, "Only %d VL supported\n", nvls); + if (res == KSFT_SKIP) td->result = KSFT_SKIP; - return false; - }
- return true; + return false; }
static int fake_sigreturn_sve_change_vl(struct tdescr *td, diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c index 3d37daafcff5..6dbe48cf8b09 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c @@ -6,51 +6,31 @@ * set up as expected. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 64]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 1);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SME_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SME_VL_LEN_MASK; - - /* Did we find the lowest supported VL? */ - if (vq < sve_vq_from_vl(vl)) - break; + if (!res) + return true;
- /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static void setup_ssve_regs(void) diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c index 9dc5f128bbc0..5557e116e973 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c @@ -6,51 +6,31 @@ * signal frames is set up as expected when enabled simultaneously. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 128]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 1);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SME_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SME_VL_LEN_MASK; - - /* Did we find the lowest supported VL? */ - if (vq < sve_vq_from_vl(vl)) - break; + if (!res) + return true;
- /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static void setup_regs(void) diff --git a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c index 8b16eabbb769..8143eb1c58c1 100644 --- a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c @@ -6,47 +6,31 @@ * expected. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 64]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sve_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SVE, 1);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SVE_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SVE_VL_LEN_MASK; - - /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); + if (!res) + return true;
- vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static void setup_sve_regs(void) diff --git a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c index 4d6f94b6178f..ce26e9c2fa5e 100644 --- a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c @@ -6,47 +6,31 @@ * expected. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 128]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 1);
- /* - * Enumerate up to SME_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SME_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SME_VL_LEN_MASK; - - /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); + if (!res) + return true;
- vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc, diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c index 174ad6656696..b9e13f27f1f9 100644 --- a/tools/testing/selftests/arm64/signal/testcases/za_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c @@ -6,51 +6,31 @@ * expected. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 128]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 1);
- /* - * Enumerate up to SME_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SME_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SME_VL_LEN_MASK; - - /* Did we find the lowest supported VL? */ - if (vq < sve_vq_from_vl(vl)) - break; + if (!res) + return true;
- /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static void setup_za_regs(void) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index caede9b574cb..ab364e95a9b2 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -56,6 +56,15 @@ TEST_INST_SUBDIRS := no_alu32 ifneq ($(BPF_GCC),) TEST_GEN_PROGS += test_progs-bpf_gcc TEST_INST_SUBDIRS += bpf_gcc + +# The following tests contain C code that, although technically legal, +# triggers GCC warnings that cannot be disabled: declaration of +# anonymous struct types in function parameter lists. +progs/btf_dump_test_case_bitfields.c-bpf_gcc-CFLAGS := -Wno-error +progs/btf_dump_test_case_namespacing.c-bpf_gcc-CFLAGS := -Wno-error +progs/btf_dump_test_case_packing.c-bpf_gcc-CFLAGS := -Wno-error +progs/btf_dump_test_case_padding.c-bpf_gcc-CFLAGS := -Wno-error +progs/btf_dump_test_case_syntax.c-bpf_gcc-CFLAGS := -Wno-error endif
ifneq ($(CLANG_CPUV4),) @@ -386,24 +395,25 @@ $(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h # $1 - input .c file # $2 - output .o file # $3 - CFLAGS +# $4 - binary name define CLANG_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v3 -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32 define CLANG_NOALU32_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4 define CLANG_CPUV4_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2 endef # Build BPF object using GCC define GCC_BPF_BUILD_RULE - $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2) - $(Q)$(BPF_GCC) $3 -O2 -c $1 -o $2 + $(call msg,GCC-BPF,$4,$2) + $(Q)$(BPF_GCC) $3 -DBPF_NO_PRESERVE_ACCESS_INDEX -Wno-attributes -O2 -c $1 -o $2 endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c @@ -442,7 +452,7 @@ LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(ske # $eval()) and pass control to DEFINE_TEST_RUNNER_RULES. # Parameters: # $1 - test runner base binary name (e.g., test_progs) -# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc) +# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc) define DEFINE_TEST_RUNNER
TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2 @@ -470,7 +480,7 @@ endef # Using TRUNNER_XXX variables, provided by callers of DEFINE_TEST_RUNNER and # set up by DEFINE_TEST_RUNNER itself, create test runner build rules with: # $1 - test runner base binary name (e.g., test_progs) -# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc) +# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, bpf_gcc, etc) define DEFINE_TEST_RUNNER_RULES
ifeq ($($(TRUNNER_OUTPUT)-dir),) @@ -492,7 +502,9 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \ $(wildcard $(BPFDIR)/*.bpf.h) \ | $(TRUNNER_OUTPUT) $$(BPFOBJ) $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \ - $(TRUNNER_BPF_CFLAGS)) + $(TRUNNER_BPF_CFLAGS) \ + $$($$<-CFLAGS) \ + $$($$<-$2-CFLAGS),$(TRUNNER_BINARY))
$(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) @@ -702,6 +714,8 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
+# Linking uprobe_multi can fail due to relocation overflows on mips. +$(OUTPUT)/uprobe_multi: CFLAGS += $(if $(filter mips, $(ARCH)),-mxgot) $(OUTPUT)/uprobe_multi: uprobe_multi.c $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 73ce11b0547d..b705cbabe1e2 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -10,6 +10,7 @@ #include <sys/sysinfo.h> #include <signal.h> #include "bench.h" +#include "bpf_util.h" #include "testing_helpers.h"
struct env env = { diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index 68180d8f8558..005c401b3e22 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -10,6 +10,7 @@ #include <math.h> #include <time.h> #include <sys/syscall.h> +#include <limits.h>
struct cpu_set { bool *cpus; diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c index 18405c3b7cee..af10c309359a 100644 --- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c +++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c @@ -412,7 +412,7 @@ static void test_sk_storage_map_stress_free(void) rlim_new.rlim_max = rlim_new.rlim_cur + 128; err = setrlimit(RLIMIT_NOFILE, &rlim_new); CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d", - rlim_new.rlim_cur, errno); + (unsigned long) rlim_new.rlim_cur, errno); }
err = do_sk_storage_map_stress_free(); diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 0877b60ec81f..d2acc8875212 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -465,3 +465,27 @@ int get_socket_local_port(int sock_fd)
return -1; } + +int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param) +{ + struct ifreq ifr = {0}; + int sockfd, err; + + sockfd = socket(AF_INET, SOCK_DGRAM, 0); + if (sockfd < 0) + return -errno; + + memcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); + + ring_param->cmd = ETHTOOL_GRINGPARAM; + ifr.ifr_data = (char *)ring_param; + + if (ioctl(sockfd, SIOCETHTOOL, &ifr) < 0) { + err = errno; + close(sockfd); + return -err; + } + + close(sockfd); + return 0; +} diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 5eccc67d1a99..11cbe194769b 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -9,8 +9,11 @@ typedef __u16 __sum16; #include <linux/if_packet.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/ethtool.h> +#include <linux/sockios.h> #include <netinet/tcp.h> #include <bpf/bpf_endian.h> +#include <net/if.h>
#define MAGIC_VAL 0x1234 #define NUM_ITER 100000 @@ -60,6 +63,7 @@ int make_sockaddr(int family, const char *addr_str, __u16 port, struct sockaddr_storage *addr, socklen_t *len); char *ping_command(int family); int get_socket_local_port(int sock_fd); +int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param);
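The get_hw_ring_size() helper added to network_helpers.c above wraps the classic ETHTOOL_GRINGPARAM ioctl and is exported through network_helpers.h. A minimal usage sketch follows; the interface name "eth0" is only a placeholder, not something taken from the patch:

	#include <stdio.h>
	#include <linux/ethtool.h>
	#include "network_helpers.h"

	/* Illustrative only: print the current and maximum RX/TX ring sizes. */
	static void show_ring_sizes(void)
	{
		struct ethtool_ringparam ring = {};
		int err = get_hw_ring_size("eth0", &ring);	/* "eth0" is a placeholder */

		if (err) {
			fprintf(stderr, "get_hw_ring_size failed: %d\n", err);
			return;
		}
		printf("rx %u/%u tx %u/%u\n", ring.rx_pending, ring.rx_max_pending,
		       ring.tx_pending, ring.tx_max_pending);
	}

The helper returns 0 on success and a negative errno value on failure, so callers can report the reason directly.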
struct nstoken; /** diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c index b52ff8ce34db..16bed9dd8e6a 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c @@ -95,7 +95,7 @@ static unsigned short get_local_port(int fd) struct sockaddr_in6 addr; socklen_t addrlen = sizeof(addr);
- if (!getsockname(fd, &addr, &addrlen)) + if (!getsockname(fd, (struct sockaddr *)&addr, &addrlen)) return ntohs(addr.sin6_port);
return 0; diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 47f42e680105..26019313e1fc 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include "progs/core_reloc_types.h" #include "bpf_testmod/bpf_testmod.h" diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c index 5c0ebe6ba866..95ea5a6a5f18 100644 --- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c +++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c @@ -4,7 +4,6 @@ #include <sys/types.h> #include <sys/socket.h> #include <net/if.h> -#include <linux/in6.h>
#include "test_progs.h" #include "network_helpers.h" diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index c4773173a4e4..3171047414a7 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> -#include <error.h> -#include <linux/if.h> #include <linux/if_tun.h> #include <sys/uio.h>
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index c07991544a78..34f8822fd221 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> #include "kfree_skb.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h index 61333f2a03f9..68c08309dbc8 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h @@ -27,8 +27,6 @@ } \ })
-#define NETNS "ns_lwt" - static inline int netns_create(void) { return system("ip netns add " NETNS); diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c index 2bc932a18c17..7b458ae5f0f8 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c @@ -47,13 +47,13 @@ #include <linux/if_ether.h> #include <linux/if_packet.h> #include <linux/if_tun.h> -#include <linux/icmp.h> #include <arpa/inet.h> #include <unistd.h> #include <errno.h> #include <stdbool.h> #include <stdlib.h>
+#define NETNS "ns_lwt_redirect" #include "lwt_helpers.h" #include "test_progs.h" #include "network_helpers.h" diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c index f4bb2d5fcae0..920ee3042d09 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c @@ -48,6 +48,8 @@ * For case 2, force UDP packets to overflow fq limit. As long as kernel * is not crashed, it is considered successful. */ +#define NETNS "ns_lwt_reroute" +#include <netinet/in.h> #include "lwt_helpers.h" #include "network_helpers.h" #include <linux/net_tstamp.h> diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c index 24d493482ffc..2c57ceede095 100644 --- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -11,78 +11,168 @@ #include <sched.h> #include <sys/wait.h> #include <sys/mount.h> -#include <sys/fcntl.h> +#include <fcntl.h> +#include "network_helpers.h"
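The lwt_helpers.h hunk above drops the shared NETNS name, and lwt_redirect.c / lwt_reroute.c now each define a unique NETNS before including the header, so the two LWT tests no longer collide on a single namespace name. A minimal sketch of the define-before-include pattern, using an invented test name:

	/* hypothetical_lwt_test.c -- illustrative only, name is not from the patch */
	#define NETNS "ns_lwt_example"		/* per-test namespace name */
	#include "lwt_helpers.h"		/* helpers expand NETNS into their "ip netns" commands */

	static int setup_ns(void)
	{
		return netns_create();		/* runs: ip netns add ns_lwt_example */
	}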
#define STACK_SIZE (1024 * 1024) static char child_stack[STACK_SIZE];
-static int test_current_pid_tgid(void *args) +static int get_pid_tgid(pid_t *pid, pid_t *tgid, + struct test_ns_current_pid_tgid__bss *bss) { - struct test_ns_current_pid_tgid__bss *bss; - struct test_ns_current_pid_tgid *skel; - int err = -1, duration = 0; - pid_t tgid, pid; struct stat st; + int err;
- skel = test_ns_current_pid_tgid__open_and_load(); - if (CHECK(!skel, "skel_open_load", "failed to load skeleton\n")) - goto cleanup; - - pid = syscall(SYS_gettid); - tgid = getpid(); + *pid = syscall(SYS_gettid); + *tgid = getpid();
err = stat("/proc/self/ns/pid", &st); - if (CHECK(err, "stat", "failed /proc/self/ns/pid: %d\n", err)) - goto cleanup; + if (!ASSERT_OK(err, "stat /proc/self/ns/pid")) + return err;
- bss = skel->bss; bss->dev = st.st_dev; bss->ino = st.st_ino; bss->user_pid = 0; bss->user_tgid = 0; + return 0; +} + +static int test_current_pid_tgid_tp(void *args) +{ + struct test_ns_current_pid_tgid__bss *bss; + struct test_ns_current_pid_tgid *skel; + int ret = -1, err; + pid_t tgid, pid; + + skel = test_ns_current_pid_tgid__open(); + if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open")) + return ret; + + bpf_program__set_autoload(skel->progs.tp_handler, true); + + err = test_ns_current_pid_tgid__load(skel); + if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load")) + goto cleanup; + + bss = skel->bss; + if (get_pid_tgid(&pid, &tgid, bss)) + goto cleanup;
err = test_ns_current_pid_tgid__attach(skel); - if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + if (!ASSERT_OK(err, "test_ns_current_pid_tgid__attach")) goto cleanup;
/* trigger tracepoint */ usleep(1); - ASSERT_EQ(bss->user_pid, pid, "pid"); - ASSERT_EQ(bss->user_tgid, tgid, "tgid"); - err = 0; + if (!ASSERT_EQ(bss->user_pid, pid, "pid")) + goto cleanup; + if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid")) + goto cleanup; + ret = 0;
cleanup: - test_ns_current_pid_tgid__destroy(skel); + test_ns_current_pid_tgid__destroy(skel); + return ret; +}
- return err; +static int test_current_pid_tgid_cgrp(void *args) +{ + struct test_ns_current_pid_tgid__bss *bss; + struct test_ns_current_pid_tgid *skel; + int server_fd = -1, ret = -1, err; + int cgroup_fd = *(int *)args; + pid_t tgid, pid; + + skel = test_ns_current_pid_tgid__open(); + if (!ASSERT_OK_PTR(skel, "test_ns_current_pid_tgid__open")) + return ret; + + bpf_program__set_autoload(skel->progs.cgroup_bind4, true); + + err = test_ns_current_pid_tgid__load(skel); + if (!ASSERT_OK(err, "test_ns_current_pid_tgid__load")) + goto cleanup; + + bss = skel->bss; + if (get_pid_tgid(&pid, &tgid, bss)) + goto cleanup; + + skel->links.cgroup_bind4 = bpf_program__attach_cgroup( + skel->progs.cgroup_bind4, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.cgroup_bind4, "bpf_program__attach_cgroup")) + goto cleanup; + + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_server")) + goto cleanup; + + if (!ASSERT_EQ(bss->user_pid, pid, "pid")) + goto cleanup; + if (!ASSERT_EQ(bss->user_tgid, tgid, "tgid")) + goto cleanup; + ret = 0; + +cleanup: + if (server_fd >= 0) + close(server_fd); + test_ns_current_pid_tgid__destroy(skel); + return ret; }
-static void test_ns_current_pid_tgid_new_ns(void) +static void test_ns_current_pid_tgid_new_ns(int (*fn)(void *), void *arg) { - int wstatus, duration = 0; + int wstatus; pid_t cpid;
/* Create a process in a new namespace, this process * will be the init process of this new namespace hence will be pid 1. */ - cpid = clone(test_current_pid_tgid, child_stack + STACK_SIZE, - CLONE_NEWPID | SIGCHLD, NULL); + cpid = clone(fn, child_stack + STACK_SIZE, + CLONE_NEWPID | SIGCHLD, arg);
- if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno))) + if (!ASSERT_NEQ(cpid, -1, "clone")) return;
- if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno))) + if (!ASSERT_NEQ(waitpid(cpid, &wstatus, 0), -1, "waitpid")) return;
- if (CHECK(WEXITSTATUS(wstatus) != 0, "newns_pidtgid", "failed")) + if (!ASSERT_OK(WEXITSTATUS(wstatus), "newns_pidtgid")) return; }
+static void test_in_netns(int (*fn)(void *), void *arg) +{ + struct nstoken *nstoken = NULL; + + SYS(cleanup, "ip netns add ns_current_pid_tgid"); + SYS(cleanup, "ip -net ns_current_pid_tgid link set dev lo up"); + + nstoken = open_netns("ns_current_pid_tgid"); + if (!ASSERT_OK_PTR(nstoken, "open_netns")) + goto cleanup; + + test_ns_current_pid_tgid_new_ns(fn, arg); + +cleanup: + if (nstoken) + close_netns(nstoken); + SYS_NOFAIL("ip netns del ns_current_pid_tgid"); +} + /* TODO: use a different tracepoint */ void serial_test_ns_current_pid_tgid(void) { - if (test__start_subtest("ns_current_pid_tgid_root_ns")) - test_current_pid_tgid(NULL); - if (test__start_subtest("ns_current_pid_tgid_new_ns")) - test_ns_current_pid_tgid_new_ns(); + if (test__start_subtest("root_ns_tp")) + test_current_pid_tgid_tp(NULL); + if (test__start_subtest("new_ns_tp")) + test_ns_current_pid_tgid_new_ns(test_current_pid_tgid_tp, NULL); + if (test__start_subtest("new_ns_cgrp")) { + int cgroup_fd = -1; + + cgroup_fd = test__join_cgroup("/sock_addr"); + if (ASSERT_GE(cgroup_fd, 0, "join_cgroup")) { + test_in_netns(test_current_pid_tgid_cgrp, &cgroup_fd); + close(cgroup_fd); + } + } } diff --git a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c index daa952711d8f..e9c07d561ded 100644 --- a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c +++ b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> #include "test_parse_tcp_hdr_opt.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index de2466547efe..a1ab0af00454 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -18,7 +18,6 @@ #include <arpa/inet.h> #include <assert.h> #include <errno.h> -#include <error.h> #include <fcntl.h> #include <sched.h> #include <stdio.h> diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index af3c31f82a8a..5a6401733587 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -421,7 +421,7 @@ static int set_forwarding(bool enable)
static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp) { - struct __kernel_timespec pkt_ts = {}; + struct timespec pkt_ts = {}; char ctl[CMSG_SPACE(sizeof(pkt_ts))]; struct timespec now_ts; struct msghdr msg = {}; @@ -445,7 +445,7 @@ static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
cmsg = CMSG_FIRSTHDR(&msg); if (cmsg && cmsg->cmsg_level == SOL_SOCKET && - cmsg->cmsg_type == SO_TIMESTAMPNS_NEW) + cmsg->cmsg_type == SO_TIMESTAMPNS) memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec; @@ -487,9 +487,9 @@ static int wait_netstamp_needed_key(void) if (!ASSERT_GE(srv_fd, 0, "start_server")) goto done;
- err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, + err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS, &opt, sizeof(opt)); - if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)")) goto done;
cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS); @@ -571,9 +571,9 @@ static void test_inet_dtime(int family, int type, const char *addr, __u16 port) return;
/* Ensure the kernel puts the (rcv) timestamp for all skb */ - err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, + err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS, &opt, sizeof(opt)); - if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)")) goto done;
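The tc_redirect.c hunks above replace SO_TIMESTAMPNS_NEW/__kernel_timespec with the plain SO_TIMESTAMPNS/struct timespec pair that older uapi and libc headers provide. A rough sketch of the cmsg receive path involved, assuming the socket already has SO_TIMESTAMPNS enabled via setsockopt() (illustrative only, not the test code):

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <time.h>

	/* Illustrative only: pull the kernel receive timestamp out of the cmsg. */
	static ssize_t rx_with_tstamp(int fd, char *buf, size_t len, struct timespec *ts)
	{
		char ctl[CMSG_SPACE(sizeof(*ts))];
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctl, .msg_controllen = sizeof(ctl),
		};
		struct cmsghdr *cmsg;
		ssize_t n = recvmsg(fd, &msg, 0);

		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
			if (cmsg->cmsg_level == SOL_SOCKET &&
			    cmsg->cmsg_type == SO_TIMESTAMPNS)
				memcpy(ts, CMSG_DATA(cmsg), sizeof(*ts));
		return n;
	}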
if (type == SOCK_STREAM) { diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index 8fe84da1b9b4..6a2da7a64419 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include "cgroup_helpers.h" #include "network_helpers.h" diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c index e51721df14fc..dfff6feac12c 100644 --- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -4,6 +4,7 @@ #define _GNU_SOURCE #include <linux/compiler.h> #include <linux/ring_buffer.h> +#include <linux/build_bug.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h index a0778fe7857a..41d59f0ee606 100644 --- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h @@ -3,8 +3,6 @@ #ifndef __PROGS_CG_STORAGE_MULTI_H #define __PROGS_CG_STORAGE_MULTI_H
-#include <asm/types.h> - struct cgroup_value { __u32 egress_pkts; __u32 ingress_pkts; diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c index f5ac5f3e8919..568816307f71 100644 --- a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c +++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c @@ -31,6 +31,7 @@ int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
if (fmode & FMODE_WRITE) return -EACCES; + barrier();
return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c index 0763d49f9c42..d0010e698f66 100644 --- a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c @@ -10,18 +10,29 @@ __u64 user_tgid = 0; __u64 dev = 0; __u64 ino = 0;
-SEC("tracepoint/syscalls/sys_enter_nanosleep") -int handler(const void *ctx) +static void get_pid_tgid(void) { struct bpf_pidns_info nsdata;
if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata, sizeof(struct bpf_pidns_info))) - return 0; + return;
user_pid = nsdata.pid; user_tgid = nsdata.tgid; +}
+SEC("?tracepoint/syscalls/sys_enter_nanosleep") +int tp_handler(const void *ctx) +{ + get_pid_tgid(); return 0; }
+SEC("?cgroup/bind4") +int cgroup_bind4(struct bpf_sock_addr *ctx) +{ + get_pid_tgid(); + return 1; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp index f4936834f76f..435341c25420 100644 --- a/tools/testing/selftests/bpf/test_cpp.cpp +++ b/tools/testing/selftests/bpf/test_cpp.cpp @@ -6,6 +6,10 @@ #include <bpf/libbpf.h> #include <bpf/bpf.h> #include <bpf/btf.h> + +#ifndef _Bool +#define _Bool bool +#endif #include "test_core_extern.skel.h"
template <typename T> diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 4d0650cfb5cd..fda7589c5023 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -126,7 +126,8 @@ static int sched_next_online(int pid, int *next_to_try)
while (next < nr_cpus) { CPU_ZERO(&cpuset); - CPU_SET(next++, &cpuset); + CPU_SET(next, &cpuset); + next++; if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) { ret = 0; break; diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 4d582cac2c09..74620ed3a166 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -10,7 +10,6 @@ #include <sched.h> #include <signal.h> #include <string.h> -#include <execinfo.h> /* backtrace */ #include <sys/sysinfo.h> /* get_nprocs */ #include <netinet/in.h> #include <sys/select.h> @@ -19,6 +18,21 @@ #include <bpf/btf.h> #include "json_writer.h"
+#ifdef __GLIBC__ +#include <execinfo.h> /* backtrace */ +#endif + +/* Default backtrace funcs if missing at link */ +__weak int backtrace(void **buffer, int size) +{ + return 0; +} + +__weak void backtrace_symbols_fd(void *const *buffer, int size, int fd) +{ + dprintf(fd, "<backtrace not supported>\n"); +} + static bool verbose(void) { return env.verbosity > VERBOSE_NONE; @@ -1690,7 +1704,7 @@ int main(int argc, char **argv) /* launch workers if requested */ env.worker_id = -1; /* main process */ if (env.workers) { - env.worker_pids = calloc(sizeof(__pid_t), env.workers); + env.worker_pids = calloc(sizeof(pid_t), env.workers); env.worker_socks = calloc(sizeof(int), env.workers); if (env.debug) fprintf(stdout, "Launching %d workers.\n", env.workers); diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index 8d994884c7b4..6acffe0426f0 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -220,13 +220,13 @@ int parse_test_list(const char *s, bool is_glob_pattern) { char *input, *state = NULL, *test_spec; - int err = 0; + int err = 0, cnt = 0;
input = strdup(s); if (!input) return -ENOMEM;
- while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) { + while ((test_spec = strtok_r(cnt++ ? NULL : input, ",", &state))) { err = insert_test(set, test_spec, is_glob_pattern); if (err) break; diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c index 2a6efbd0401e..762e4b5ec955 100644 --- a/tools/testing/selftests/bpf/unpriv_helpers.c +++ b/tools/testing/selftests/bpf/unpriv_helpers.c @@ -2,7 +2,6 @@
#include <stdbool.h> #include <stdlib.h> -#include <error.h> #include <stdio.h>
#include "unpriv_helpers.h" diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 0ad98b6a8e6e..611b5a0a6f7e 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -753,13 +753,13 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs) static int parse_stats(const char *stats_str, struct stat_specs *specs) { char *input, *state = NULL, *next; - int err; + int err, cnt = 0;
input = strdup(stats_str); if (!input) return -ENOMEM;
- while ((next = strtok_r(state ? NULL : input, ",", &state))) { + while ((next = strtok_r(cnt++ ? NULL : input, ",", &state))) { err = parse_stat(next, specs); if (err) return err; @@ -1444,7 +1444,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs, while (fgets(line, sizeof(line), f)) { char *input = line, *state = NULL, *next; struct verif_stats *st = NULL; - int col = 0; + int col = 0, cnt = 0;
if (!header) { void *tmp; @@ -1462,7 +1462,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs, *stat_cntp += 1; }
- while ((next = strtok_r(state ? NULL : input, ",\n", &state))) { + while ((next = strtok_r(cnt++ ? NULL : input, ",\n", &state))) { if (header) { /* for the first line, set up spec stats */ err = parse_stat(next, specs); diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c index adb77c1a6a74..79f2da8f6ead 100644 --- a/tools/testing/selftests/bpf/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c @@ -288,20 +288,6 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t return 0; }
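The strtok_r() loops changed above in testing_helpers.c and veristat.c key the first iteration off a simple counter instead of off `state`, whose contents after a call are not something portable code should rely on. A generic sketch of the counter-based pattern (illustrative only):

	#include <stdio.h>
	#include <string.h>

	/* Illustrative only: pass the input string on the first call, NULL afterwards. */
	static void print_tokens(char *input)
	{
		char *state = NULL, *tok;
		int cnt = 0;

		while ((tok = strtok_r(cnt++ ? NULL : input, ",", &state)))
			printf("token: %s\n", tok);
	}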
-struct ethtool_channels { - __u32 cmd; - __u32 max_rx; - __u32 max_tx; - __u32 max_other; - __u32 max_combined; - __u32 rx_count; - __u32 tx_count; - __u32 other_count; - __u32 combined_count; -}; - -#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */ - static int rxq_num(const char *ifname) { struct ethtool_channels ch = { diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc index ff7499eb98d6..ce5d2e62731f 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event char type argument -# requires: kprobe_events +# requires: kprobe_events available_filter_functions
case `uname -m` in x86_64) diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc index a202b2ea4baf..4f72c2875f6b 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event string type argument -# requires: kprobe_events +# requires: kprobe_events available_filter_functions
case `uname -m` in x86_64) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 0af1546cc223..44c228bcd699 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -5174,6 +5174,7 @@ __visible bool kvm_rebooting; EXPORT_SYMBOL_GPL(kvm_rebooting);
static DEFINE_PER_CPU(bool, hardware_enabled); +static DEFINE_MUTEX(kvm_usage_lock); static int kvm_usage_count;
static int __hardware_enable_nolock(void) @@ -5206,10 +5207,10 @@ static int kvm_online_cpu(unsigned int cpu) * be enabled. Otherwise running VMs would encounter unrecoverable * errors when scheduled to this CPU. */ - mutex_lock(&kvm_lock); + mutex_lock(&kvm_usage_lock); if (kvm_usage_count) ret = __hardware_enable_nolock(); - mutex_unlock(&kvm_lock); + mutex_unlock(&kvm_usage_lock); return ret; }
@@ -5229,10 +5230,10 @@ static void hardware_disable_nolock(void *junk)
static int kvm_offline_cpu(unsigned int cpu) { - mutex_lock(&kvm_lock); + mutex_lock(&kvm_usage_lock); if (kvm_usage_count) hardware_disable_nolock(NULL); - mutex_unlock(&kvm_lock); + mutex_unlock(&kvm_usage_lock); return 0; }
@@ -5248,9 +5249,9 @@ static void hardware_disable_all_nolock(void) static void hardware_disable_all(void) { cpus_read_lock(); - mutex_lock(&kvm_lock); + mutex_lock(&kvm_usage_lock); hardware_disable_all_nolock(); - mutex_unlock(&kvm_lock); + mutex_unlock(&kvm_usage_lock); cpus_read_unlock(); }
@@ -5281,7 +5282,7 @@ static int hardware_enable_all(void) * enable hardware multiple times. */ cpus_read_lock(); - mutex_lock(&kvm_lock); + mutex_lock(&kvm_usage_lock);
r = 0;
@@ -5295,7 +5296,7 @@ static int hardware_enable_all(void) } }
- mutex_unlock(&kvm_lock); + mutex_unlock(&kvm_usage_lock); cpus_read_unlock();
return r; @@ -5323,13 +5324,13 @@ static int kvm_suspend(void) { /* * Secondary CPUs and CPU hotplug are disabled across the suspend/resume - * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count - * is stable. Assert that kvm_lock is not held to ensure the system - * isn't suspended while KVM is enabling hardware. Hardware enabling - * can be preempted, but the task cannot be frozen until it has dropped - * all locks (userspace tasks are frozen via a fake signal). + * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage + * count is stable. Assert that kvm_usage_lock is not held to ensure + * the system isn't suspended while KVM is enabling hardware. Hardware + * enabling can be preempted, but the task cannot be frozen until it has + * dropped all locks (userspace tasks are frozen via a fake signal). */ - lockdep_assert_not_held(&kvm_lock); + lockdep_assert_not_held(&kvm_usage_lock); lockdep_assert_irqs_disabled();
if (kvm_usage_count) @@ -5339,7 +5340,7 @@ static int kvm_suspend(void)
static void kvm_resume(void) { - lockdep_assert_not_held(&kvm_lock); + lockdep_assert_not_held(&kvm_usage_lock); lockdep_assert_irqs_disabled();
if (kvm_usage_count)
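The kvm_main.c hunks above move the hardware-enable usage count under a new, dedicated kvm_usage_lock so the CPU-hotplug, suspend and resume paths no longer need kvm_lock. A user-space sketch of the general pattern of guarding an enable/disable usage count with its own mutex (illustrative only, not the kernel code; hardware_enable()/hardware_disable() are stand-ins):

	#include <pthread.h>

	static pthread_mutex_t usage_lock = PTHREAD_MUTEX_INITIALIZER;
	static int usage_count;

	static int hardware_enable(void) { return 0; }	/* stand-in */
	static void hardware_disable(void) { }		/* stand-in */

	/* The first user enables the hardware; the count is only touched under usage_lock. */
	static int enable_all(void)
	{
		int err = 0;

		pthread_mutex_lock(&usage_lock);
		if (usage_count++ == 0)
			err = hardware_enable();
		pthread_mutex_unlock(&usage_lock);
		return err;
	}

	/* The last user disables the hardware again. */
	static void disable_all(void)
	{
		pthread_mutex_lock(&usage_lock);
		if (--usage_count == 0)
			hardware_disable();
		pthread_mutex_unlock(&usage_lock);
	}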