I'm announcing the release of the 6.11.2 kernel.
All users of the 6.11 kernel series must upgrade.
The updated 6.11.y git tree can be found at:
	git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.11.y
and can be browsed at the normal kernel.org git web browser:
	https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
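For example, assuming an existing local clone and the usual stable tag naming (the v6.11.2 tag name is an assumption here; the branch name and URL are as given above), the release can be fetched with:

	git remote add stable git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
	git fetch --tags stable linux-6.11.y
	git checkout v6.11.2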
thanks,
greg k-h
------------
.gitignore | 1 Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 | 2 Documentation/arch/arm64/silicon-errata.rst | 2 Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml | 1 Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml | 26 Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml | 1 Documentation/driver-api/ipmi.rst | 2 Documentation/virt/kvm/locking.rst | 33 - Makefile | 2 arch/arm/boot/dts/microchip/sam9x60.dtsi | 4 arch/arm/boot/dts/microchip/sama7g5.dtsi | 2 arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts | 2 arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi | 12 arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts | 2 arch/arm/mach-ep93xx/clock.c | 2 arch/arm/mach-versatile/platsmp-realview.c | 1 arch/arm/vfp/vfpinstr.h | 48 - arch/arm64/Kconfig | 2 arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts | 2 arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi | 3 arch/arm64/boot/dts/mediatek/mt8186.dtsi | 12 arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi | 1 arch/arm64/boot/dts/mediatek/mt8195.dtsi | 12 arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts | 1 arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi | 33 - arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi | 33 + arch/arm64/boot/dts/qcom/sa8775p.dtsi | 2 arch/arm64/boot/dts/qcom/x1e80100.dtsi | 10 arch/arm64/boot/dts/renesas/r9a07g043u.dtsi | 4 arch/arm64/boot/dts/renesas/r9a07g044.dtsi | 4 arch/arm64/boot/dts/renesas/r9a07g054.dtsi | 4 arch/arm64/boot/dts/renesas/r9a08g045.dtsi | 4 arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts | 4 arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts | 2 arch/arm64/boot/dts/ti/k3-am654-idk.dtso | 8 arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts | 4 arch/arm64/boot/dts/ti/k3-j721e-sk.dts | 4 arch/arm64/include/asm/cputype.h | 2 arch/arm64/include/asm/esr.h | 88 +-- arch/arm64/include/uapi/asm/sigcontext.h | 6 arch/arm64/kernel/cpu_errata.c | 10 arch/arm64/kernel/smp.c | 160 +++-- arch/arm64/kvm/hyp/nvhe/ffa.c | 21 arch/arm64/net/bpf_jit_comp.c | 57 +- arch/m68k/kernel/process.c | 2 arch/powerpc/crypto/Kconfig | 1 arch/powerpc/include/asm/asm-compat.h | 6 arch/powerpc/include/asm/atomic.h | 5 arch/powerpc/include/asm/uaccess.h | 7 arch/powerpc/kernel/head_8xx.S | 6 arch/powerpc/kernel/vdso/gettimeofday.S | 4 arch/powerpc/mm/nohash/8xx.c | 4 arch/riscv/include/asm/kvm_vcpu_pmu.h | 21 arch/riscv/kernel/perf_callchain.c | 2 arch/riscv/kvm/vcpu_pmu.c | 14 arch/riscv/kvm/vcpu_sbi.c | 4 arch/s390/include/asm/ftrace.h | 17 arch/s390/kernel/Makefile | 2 arch/s390/kernel/early.c | 7 arch/s390/kernel/earlypgm.S | 23 arch/s390/kernel/entry.S | 16 arch/s390/kernel/stacktrace.c | 19 arch/um/Kconfig | 1 arch/x86/coco/tdx/tdx.c | 6 arch/x86/crypto/aesni-intel_glue.c | 59 +- arch/x86/events/intel/core.c | 8 arch/x86/events/intel/pt.c | 15 arch/x86/include/asm/acpi.h | 8 arch/x86/include/asm/hardirq.h | 8 arch/x86/include/asm/idtentry.h | 6 arch/x86/include/asm/kvm_host.h | 2 arch/x86/kernel/acpi/boot.c | 11 arch/x86/kernel/cpu/sgx/main.c | 27 arch/x86/kernel/head64.c | 3 arch/x86/kernel/jailhouse.c | 1 arch/x86/kernel/mmconf-fam10h_64.c | 1 arch/x86/kernel/process_64.c | 29 - arch/x86/kernel/smpboot.c | 1 arch/x86/kernel/x86_init.c | 1 arch/x86/kvm/lapic.c | 73 +- arch/x86/kvm/svm/svm.c | 2 arch/x86/kvm/vmx/main.c | 2 arch/x86/mm/tlb.c | 7 arch/x86/net/bpf_jit_comp.c | 107 ++- arch/x86/pci/fixup.c | 4 arch/x86/xen/mmu_pv.c | 5 arch/x86/xen/p2m.c | 98 +++ arch/x86/xen/setup.c | 202 +++++-- arch/x86/xen/xen-ops.h | 6 block/bfq-iosched.c | 81 +- block/blk-core.c | 1 
block/elevator.c | 4 block/partitions/core.c | 8 crypto/asymmetric_keys/asymmetric_type.c | 7 crypto/xor.c | 31 - drivers/acpi/acpica/exsystem.c | 11 drivers/acpi/cppc_acpi.c | 43 + drivers/acpi/device_sysfs.c | 5 drivers/acpi/pmic/tps68470_pmic.c | 6 drivers/acpi/resource.c | 12 drivers/acpi/video_detect.c | 8 drivers/ata/libata-eh.c | 8 drivers/ata/libata-scsi.c | 5 drivers/base/core.c | 15 drivers/base/firmware_loader/main.c | 30 + drivers/base/module.c | 14 drivers/block/drbd/drbd_main.c | 6 drivers/block/drbd/drbd_state.c | 2 drivers/block/nbd.c | 15 drivers/block/ublk_drv.c | 62 +- drivers/bluetooth/btusb.c | 5 drivers/bus/arm-integrator-lm.c | 1 drivers/bus/mhi/host/pci_generic.c | 26 drivers/char/hw_random/Kconfig | 1 drivers/char/hw_random/bcm2835-rng.c | 4 drivers/char/hw_random/cctrng.c | 1 drivers/char/hw_random/mtk-rng.c | 2 drivers/char/tpm/tpm-dev-common.c | 2 drivers/char/tpm/tpm2-sessions.c | 1 drivers/char/tpm/tpm2-space.c | 3 drivers/clk/at91/sama7g5.c | 5 drivers/clk/imx/clk-composite-7ulp.c | 7 drivers/clk/imx/clk-composite-8m.c | 53 + drivers/clk/imx/clk-composite-93.c | 15 drivers/clk/imx/clk-fracn-gppll.c | 4 drivers/clk/imx/clk-imx6ul.c | 4 drivers/clk/imx/clk-imx8mp-audiomix.c | 13 drivers/clk/imx/clk-imx8mp.c | 4 drivers/clk/imx/clk-imx8qxp.c | 10 drivers/clk/qcom/clk-alpha-pll.c | 52 + drivers/clk/qcom/clk-alpha-pll.h | 2 drivers/clk/qcom/dispcc-sm8250.c | 9 drivers/clk/qcom/dispcc-sm8550.c | 14 drivers/clk/qcom/gcc-ipq5332.c | 1 drivers/clk/rockchip/clk-rk3228.c | 2 drivers/clk/rockchip/clk-rk3588.c | 2 drivers/clk/starfive/clk-starfive-jh7110-vout.c | 2 drivers/clk/ti/clk-dra7-atl.c | 1 drivers/clocksource/timer-qcom.c | 7 drivers/cpufreq/ti-cpufreq.c | 10 drivers/cpuidle/cpuidle-riscv-sbi.c | 21 drivers/crypto/caam/caamhash.c | 1 drivers/crypto/ccp/sev-dev.c | 15 drivers/crypto/hisilicon/hpre/hpre_main.c | 54 - drivers/crypto/hisilicon/qm.c | 151 +++-- drivers/crypto/hisilicon/sec2/sec_main.c | 16 drivers/crypto/hisilicon/zip/zip_main.c | 23 drivers/crypto/intel/iaa/iaa_crypto_main.c | 4 drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 2 drivers/crypto/intel/qat/qat_common/adf_init.c | 4 drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c | 9 drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c | 14 drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h | 1 drivers/crypto/intel/qat/qat_common/adf_vf_isr.c | 2 drivers/crypto/n2_core.c | 1 drivers/crypto/qcom-rng.c | 4 drivers/cxl/core/pci.c | 8 drivers/edac/igen6_edac.c | 2 drivers/edac/synopsys_edac.c | 35 + drivers/firewire/core-cdev.c | 2 drivers/firmware/arm_scmi/optee.c | 7 drivers/firmware/efi/libstub/tpm.c | 2 drivers/firmware/qcom/qcom_scm.c | 6 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 4 drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 29 - drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 14 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2 drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 14 drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c | 7 drivers/gpu/drm/amd/amdgpu/soc24.c | 23 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 165 ----- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c | 4 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 94 ++- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 18 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 138 +++- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 6 drivers/gpu/drm/amd/display/dc/dc_dsc.h | 3 
drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 4 drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 8 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 5 drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 50 + drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 6 drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 27 drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 13 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c | 12 drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 1 drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 3 drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 2 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 6 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 3 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 6 drivers/gpu/drm/bridge/lontium-lt8912b.c | 35 - drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2 drivers/gpu/drm/mediatek/mtk_crtc.c | 32 + drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 12 drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 2 drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 30 + drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 46 - drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h | 2 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 2 drivers/gpu/drm/msm/dp/dp_display.c | 10 drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 12 drivers/gpu/drm/radeon/evergreen_cs.c | 62 +- drivers/gpu/drm/radeon/radeon_atombios.c | 29 - drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 2 drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 4 drivers/gpu/drm/stm/drv.c | 4 drivers/gpu/drm/stm/ltdc.c | 2 drivers/gpu/drm/vc4/vc4_hdmi.c | 8 drivers/gpu/drm/xe/xe_exec_queue.c | 157 ++--- drivers/gpu/drm/xe/xe_exec_queue.h | 6 drivers/gpu/drm/xe/xe_hw_engine.c | 31 + drivers/gpu/drm/xe/xe_hw_engine.h | 7 drivers/gpu/drm/xe/xe_migrate.c | 2 drivers/gpu/drm/xe/xe_vm.c | 8 drivers/hid/wacom_wac.c | 13 drivers/hid/wacom_wac.h | 2 drivers/hwmon/max16065.c | 17 drivers/hwmon/ntc_thermistor.c | 1 drivers/hwtracing/coresight/coresight-dummy.c | 4 drivers/hwtracing/coresight/coresight-tmc-etr.c | 2 drivers/hwtracing/coresight/coresight-tpdm.c | 6 drivers/i2c/busses/i2c-aspeed.c | 16 drivers/i2c/busses/i2c-isch.c | 3 drivers/i2c/busses/i2c-xiic.c | 41 - drivers/idle/intel_idle.c | 79 ++ drivers/iio/adc/ad7606.c | 8 drivers/iio/adc/ad7606_spi.c | 5 drivers/iio/chemical/bme680_core.c | 7 drivers/iio/magnetometer/ak8975.c | 1 drivers/infiniband/core/cache.c | 4 drivers/infiniband/core/iwcm.c | 2 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2 drivers/infiniband/hw/cxgb4/cm.c | 5 drivers/infiniband/hw/erdma/erdma_verbs.c | 25 drivers/infiniband/hw/hns/hns_roce_ah.c | 14 drivers/infiniband/hw/hns/hns_roce_hem.c | 22 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 33 - drivers/infiniband/hw/hns/hns_roce_qp.c | 16 drivers/infiniband/hw/irdma/verbs.c | 2 drivers/infiniband/hw/mlx5/main.c | 2 drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 drivers/infiniband/hw/mlx5/mr.c | 99 +-- drivers/infiniband/hw/mlx5/umr.c | 3 drivers/infiniband/ulp/rtrs/rtrs-clt.c | 9 drivers/infiniband/ulp/rtrs/rtrs-srv.c | 1 drivers/input/keyboard/adp5588-keys.c | 2 drivers/input/misc/ims-pcu.c | 2 drivers/input/serio/i8042-acpipnpio.h | 37 + drivers/input/touchscreen/ilitek_ts_i2c.c | 18 drivers/interconnect/icc-clk.c | 3 drivers/interconnect/qcom/sm8350.c | 1 drivers/iommu/amd/io_pgtable.c | 8 drivers/iommu/amd/io_pgtable_v2.c | 4 drivers/iommu/amd/iommu.c | 57 -- 
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 4 drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 28 + drivers/iommu/arm/arm-smmu/arm-smmu.c | 2 drivers/iommu/iommufd/hw_pagetable.c | 3 drivers/iommu/iommufd/io_pagetable.c | 8 drivers/iommu/iommufd/selftest.c | 9 drivers/leds/leds-bd2606mvv.c | 23 drivers/leds/leds-gpio.c | 9 drivers/leds/leds-pca995x.c | 21 drivers/md/dm-integrity.c | 15 drivers/md/dm-rq.c | 4 drivers/md/dm.c | 11 drivers/md/md.c | 1 drivers/media/dvb-frontends/rtl2830.c | 2 drivers/media/dvb-frontends/rtl2832.c | 2 drivers/media/platform/imagination/Kconfig | 1 drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c | 9 drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c | 9 drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c | 10 drivers/media/platform/raspberrypi/pisp_be/Kconfig | 1 drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c | 1 drivers/media/tuners/tuner-i2c.h | 4 drivers/mtd/devices/powernv_flash.c | 3 drivers/mtd/devices/slram.c | 2 drivers/mtd/nand/raw/mtk_nand.c | 36 - drivers/net/bareudp.c | 26 drivers/net/bonding/bond_main.c | 6 drivers/net/can/m_can/m_can.c | 14 drivers/net/can/usb/esd_usb.c | 6 drivers/net/ethernet/freescale/enetc/enetc.c | 3 drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 4 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 35 - drivers/net/ethernet/intel/idpf/idpf_txrx.h | 9 drivers/net/ethernet/meta/Kconfig | 2 drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 8 drivers/net/ethernet/realtek/r8169_phy_config.c | 2 drivers/net/ethernet/renesas/ravb.h | 1 drivers/net/ethernet/renesas/ravb_main.c | 18 drivers/net/ethernet/seeq/ether3.c | 2 drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 3 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 37 - drivers/net/netkit.c | 3 drivers/net/phy/aquantia/aquantia_firmware.c | 42 - drivers/net/phy/aquantia/aquantia_leds.c | 3 drivers/net/phy/aquantia/aquantia_main.c | 24 drivers/net/usb/usbnet.c | 37 + drivers/net/virtio_net.c | 88 ++- drivers/net/wireless/ath/ath11k/core.h | 1 drivers/net/wireless/ath/ath11k/mac.c | 12 drivers/net/wireless/ath/ath11k/wmi.c | 4 drivers/net/wireless/ath/ath12k/mac.c | 5 drivers/net/wireless/ath/ath12k/wmi.c | 1 drivers/net/wireless/ath/ath12k/wmi.h | 3 drivers/net/wireless/ath/ath9k/debug.c | 2 drivers/net/wireless/ath/ath9k/htc_drv_debug.c | 2 drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c | 2 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 30 - drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 2 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 2 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h | 40 + drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 11 drivers/net/wireless/intel/iwlwifi/iwl-config.h | 1 drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 2 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 1 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 14 drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 4 drivers/net/wireless/mediatek/mt76/mac80211.c | 2 drivers/net/wireless/mediatek/mt76/mt7603/dma.c | 4 drivers/net/wireless/mediatek/mt76/mt7615/init.c | 3 drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h | 4 drivers/net/wireless/mediatek/mt76/mt7915/init.c | 2 drivers/net/wireless/mediatek/mt76/mt7915/main.c | 3 drivers/net/wireless/mediatek/mt76/mt7921/init.c | 4 drivers/net/wireless/mediatek/mt76/mt7925/mac.c | 5 drivers/net/wireless/mediatek/mt76/mt7925/main.c | 15 
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 3 drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h | 1 drivers/net/wireless/mediatek/mt76/mt7925/pci.c | 2 drivers/net/wireless/mediatek/mt76/mt7996/init.c | 65 +- drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 4 drivers/net/wireless/mediatek/mt76/mt7996/main.c | 6 drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 13 drivers/net/wireless/microchip/wilc1000/hif.c | 4 drivers/net/wireless/realtek/rtw88/coex.c | 38 - drivers/net/wireless/realtek/rtw88/fw.c | 13 drivers/net/wireless/realtek/rtw88/main.c | 7 drivers/net/wireless/realtek/rtw88/rtw8821cu.c | 2 drivers/net/wireless/realtek/rtw88/rtw8822c.c | 10 drivers/net/wireless/realtek/rtw88/rx.h | 2 drivers/net/wireless/realtek/rtw89/core.c | 1 drivers/net/wireless/realtek/rtw89/core.h | 3 drivers/net/wireless/realtek/rtw89/fw.c | 6 drivers/net/wireless/realtek/rtw89/fw.h | 7 drivers/net/wireless/realtek/rtw89/mac.c | 13 drivers/net/wireless/realtek/rtw89/mac.h | 1 drivers/net/wireless/realtek/rtw89/reg.h | 4 drivers/net/wireless/virtual/mac80211_hwsim.c | 2 drivers/ntb/hw/intel/ntb_hw_gen1.c | 2 drivers/ntb/ntb_transport.c | 23 drivers/ntb/test/ntb_perf.c | 2 drivers/nvdimm/namespace_devs.c | 34 - drivers/nvme/host/multipath.c | 2 drivers/pci/controller/dwc/pci-dra7xx.c | 11 drivers/pci/controller/dwc/pci-imx6.c | 15 drivers/pci/controller/dwc/pci-keystone.c | 2 drivers/pci/controller/dwc/pcie-kirin.c | 4 drivers/pci/controller/dwc/pcie-qcom-ep.c | 14 drivers/pci/controller/pcie-xilinx-nwl.c | 39 - drivers/pci/pci.c | 20 drivers/pci/pci.h | 6 drivers/pci/quirks.c | 31 - drivers/perf/alibaba_uncore_drw_pmu.c | 2 drivers/perf/arm-cmn.c | 109 +-- drivers/perf/dwc_pcie_pmu.c | 21 drivers/perf/hisilicon/hisi_pcie_pmu.c | 16 drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c | 1 drivers/pinctrl/mvebu/pinctrl-dove.c | 42 + drivers/pinctrl/pinctrl-single.c | 3 drivers/pinctrl/renesas/pinctrl-rzg2l.c | 8 drivers/pinctrl/ti/pinctrl-ti-iodelay.c | 52 - drivers/platform/cznic/turris-omnia-mcu-trng.c | 4 drivers/platform/x86/ideapad-laptop.c | 48 - drivers/pmdomain/core.c | 6 drivers/power/supply/axp20x_battery.c | 16 drivers/power/supply/max17042_battery.c | 5 drivers/powercap/intel_rapl_common.c | 2 drivers/pps/clients/pps_parport.c | 8 drivers/regulator/of_regulator.c | 2 drivers/remoteproc/imx_rproc.c | 6 drivers/reset/reset-berlin.c | 3 drivers/reset/reset-k210.c | 3 drivers/s390/crypto/ap_bus.c | 17 drivers/scsi/NCR5380.c | 78 +- drivers/scsi/elx/libefc/efc_nport.c | 2 drivers/scsi/lpfc/lpfc_hw4.h | 3 drivers/scsi/lpfc/lpfc_init.c | 21 drivers/scsi/lpfc/lpfc_scsi.c | 2 drivers/scsi/mac_scsi.c | 162 ++--- drivers/scsi/sd.c | 4 drivers/scsi/smartpqi/smartpqi_init.c | 20 drivers/soc/fsl/qe/qmc.c | 24 drivers/soc/fsl/qe/tsa.c | 2 drivers/soc/qcom/smd-rpm.c | 35 - drivers/soc/versatile/soc-integrator.c | 1 drivers/soc/versatile/soc-realview.c | 20 drivers/spi/atmel-quadspi.c | 15 drivers/spi/spi-airoha-snfi.c | 43 - drivers/spi/spi-bcmbca-hsspi.c | 8 drivers/spi/spi-fsl-lpspi.c | 1 drivers/spi/spi-nxp-fspi.c | 54 + drivers/spi/spi-ppc4xx.c | 7 drivers/staging/media/starfive/camss/stf-camss.c | 2 drivers/thermal/gov_bang_bang.c | 14 drivers/thermal/thermal_core.c | 78 +- drivers/thermal/thermal_core.h | 27 drivers/thermal/thermal_sysfs.c | 205 +++---- drivers/thermal/thermal_trip.c | 6 drivers/tty/serial/8250/8250_omap.c | 2 drivers/tty/serial/qcom_geni_serial.c | 101 +-- drivers/tty/serial/rp2.c | 2 drivers/tty/serial/serial_core.c | 13 drivers/ufs/host/ufs-qcom.c | 2 drivers/usb/cdns3/cdnsp-ring.c | 
6 drivers/usb/cdns3/host.c | 4 drivers/usb/class/cdc-acm.c | 2 drivers/usb/dwc2/drd.c | 9 drivers/usb/gadget/udc/dummy_hcd.c | 14 drivers/usb/host/xhci-pci.c | 22 drivers/usb/host/xhci-ring.c | 14 drivers/usb/host/xhci.h | 1 drivers/usb/misc/appledisplay.c | 15 drivers/usb/misc/cypress_cy7c63.c | 4 drivers/usb/misc/yurex.c | 10 drivers/usb/typec/ucsi/ucsi.c | 33 - drivers/vdpa/mlx5/core/mr.c | 3 drivers/vhost/vdpa.c | 16 drivers/video/fbdev/hpfb.c | 1 drivers/video/fbdev/xen-fbfront.c | 1 drivers/virtio/virtio.c | 59 +- drivers/watchdog/imx_sc_wdt.c | 24 drivers/xen/swiotlb-xen.c | 10 fs/autofs/inode.c | 3 fs/btrfs/btrfs_inode.h | 1 fs/btrfs/ctree.h | 2 fs/btrfs/extent-tree.c | 4 fs/btrfs/file.c | 34 + fs/btrfs/ioctl.c | 4 fs/btrfs/subpage.c | 10 fs/btrfs/tree-checker.c | 2 fs/cachefiles/xattr.c | 34 - fs/debugfs/inode.c | 8 fs/erofs/decompressor.c | 2 fs/erofs/inode.c | 20 fs/erofs/zdata.c | 71 +- fs/eventpoll.c | 2 fs/exfat/nls.c | 5 fs/ext4/ialloc.c | 14 fs/ext4/inline.c | 35 - fs/ext4/mballoc.c | 10 fs/ext4/super.c | 29 - fs/f2fs/compress.c | 36 + fs/f2fs/data.c | 10 fs/f2fs/dir.c | 3 fs/f2fs/extent_cache.c | 4 fs/f2fs/f2fs.h | 37 - fs/f2fs/file.c | 114 ++-- fs/f2fs/inode.c | 5 fs/f2fs/namei.c | 68 -- fs/f2fs/segment.c | 15 fs/f2fs/super.c | 16 fs/f2fs/xattr.c | 14 fs/fcntl.c | 14 fs/fuse/file.c | 2 fs/inode.c | 4 fs/jfs/jfs_dmap.c | 4 fs/jfs/jfs_imap.c | 2 fs/namespace.c | 14 fs/netfs/main.c | 4 fs/nfs/nfs4proc.c | 16 fs/nfs/nfs4state.c | 1 fs/nfsd/filecache.c | 3 fs/nfsd/nfs4idmap.c | 13 fs/nfsd/nfs4recover.c | 8 fs/nfsd/nfs4state.c | 164 +++-- fs/nilfs2/btree.c | 12 fs/quota/dquot.c | 3 fs/smb/server/vfs.c | 19 include/acpi/acoutput.h | 5 include/acpi/cppc_acpi.h | 2 include/linux/bpf.h | 9 include/linux/bpf_lsm.h | 8 include/linux/compiler.h | 2 include/linux/f2fs_fs.h | 2 include/linux/lsm_hook_defs.h | 1 include/linux/lsm_hooks.h | 1 include/linux/sbitmap.h | 2 include/linux/soc/qcom/geni-se.h | 9 include/linux/usb/usbnet.h | 15 include/linux/virtio.h | 11 include/net/bluetooth/hci_core.h | 4 include/net/ip.h | 2 include/net/mac80211.h | 7 include/net/netfilter/nf_tables.h | 2 include/net/tcp.h | 21 include/sound/tas2563-tlv.h | 279 ++++++++++ include/sound/tas2781-tlv.h | 260 --------- include/sound/tas2781.h | 7 include/trace/events/f2fs.h | 3 io_uring/io-wq.c | 25 io_uring/io_uring.c | 4 io_uring/rw.c | 8 io_uring/sqpoll.c | 12 kernel/bpf/bpf_lsm.c | 34 + kernel/bpf/btf.c | 13 kernel/bpf/helpers.c | 12 kernel/bpf/syscall.c | 4 kernel/bpf/verifier.c | 134 ++-- kernel/cgroup/pids.c | 18 kernel/kthread.c | 10 kernel/locking/lockdep.c | 48 + kernel/module/Makefile | 2 kernel/padata.c | 6 kernel/rcu/tree_nocb.h | 5 kernel/sched/deadline.c | 38 - kernel/sched/fair.c | 34 - kernel/trace/bpf_trace.c | 15 lib/debugobjects.c | 5 lib/sbitmap.c | 4 lib/xz/xz_crc32.c | 2 lib/xz/xz_private.h | 4 mm/damon/vaddr.c | 2 mm/huge_memory.c | 2 mm/hugetlb.c | 176 +++--- mm/internal.h | 11 mm/memory.c | 8 mm/migrate.c | 2 mm/mmap.c | 4 mm/util.c | 2 net/bluetooth/hci_conn.c | 6 net/bluetooth/hci_sync.c | 5 net/bluetooth/mgmt.c | 13 net/can/bcm.c | 4 net/can/j1939/transport.c | 8 net/core/filter.c | 71 +- net/core/sock_map.c | 1 net/ipv4/icmp.c | 103 ++- net/ipv6/Kconfig | 1 net/ipv6/icmp.c | 28 - net/ipv6/netfilter/nf_reject_ipv6.c | 14 net/ipv6/route.c | 2 net/ipv6/rpl_iptunnel.c | 12 net/mac80211/iface.c | 17 net/mac80211/mlme.c | 30 - net/mac80211/offchannel.c | 1 net/mac80211/rate.c | 2 net/mac80211/scan.c | 2 net/mac80211/tx.c | 2 net/netfilter/nf_conntrack_netlink.c | 7 
net/netfilter/nf_tables_api.c | 47 + net/netfilter/nft_compat.c | 6 net/netfilter/nft_dynset.c | 6 net/netfilter/nft_log.c | 2 net/netfilter/nft_meta.c | 2 net/netfilter/nft_numgen.c | 2 net/netfilter/nft_set_pipapo.c | 13 net/netfilter/nft_tunnel.c | 5 net/qrtr/af_qrtr.c | 2 net/tipc/bcast.c | 2 net/unix/af_unix.c | 80 +- net/unix/garbage.c | 16 net/wireless/nl80211.c | 3 net/wireless/scan.c | 6 net/wireless/sme.c | 3 net/wireless/util.c | 10 net/xdp/xsk_buff_pool.c | 25 samples/bpf/Makefile | 6 security/apparmor/include/net.h | 3 security/apparmor/lsm.c | 17 security/apparmor/net.c | 2 security/bpf/hooks.c | 1 security/integrity/ima/ima.h | 2 security/integrity/ima/ima_iint.c | 20 security/integrity/ima/ima_main.c | 2 security/landlock/fs.c | 9 security/security.c | 68 +- security/selinux/hooks.c | 80 +- security/selinux/include/objsec.h | 5 security/selinux/netlabel.c | 23 security/smack/smack.h | 5 security/smack/smack_lsm.c | 70 +- security/smack/smack_netfilter.c | 4 security/smack/smackfs.c | 2 sound/pci/hda/cs35l41_hda_spi.c | 1 sound/pci/hda/tas2781_hda_i2c.c | 2 sound/soc/codecs/rt5682.c | 4 sound/soc/codecs/rt5682s.c | 4 sound/soc/codecs/tas2781-comlib.c | 3 sound/soc/codecs/tas2781-fmwlib.c | 1 sound/soc/codecs/tas2781-i2c.c | 27 sound/soc/loongson/loongson_card.c | 4 tools/bpf/bpftool/btf.c | 7 tools/bpf/runqslower/Makefile | 3 tools/build/feature/test-all.c | 4 tools/include/nolibc/string.h | 1 tools/lib/bpf/btf.c | 4 tools/lib/bpf/btf_relocate.c | 2 tools/lib/bpf/libbpf.c | 75 +- tools/objtool/arch/loongarch/decode.c | 11 tools/objtool/check.c | 23 tools/objtool/include/objtool/elf.h | 1 tools/perf/builtin-c2c.c | 14 tools/perf/builtin-inject.c | 1 tools/perf/builtin-mem.c | 20 tools/perf/builtin-report.c | 3 tools/perf/builtin-sched.c | 8 tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json | 60 +- tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json | 60 +- tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json | 57 -- tools/perf/scripts/python/arm-cs-trace-disasm.py | 9 tools/perf/ui/hist.c | 10 tools/perf/util/Build | 1 tools/perf/util/annotate-data.c | 2 tools/perf/util/bpf_skel/lock_data.h | 4 tools/perf/util/bpf_skel/vmlinux/vmlinux.h | 1 tools/perf/util/disasm.c | 187 ------ tools/perf/util/disasm_bpf.c | 195 ++++++ tools/perf/util/disasm_bpf.h | 12 tools/perf/util/dwarf-aux.c | 16 tools/perf/util/mem-events.c | 20 tools/perf/util/mem-events.h | 4 tools/perf/util/session.c | 3 tools/perf/util/stat-display.c | 3 tools/perf/util/time-utils.c | 4 tools/perf/util/tool.h | 1 tools/power/cpupower/lib/powercap.c | 8 tools/testing/selftests/arm64/signal/Makefile | 2 tools/testing/selftests/arm64/signal/sve_helpers.c | 56 ++ tools/testing/selftests/arm64/signal/sve_helpers.h | 21 tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c | 46 - tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c | 30 - tools/testing/selftests/arm64/signal/testcases/ssve_regs.c | 36 - tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c | 36 - tools/testing/selftests/arm64/signal/testcases/sve_regs.c | 32 - tools/testing/selftests/arm64/signal/testcases/za_no_regs.c | 32 - tools/testing/selftests/arm64/signal/testcases/za_regs.c | 36 - tools/testing/selftests/bpf/Makefile | 13 tools/testing/selftests/bpf/bench.c | 1 tools/testing/selftests/bpf/bench.h | 1 tools/testing/selftests/bpf/map_tests/sk_storage_map.c | 2 tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c | 2 
tools/testing/selftests/bpf/prog_tests/core_reloc.c | 1 tools/testing/selftests/bpf/prog_tests/crypto_sanity.c | 1 tools/testing/selftests/bpf/prog_tests/decap_sanity.c | 1 tools/testing/selftests/bpf/prog_tests/flow_dissector.c | 2 tools/testing/selftests/bpf/prog_tests/kfree_skb.c | 1 tools/testing/selftests/bpf/prog_tests/lwt_redirect.c | 1 tools/testing/selftests/bpf/prog_tests/lwt_reroute.c | 1 tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c | 2 tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c | 1 tools/testing/selftests/bpf/prog_tests/sk_lookup.c | 1 tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 12 tools/testing/selftests/bpf/prog_tests/tcp_rtt.c | 1 tools/testing/selftests/bpf/prog_tests/user_ringbuf.c | 1 tools/testing/selftests/bpf/progs/bpf_misc.h | 24 tools/testing/selftests/bpf/progs/cg_storage_multi.h | 2 tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c | 1 tools/testing/selftests/bpf/progs/verifier_spill_fill.c | 8 tools/testing/selftests/bpf/test_cpp.cpp | 4 tools/testing/selftests/bpf/test_loader.c | 256 +++++++-- tools/testing/selftests/bpf/test_lru_map.c | 3 tools/testing/selftests/bpf/test_progs.c | 18 tools/testing/selftests/bpf/test_progs.h | 1 tools/testing/selftests/bpf/testing_helpers.c | 6 tools/testing/selftests/bpf/unpriv_helpers.c | 1 tools/testing/selftests/bpf/veristat.c | 8 tools/testing/selftests/dt/test_unprobed_devices.sh | 15 tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc | 12 tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc | 9 tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc | 2 tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc | 2 tools/testing/selftests/kselftest.h | 2 tools/testing/selftests/mm/mseal_test.c | 57 +- tools/testing/selftests/net/af_unix/msg_oob.c | 23 tools/testing/selftests/net/netfilter/ipvs.sh | 2 tools/testing/selftests/resctrl/cat_test.c | 7 virt/kvm/kvm_main.c | 31 - 685 files changed, 7172 insertions(+), 4678 deletions(-)
Aaron Lu (1): x86/sgx: Fix deadlock in SGX NUMA node search
Abel Vesa (1): arm64: dts: qcom: x1e80100: Fix PHY for DP2
Abhinav Kumar (1): drm/msm/dp: enable widebus on all relevant chipsets
Adrian Hunter (1): perf/x86/intel/pt: Fix sampling synchronization
Alan Maguire (1): libbpf: Fix license for btf_relocate.c
Aleksa Sarai (1): autofs: fix missing fput for FSCONFIG_SET_FD
Aleksandr Mishin (2): ACPI: PMIC: Remove unneeded check in tps68470_pmic_opregion_probe() drm/msm: Fix incorrect file name output in adreno_request_fw()
Alex Bee (1): drm/rockchip: vop: Allow 4096px width scaling
Alex Deucher (5):
      drm/amdgpu: properly handle vbios fake edid sizing
      drm/radeon: properly handle vbios fake edid sizing
      drm/amdgpu/mes12: reduce timeout
      drm/amdgpu/mes11: reduce timeout
      drm/amdgpu: bump driver version for cleared VRAM
Alex Hung (2): drm/amd/display: Check link_res->hpo_dp_link_enc before using it drm/amd/display: disable_hpo_dp_link_output: Check link_res->hpo_dp_link_enc before using it
Alexander Dahl (3): ARM: dts: microchip: sam9x60: Fix rtc/rtt clocks spi: atmel-quadspi: Avoid overwriting delay register settings spi: atmel-quadspi: Fix wrong register value written to MR
Alexander Shiyan (1): clk: rockchip: rk3588: Fix 32k clock name for pmu_24m_32k_100m_src_p
Alexandra Diupina (1): PCI: kirin: Fix buffer overflow in kirin_pcie_parse_port()
Alexei Starovoitov (1): selftests/bpf: Workaround strict bpf_lsm return value check.
Alexey Gladkov (Intel) (1): x86/tdx: Fix "in-kernel MMIO" check
Amit Shah (1): crypto: ccp - do not request interrupt on cmd completion when irqs disabled
Anastasia Belova (1): arm64: esr: Define ESR_ELx_EC_* constants as UL
Andre Przywara (1): kselftest/arm64: signal: fix/refactor SVE vector length enumeration
Andrew Davis (2): arm64: dts: ti: k3-j721e-sk: Fix reversed C6x carveout locations arm64: dts: ti: k3-j721e-beagleboneai64: Fix reversed C6x carveout locations
Andrew Jones (1): RISC-V: KVM: Fix sbiret init before forwarding to userspace
Andrey Konovalov (1): usb: gadget: dummy_hcd: execute hrtimer callback in softirq context
Andrii Nakryiko (1): libbpf: Fix bpf_object__open_skeleton()'s mishandling of options
Andy Shevchenko (3): spi: ppc4xx: Avoid returning 0 when failed to parse and map IRQ platform/x86: ideapad-laptop: Make the scope_guard() clear of its scope i2c: isch: Add missed 'else'
AngeloGioacchino Del Regno (1): arm64: dts: mediatek: mt8186: Fix supported-hw mask for GPU OPPs
Ankit Agrawal (1): clocksource/drivers/qcom: Add missing iounmap() on errors in msm_dt_timer_init()
Antoniu Miclaus (1): ABI: testing: fix admv8818 attr description
Anup Patel (1): RISC-V: KVM: Don't zero-out PMU snapshot area before freeing data
Ard Biesheuvel (1): efistub/tpm: Use ACPI reclaim memory for event log to avoid corruption
Arend van Spriel (1): wifi: brcmfmac: introducing fwil query functions
Arnaldo Carvalho de Melo (2): perf bpf: Move BPF disassembly routines to separate file to avoid clash with capstone bpf headers perf build: Fix up broken capstone feature detection fast path
Artem Bityutskiy (2): intel_idle: add Granite Rapids Xeon support intel_idle: fix ACPI _CST matching for newer Xeon platforms
Artur Weber (1): power: supply: max17042_battery: Fix SOC threshold calc w/ no current sense
Atish Patra (2): RISC-V: KVM: Allow legacy PMU access from guest RISC-V: KVM: Fix to allow hpmcounter31 from the guest
Aurabindo Pillai (2): drm/amd/display: free bo used for dmub bounding box drm/amd/display: Fix underflow when setting underscan on DCN401
Avraham Stern (2): wifi: iwlwifi: mvm: set the cipher for secured NDP ranging wifi: iwlwifi: mvm: increase the time between ranging measurements
Baochen Qiang (1): wifi: ath12k: fix invalid AMPDU factor calculation in ath12k_peer_assoc_h_he()
Baokun Li (1): netfs: Delete subtree of 'fs/netfs' when netfs module exits
Biju Das (1): media: platform: rzg2l-cru: rzg2l-csi2: Add missing MODULE_DEVICE_TABLE
Bitterblue Smith (3): wifi: rtw88: Fix USB/SDIO devices not transmitting beacons wifi: rtw88: 8822c: Fix reported RX band width wifi: rtw88: 8703b: Fix reported RX band width
Bjørn Mork (1): wifi: mt76: mt7915: fix oops on non-dbdc mt7986
Breno Leitao (1): netkit: Assign missing bpf_net_context
Brett Creeley (1): fbnic: Set napi irq value after calling netif_napi_add
Brian Masney (1): crypto: qcom-rng - fix support for ACPI-based systems
Calvin Owens (1): ARM: 9410/1: vfp: Use asm volatile in fmrx/fmxr macros
Casey Schaufler (1): lsm: infrastructure management of the sock security
Chao Yu (10):
      f2fs: atomic: fix to avoid racing w/ GC
      f2fs: reduce expensive checkpoint trigger frequency
      f2fs: fix to avoid racing in between read and OPU dio write
      f2fs: fix to wait page writeback before setting gcing flag
      f2fs: atomic: fix to truncate pagecache before on-disk metadata truncation
      f2fs: fix to avoid use-after-free in f2fs_stop_gc_thread()
      f2fs: get rid of online repaire on corrupted directory
      f2fs: fix to don't set SB_RDONLY in f2fs_handle_critical_error()
      f2fs: fix to wait dio completion
      f2fs: fix to check atomic_file in f2fs ioctl interfaces
Charles Han (1): mtd: powernv: Add check devm_kasprintf() returned value
Chen Yu (2): kthread: fix task state in kthread worker if being frozen sched/pelt: Use rq_clock_task() for hw_pressure
Chen-Yu Tsai (5):
      regulator: Return actual error in of_regulator_bulk_get_all()
      arm64: dts: mediatek: mt8195: Correct clock order for dp_intf*
      arm64: dts: mediatek: mt8195-cherry: Mark USB 3.0 on xhci1 as disabled
      arm64: dts: mediatek: mt8395-nio-12l: Mark USB 3.0 on xhci1 as disabled
      arm64: dts: mediatek: mt8186-corsola: Disable DPI display interface
Cheng Xu (1): RDMA/erdma: Return QP state in erdma_query_qp
Chengchang Tang (2): RDMA/hns: Fix spin_unlock_irqrestore() called with IRQs enabled RDMA/hns: Fix 1bit-ECC recovery address in non-4K OS
Chia-Yuan Li (1): wifi: rtw89: limit the PPDU length for VHT rate to 0x40000
Chris Mi (1): IB/mlx5: Fix UMR pd cleanup on error flow of driver init
Chris Morgan (1): power: supply: axp20x_battery: Remove design from min and max voltage
Christian A. Ehrhardt (1): usb: typec: ucsi: Fix busy loop on ASUS VivoBooks
Christophe JAILLET (3): fbdev: hpfb: Fix an error handling path in hpfb_dio_probe() drm/stm: Fix an error handling path in stm_drm_platform_probe() pinctrl: ti: ti-iodelay: Fix some error handling paths
Christophe Leroy (3): powerpc/8xx: Fix initial memory mapping powerpc/8xx: Fix kernel vs user address comparison powerpc/vdso: Inconditionally use CFUNC macro
Claudiu Beznea (3): ARM: dts: microchip: sama7g5: Fix RTT clock drm/stm: ltdc: check memory returned by devm_kzalloc() clk: at91: sama7g5: Allocate only the needed amount of memory for PLLs
Clément Léger (1): ACPI: CPPC: Fix MASK_VAL() usage
Connor Abbott (3): drm/msm: Use a7xx family directly in gpu_state drm/msm: Dump correct dbgahb clusters on a750 drm/msm: Fix CP_BV_DRAW_STATE_ADDR name
Cristian Ciocaltea (1): phy: phy-rockchip-samsung-hdptx: Explicitly include pm_runtime.h
Cristian Marussi (1): firmware: arm_scmi: Fix double free in OPTEE transport
D Scott Phillips (1): arm64: errata: Enable the AC03_CPU_38 workaround for ampere1a
Daeho Jeong (1): f2fs: prevent atomic file from being dirtied before commit
Dafna Hirschfeld (1): drm/xe: fix missing 'xe_vm_put'
Damien Le Moal (2): ata: libata-scsi: Fix ata_msense_control() CDL page reporting block: Fix elv_iosched_local_module handling of "none" scheduler
Dan Carpenter (7):
      crypto: iaa - Fix potential use after free bug
      powercap: intel_rapl: Fix off by one in get_rpi()
      platform: cznic: turris-omnia-mcu: Fix error check in omnia_mcu_register_trng()
      iommu/arm-smmu-v3: Fix a NULL vs IS_ERR() check
      scsi: elx: libefc: Fix potential use after free in efc_nport_vport_del()
      PCI: keystone: Fix if-statement expression in ks_pcie_quirk()
      ep93xx: clock: Fix off by one in ep93xx_div_recalc_rate()
Daniel Borkmann (4): bpf: Fix bpf_strtol and bpf_strtoul helpers for 32bit bpf: Fix helper writes to read-only maps bpf: Improve check_raw_mode_ok test for MEM_UNINIT-tagged types bpf: Zero former ARG_PTR_TO_{LONG,INT} args in case of error
Daniel Golle (2): net: phy: aquantia: fix setting active_low bit net: phy: aquantia: fix applying active_low bit after reset
Daniel Yang (1): exfat: resolve memory leak from exfat_create_upcase_table()
Danny Tsen (1): crypto: powerpc/p10-aes-gcm - Disable CRYPTO_AES_GCM_P10
Dave Jiang (1): ntb: Force physically contiguous allocation of rx ring buffers
Dave Martin (1): arm64: signal: Fix some under-bracketed UAPI macros
David Belanger (1): drm/amdgpu: Fix selfring initialization sequence on soc24
David Gow (1): mm: only enforce minimum stack gap size if it's sensible
David Howells (1): cachefiles: Fix non-taking of sb_writers around set/removexattr
David Lechner (2): clk: ti: dra7-atl: Fix leak of of_nodes Input: ims-pcu - fix calling interruptible mutex
David Vernet (1): libbpf: Don't take direct pointers into BTF data from st_ops
David Virag (1): arm64: dts: exynos: exynos7885-jackpotlte: Correct RAM amount to 4GB
Dillon Varone (1): drm/amd/display: Block timing sync for different output formats in pmo
Dmitry Antipov (4): wifi: rtw88: always wait for both firmware loading attempts wifi: cfg80211: fix UBSAN noise in cfg80211_wext_siwscan() wifi: cfg80211: fix two more possible UBSAN-detected off-by-one errors wifi: mac80211: use two-phase skb reclamation in ieee80211_do_stop()
Dmitry Baryshkov (9):
      iommu/arm-smmu-qcom: apply num_context_bank fixes for SDM630 / SDM660
      drm/msm/dsi: correct programming sequence for SM8350 / SM8450
      clk: qcom: dispcc-sm8550: fix several supposed typos
      clk: qcom: dispcc-sm8550: use rcg2_ops for mdss_dptx1_aux_clk_src
      clk: qcom: dispcc-sm8650: Update the GDSC flags
      clk: qcom: dispcc-sm8550: use rcg2_shared_ops for ESC RCGs
      clk: qcom: dispcc-sm8250: use special function for Lucid 5LPE PLL
      interconnect: qcom: sm8250: Enable sync_state
      Revert "soc: qcom: smd-rpm: Match rpmsg channel instead of compatible"
Dmitry Kandybka (1): wifi: rtw88: remove CPT execution branch never used
Dmitry Vyukov (2): x86/entry: Remove unwanted instrumentation in common_interrupt() module: Fix KCOV-ignored file name
Dominik Grzegorzek (1): drm/xe: Move and export xe_hw_engine lookup.
Douglas Anderson (4): arm64: smp: smp_send_stop() and crash_smp_send_stop() should try non-NMI first soc: qcom: geni-se: add GP_LENGTH/IRQ_EN_SET/IRQ_EN_CLEAR registers serial: qcom-geni: fix arg types for qcom_geni_serial_poll_bit() serial: qcom-geni: introduce qcom_geni_serial_poll_bitfield()
Dragan Simic (2): arm64: dts: rockchip: Raise Pinebook Pro's panel backlight PWM frequency arm64: dts: rockchip: Correct the Pinebook Pro battery design capacity
Dragos Tatulea (1): vdpa/mlx5: Fix invalid mr resource destroy
Eduard Zingerman (7):
      selftests/bpf: no need to track next_match_pos in struct test_loader
      selftests/bpf: extract test_loader->expect_msgs as a data structure
      selftests/bpf: allow checking xlated programs in verifier_* tests
      selftests/bpf: __arch_* macro to limit test cases to specific archs
      selftests/bpf: fix to avoid __msg tag de-duplication by clang
      bpf: correctly handle malformed BPF_CORE_TYPE_ID_LOCAL relos
      selftests/bpf: correctly move 'log' upon successful match
Eliav Bar-ilan (1): iommu/amd: Fix argument order in amd_iommu_dev_flush_pasid_all()
Emanuele Ghidoli (2): Input: ilitek_ts_i2c - avoid wrong input subsystem sync Input: ilitek_ts_i2c - add report id message validation
Emmanuel Grumbach (2): wifi: mac80211: fix the comeback long retry times wifi: iwlwifi: mvm: allow ESR when we the ROC expires
Eric Biggers (1): crypto: x86/aes-gcm - fix PREEMPT_RT issue in gcm_crypt()
Eric Dumazet (4): sock_map: Add a cond_resched() in sock_hash_free() ipv6: avoid possible NULL deref in rt6_uncached_list_flush_dev() netfilter: nf_reject_ipv6: fix nf_reject_ip6_tcphdr_put() icmp: change the order of rate limits
Esther Shimanovich (1): ACPI: video: force native for Apple MacbookPro9,2
Fabio Porcedda (1): bus: mhi: host: pci_generic: Fix the name for the Telit FE990A
Fangzhi Zuo (4): drm/amd/display: Fix Synaptics Cascaded Panamera DSC Determination drm/amd/display: Add DSC Debug Log drm/amdgpu/display: Fix a mistake in revert commit drm/amd/display: Skip Recompute DSC Params if no Stream on Link
Fei Shao (1): drm/mediatek: Use spin_lock_irqsave() for CRTC event lock
Felix Fietkau (2): wifi: mt76: mt7603: fix mixed declarations and code wifi: mt76: mt7996: fix uninitialized TLV data
Felix Moessbauer (4): io_uring/io-wq: do not allow pinning outside of cpuset io_uring/io-wq: inherit cpuset of cgroup in io worker io_uring/sqpoll: do not allow pinning outside of cpuset io_uring/sqpoll: do not put cpumask on stack
Filipe Manana (1): btrfs: fix race setting file private on concurrent lseek using same fd
Finn Thain (5):
      m68k: Fix kernel_clone_args.flags in m68k_clone()
      scsi: NCR5380: Check for phase match during PDMA fixup
      scsi: mac_scsi: Revise printk(KERN_DEBUG ...) messages
      scsi: mac_scsi: Refactor polling loop
      scsi: mac_scsi: Disallow bus errors during PDMA send
Florian Fainelli (1): tty: rp2: Fix reset with non forgiving PCIe host bridges
Florian Westphal (1): netfilter: nf_tables: store new sets in dedicated list
Francesco Dolcini (1): hwrng: cn10k - Enable by default CN10K driver if Thunder SoC is enabled
Frank Li (2): dt-bindings: PCI: layerscape-pci: Replace fsl,lx2160a-pcie with fsl,lx2160ar2-pcie PCI: imx6: Fix missing call to phy_power_off() in error handling
Frank Min (2): drm/amdgpu: update golden regs for gfx12 drm/amdgpu: fix PTE copy corruption for sdma 7
Frederic Weisbecker (1): rcu/nocb: Fix RT throttling hrtimer armed from offline CPU
Furong Xu (1): net: stmmac: set PP_FLAG_DMA_SYNC_DEV only if XDP is enabled
Gao Xiang (2): erofs: fix incorrect symlink detection in fast symlink erofs: handle overlapped pclusters out of crafted images properly
Gaosheng Cui (2): hwrng: bcm2835 - Add missing clk_disable_unprepare in bcm2835_rng_init hwrng: cctrng - Add missing clk_disable_unprepare in cctrng_resume
Geert Uytterhoeven (4): pmdomain: core: Harden inter-column space in debug summary pmdomain: core: Fix "managed by" alignment in debug summary media: raspberrypi: VIDEO_RASPBERRYPI_PISP_BE should depend on ARCH_BCM2835 media: imagination: VIDEO_E5010_JPEG_ENC should depend on ARCH_K3
Gilbert Wu (1): scsi: smartpqi: revert propagate-the-multipath-failure-to-SML-quickly
Greg Kroah-Hartman (1): Linux 6.11.2
Guenter Roeck (2): hwmon: (max16065) Fix overflows seen when writing limits hwmon: (max16065) Fix alarm attributes
Guillaume Nault (2): bareudp: Pull inner IP header in bareudp_udp_encap_recv(). bareudp: Pull inner IP header on xmit.
Guillaume Stols (2): iio: adc: ad7606: fix oversampling gpio array iio: adc: ad7606: fix standby gpio state to match the documentation
Guoqing Jiang (2): nfsd: call cache_put if xdr_reserve_space returns NULL hwrng: mtk - Use devm_pm_runtime_enable
Haibo Chen (3): spi: fspi: involve lut_num for struct nxp_fspi_devtype_data dt-bindings: spi: nxp-fspi: add imx8ulp support spi: fspi: add support for imx8ulp
Hannes Reinecke (1): nvme-multipath: system fails to create generic nvme device
Hao Ge (1): selftests/bpf: Fix incorrect parameters in NULL pointer checking
Harald Freudenberger (1): s390/ap: Fix deadlock caused by recursive lock of the AP bus scan mutex
Heikki Krogerus (1): usb: typec: ucsi: Call CANCEL from single location
Heiko Carstens (2): s390/entry: Move early program check handler to entry.S s390/entry: Make early program check handler relocated lowcore aware
Heiner Kallweit (1): r8169: disable ALDPS per default for RTL8125
Helge Deller (1): crypto: xor - fix template benchmarking
Herbert Xu (2): crypto: n2 - Set err to EINVAL if snprintf fails for hmac crypto: caam - Pad SG length when allocating hash edesc
Herve Codina (2): soc: fsl: cpm1: qmc: Update TRNSYNC only in transparent mode soc: fsl: cpm1: tsa: Fix tsa_write8()
Hobin Woo (1): ksmbd: make __dir_empty() compatible with POSIX
Howard Hsu (3): wifi: mt76: mt7996: fix HE and EHT beamforming capabilities wifi: mt76: mt7996: fix EHT beamforming capability check wifi: mt76: mt7915: fix rx filter setting for bfee functionality
Huang Shijie (1): sched/deadline: Fix schedstats vs deadline servers
Ian Rogers (3): perf vendor events: SKX, CLX, SNR uncore cache event fixes perf inject: Fix leader sampling inserting additional samples perf time-utils: Fix 32-bit nsec parsing
Ilan Peer (1): wifi: mac80211: Check for missing VHT elements only for 5 GHz
Ilpo Järvinen (1): PCI: Wait for Link before restoring Downstream Buses
Jack Wang (1): RDMA/rtrs: Reset hb_missed_cnt after receiving other traffic from peer
Jack Xiao (2): drm/amdgpu/mes12: set enable_level_process_quantum_check drm/amdgpu/mes12: switch SET_SHADER_DEBUGGER pkt to mes schq pipe
Jacky Bai (1): clk: imx: composite-93: keep root clock on when mcore enabled
Jake Hamby (1): can: m_can: enable NAPI before enabling interrupts
James Clark (1): perf scripts python cs-etm: Restore first sample log in verbose mode
Jann Horn (2): firmware_loader: Block path traversal f2fs: Require FMODE_WRITE for atomic write ioctls
Jason Andryuk (1): fbdev: xen-fbfront: Assign fb_info->device
Jason Gerecke (2): HID: wacom: Support sequence numbers smaller than 16-bit HID: wacom: Do not warn about dropped packets for first packet
Jason Gunthorpe (7):
      iommu/amd: Allocate the page table root using GFP_KERNEL
      iommu/amd: Move allocation of the top table into v1_alloc_pgtable
      iommu/amd: Set the pgsize_bitmap correctly
      iommu/amd: Do not set the D bit on AMD v2 table entries
      iommufd/selftest: Fix buffer read overrrun in the dirty test
      iommufd: Check the domain owner of the parent before creating a nesting domain
      iommufd: Protect against overflow of ALIGN() during iova allocation
Jason Wang (5):
      virtio: rename virtio_config_enabled to virtio_config_core_enabled
      virtio: allow driver to disable the configure change notification
      virtio-net: synchronize operstate with admin state on up/down
      virtio-net: synchronize probe with ndo_set_features
      vhost_vdpa: assign irq bypass producer token correctly
Jason-JH.Lin (1): drm/mediatek: Fix missing configuration flags in mtk_crtc_ddp_config()
Javier Carrasco (3): leds: bd2606mvv: Fix device child node usage in bd2606mvv_probe() leds: pca995x: Use device_for_each_child_node() to access device child nodes leds: pca995x: Fix device child node usage in pca995x_probe()
Jeff Layton (3): nfsd: remove unneeded EEXIST error check in nfsd_do_file_acquire nfsd: fix refcount leak when file is unhashed after being found nfsd: fix initial getattr on write delegation
Jeff Xu (1): selftest mm/mseal: fix test_seal_mremap_move_dontunmap_anyaddr
Jens Axboe (3): io_uring/rw: treat -EOPNOTSUPP for IOCB_NOWAIT like -EAGAIN io_uring: check for presence of task_work rather than TIF_NOTIFY_SIGNAL io_uring/sqpoll: retain test for whether the CPU is valid
Jeongjun Park (2): jfs: fix out-of-bounds in dbNextAG() and diAlloc() mm: migrate: annotate data-race in migrate_folio_unmap()
Jiangshan Yi (1): samples/bpf: Fix compilation errors with cf-protection option
Jiawei Ye (2): wifi: wilc1000: fix potential RCU dereference issue in wilc_parse_join_bss_param smackfs: Use rcu_assign_pointer() to ensure safe assignment in smk_set_cipso
Jie Gan (2): Coresight: Set correct cs_mode for TPDM to fix disable issue Coresight: Set correct cs_mode for dummy source to fix disable issue
Jing Zhang (1): drivers/perf: Fix ali_drw_pmu driver interrupt status clearing
Jinjie Ruan (8):
      net: enetc: Use IRQF_NO_AUTOEN flag in request_irq()
      spi: bcmbca-hsspi: Fix missing pm_runtime_disable()
      mtd: rawnand: mtk: Use for_each_child_of_node_scoped()
      riscv: Fix fp alignment bug in perf_callchain_user()
      ntb: intel: Fix the NULL vs IS_ERR() bug for debugfs_create_dir()
      spi: atmel-quadspi: Undo runtime PM changes at driver exit time
      spi: spi-fsl-lpspi: Undo runtime PM changes at driver exit time
      driver core: Fix a potential null-ptr-deref in module_add_driver()
Jiri Slaby (SUSE) (1): serial: don't use uninitialized value in uart_poll_init()
Jiwon Kim (1): bonding: Fix unnecessary warnings and logs from bond_xdp_get_xmit_slave()
Johan Hovold (3): serial: qcom-geni: fix fifo polling timeout serial: qcom-geni: fix false console tx restart serial: qcom-geni: fix console corruption
Johannes Berg (2): wifi: iwlwifi: config: label 'gl' devices as discrete um: remove ARCH_NO_PREEMPT_DYNAMIC
John B. Wyatt IV (1): pm:cpupower: Add missing powercap_set_enabled() stub function
John Garry (2): scsi: sd: Don't check if a write for REQ_ATOMIC scsi: block: Don't check REQ_ATOMIC for reads
Jon Hunter (1): arm64: tegra: Correct location of power-sensors for IGX Orin
Jonas Blixt (1): watchdog: imx_sc_wdt: Don't disable WDT in suspend
Jonas Karlman (3): arm64: dts: rockchip: Correct vendor prefix for Hardkernel ODROID-M1 drm/rockchip: dw_hdmi: Fix reading EDID when using a forced mode clk: rockchip: Set parent rate for DCLK_VOP clock on RK3228
Jonathan McDowell (1): tpm: Clean up TPM space after command failure
Josh Hunt (1): tcp: check skb is non-NULL in tcp_rto_delta_us()
Juergen Gross (9):
      xen: use correct end address of kernel for conflict checking
      xen: introduce generic helper checking for memory map conflicts
      xen: move max_pfn in xen_memory_setup() out of function scope
      xen: add capability to remap non-RAM pages to different PFNs
      xen: tolerate ACPI NVS memory overlapping with Xen allocated memory
      xen/swiotlb: add alignment check for dma buffers
      xen/swiotlb: fix allocated size
      xen: move checks for e820 conflicts further up
      xen: allow mapping ACPI data using a different physical address
Julian Sun (1): vfs: fix race between evice_inodes() and find_inode()&iput()
Junlin Li (2): drivers: media: dvb-frontends/rtl2832: fix an out-of-bounds write error drivers: media: dvb-frontends/rtl2830: fix an out-of-bounds write error
Junxian Huang (5):
      RDMA/hns: Don't modify rq next block addr in HIP09 QPC
      RDMA/hns: Fix VF triggering PF reset in abnormal interrupt handler
      RDMA/hns: Optimize hem allocation performance
      RDMA/hns: Fix restricted __le16 degrades to integer issue
      RDMA/hns: Fix ah error counter in sw stat not increasing
Justin Iurman (1): net: ipv6: rpl_iptunnel: Fix memory leak in rpl_input
Justin Tee (1): scsi: lpfc: Restrict support for 32 byte CDBs to specific HBAs
Kaixin Wang (1): net: seeq: Fix use after free vulnerability in ether3 Driver Due to Race Condition
Kamlesh Gurudasani (1): padata: Honor the caller's alignment in case of chunk_size 0
Kan Liang (5):
      perf report: Fix --total-cycles --stdio output error
      perf hist: Don't set hpp_fmt_value for members in --no-group
      perf mem: Check mem_events for all eligible PMUs
      perf mem: Fix missed p-core mem events on ADL and RPL
      perf/x86/intel: Allow to setup LBR for counting event for BPF
Kang Yang (1): wifi: ath11k: use work queue to process beacon tx event
Kees Cook (2): leds: gpio: Set num_leds after allocation interconnect: icc-clk: Add missed num_nodes initialization
Kemeng Shi (4): ext4: avoid buffer_head leak in ext4_mark_inode_used() ext4: avoid potential buffer_head leak in __ext4_new_inode() ext4: avoid negative min_clusters in find_group_orlov() quota: avoid missing put_quota_format when DQUOT_SUSPENDED is passed
Kenneth Feng (1): drm/amd/pm: update workload mask after the setting
Kexy Biscuit (1): tpm: export tpm2_sessions_init() to fix ibmvtpm building
Konrad Dybcio (1): iommu/arm-smmu-qcom: Work around SDM845 Adreno SMMU w/ 16K pages
Krishna chaitanya chundru (2): perf/dwc_pcie: Fix registration issue in multi PCIe controller instances perf/dwc_pcie: Always register for PCIe bus notifier
Krzysztof Kozlowski (13):
      ARM: dts: imx7d-zii-rmu2: fix Ethernet PHY pinctrl property
      ARM: versatile: fix OF node leak in CPUs prepare
      reset: berlin: fix OF node leak in probe() error path
      reset: k210: fix OF node leak in probe() error path
      iio: magnetometer: ak8975: drop incorrect AK09116 compatible
      dt-bindings: iio: asahi-kasei,ak8975: drop incorrect AK09116 compatible
      soc: versatile: integrator: fix OF node leak in probe() error path
      bus: integrator-lm: fix OF node leak in probe()
      cpuidle: riscv-sbi: Use scoped device node handling to fix missing of_node_put
      ARM: dts: imx6ul-geam: fix fsl,pins property in tscgrp pinctrl
      ARM: dts: imx6ull-seeed-npi: fix fsl,pins property in tscgrp pinctrl
      soc: versatile: realview: fix memory leak during device remove
      soc: versatile: realview: fix soc_dev leak during device remove
Kuniyuki Iwashima (6):
      af_unix: Don't call skb_get() for OOB skb.
      af_unix: Remove single nest in manage_oob().
      af_unix: Rename unlinked_skb in manage_oob().
      af_unix: Move spin_lock() in manage_oob().
      af_unix: Don't return OOB skb in manage_oob().
      can: bcm: Clear bo->bcm_proc_read after remove_proc_entry().
Lad Prabhakar (5):
      arm64: dts: renesas: r9a08g045: Correct GICD and GICR sizes
      arm64: dts: renesas: r9a07g043u: Correct GICD and GICR sizes
      arm64: dts: renesas: r9a07g054: Correct GICD and GICR sizes
      arm64: dts: renesas: r9a07g044: Correct GICD and GICR sizes
      pinctrl: renesas: rzg2l: Return -EINVAL if the pin doesn't support PIN_CFG_OEN
Lang Yu (1): drm/amdgpu: fix invalid fence handling in amdgpu_vm_tlb_flush
Lasse Collin (1): xz: cleanup CRC32 edits from 2018
Laurent Pinchart (1): Remove *.orig pattern from .gitignore
Leo Ma (1): drm/amd/display: Add HDMI DSC native YCbCr422 support
Leon Hwang (2): bpf, x64: Fix tailcall hierarchy bpf, arm64: Fix tailcall hierarchy
Li Chen (1): ACPI: resource: Do IRQ override on MECHREV GM7XG0M
Li Lingfeng (2): nfsd: return -EINVAL when namelen is 0 nfs: fix memory leak in error path of nfs4_do_reclaim
Li Zhijian (1): nvdimm: Fix devs leaks in scan_labels()
Liam R. Howlett (1): mm/damon/vaddr: protect vma traversal in __damon_va_thre_regions() with rcu read lock
Linus Walleij (2): ASoC: tas2781-i2c: Drop weird GPIO code ASoC: tas2781-i2c: Get the right GPIO line
Liu Ying (1): drm/bridge: lontium-lt8912b: Validate mode in drm_bridge_funcs::mode_valid()
Lorenzo Bianconi (3): spi: airoha: fix dirmap_{read,write} operations spi: airoha: fix airoha_snand_{write,read}_data data_len estimation spi: airoha: remove read cache in airoha_snand_dirmap_read()
Luca Stefani (1): btrfs: always update fstrim_range on failure in FITRIM ioctl
Luiz Augusto von Dentz (3): Bluetooth: hci_core: Fix sending MGMT_EV_CONNECT_FAILED Bluetooth: hci_sync: Ignore errors from HCI_OP_REMOTE_NAME_REQ_CANCEL Bluetooth: btusb: Fix not handling ZPL/short-transfer
MD Danish Anwar (1): arm64: dts: ti: k3-am654-idk: Fix dtbs_check warning in ICSSG dmas
Ma Ke (8):
      spi: ppc4xx: handle irq_of_parse_and_map() errors
      ASoC: rt5682s: Return devm_of_clk_add_hw_provider to transfer the error
      ASoC: rt5682: Return devm_of_clk_add_hw_provider to transfer the error
      pps: add an error check in parport_attach
      wifi: mt76: mt7921: Check devm_kasprintf() returned value
      wifi: mt76: mt7915: check devm_kasprintf() returned value
      wifi: mt76: mt7996: fix NULL pointer dereference in mt7996_mcu_sta_bfer_he
      wifi: mt76: mt7615: check devm_kasprintf() returned value
Maciej Fijalkowski (1): xsk: fix batch alloc API on non-coherent systems
Maciej W. Rozycki (4): PCI: Revert to the original speed after PCIe failed link retraining PCI: Clear the LBMS bit after a link retrain PCI: Correct error reporting with PCIe failed link retraining PCI: Use an error code with PCIe failed link retraining
Manish Pandey (1): scsi: ufs: qcom: Update MODE_MAX cfg_bw value
Manivannan Sadhasivam (1): PCI: qcom-ep: Enable controller resources like PHY only after refclk is available
Marc Aurèle La France (1): debugfs show actual source in /proc/mounts
Marc Gonzalez (1): iommu/arm-smmu-qcom: hide last LPASS SMMU context bank from linux
Marc Kleine-Budde (1): can: m_can: m_can_close(): stop clocks after device has been shut down
Mario Limonciello (1): drm/amd/display: Validate backlight caps are sane
Mark Bloch (1): RDMA/mlx5: Obtain upper net device only when needed
Mark Brown (1): kselftest/arm64: Actually test SME vector length changes via sigreturn
Markus Schneider-Pargmann (1): serial: 8250: omap: Cleanup on error in request_irq
Martin Karsten (1): eventpoll: Annotate data-race of busy_poll_usecs
Martin Tsai (1): drm/amd/display: Clean up dsc blocks in accelerated mode
Martin Wilck (1): scsi: sd: Fix off-by-one error in sd_read_block_characteristics()
Masami Hiramatsu (Google) (2): selftests/ftrace: Add required dependency for kprobe tests selftests/ftrace: Fix eventfs ownership testcase to find mount point
Mathias Nyman (1): xhci: Set quirky xHC PCI hosts to D3 _after_ stopping and freeing them.
Matthew Auld (1): drm/xe: fix engine_class bounds check again
Matthew Brost (1): drm/xe: Use reserved copy engine for user binds on faulting devices
Max Hawking (1): ntb_perf: Fix printk format
Md Haris Iqbal (1): RDMA/rtrs-clt: Reset cid to con_num - 1 to stay in bounds
Miaohe Lin (1): mm/huge_memory: ensure huge_zero_folio won't have large_rmappable flag set
Michael Ellerman (1): powerpc/atomic: Use YZ constraints for DS-form instructions
Michael Guralnik (4): RDMA/mlx5: Fix counter update on MR cache mkey creation RDMA/mlx5: Limit usage of over-sized mkeys from the MR cache RDMA/mlx5: Drop redundant work canceling from clean_keys() RDMA/mlx5: Fix MR cache temp entries cleanup
Michael Lo (1): wifi: mt76: mt7925: fix a potential association failure upon resuming
Michal Kubiak (1): idpf: fix netdev Tx queue stop/wake
Michal Witwicki (3): crypto: qat - disable IOV in adf_dev_stop() crypto: qat - fix recovery flow for VFs crypto: qat - ensure correct order in VF restarting handler
Mickaël Salaün (1): fs: Fix file_set_fowner LSM hook inconsistencies
Mikhail Lobanov (2): RDMA/cxgb4: Added NULL check for lookup_atid drbd: Add NULL check for net_conf to prevent dereference in state validation
Mikulas Patocka (4): dm integrity: fix gcc 5 warning Revert "dm: requeue IO if mapping table not yet available" dm-verity: restart or panic on an I/O error Revert: "dm-verity: restart or panic on an I/O error"
Ming Lei (3): ublk: move zone report data out of request pdu nbd: fix race between timeout and normal completion lib/sbitmap: define swap_lock as raw_spinlock_t
Ming Yen Hsieh (2): wifi: mt76: mt7921: fix wrong UNII-4 freq range check for the channel usage wifi: mt76: mt7925: fix a potential array-index-out-of-bounds issue for clc
Miquel Raynal (2): mtd: rawnand: mtk: Factorize out the logic cleaning mtk chips mtd: rawnand: mtk: Fix init error path
Mirsad Todorovac (1): mtd: slram: insert break after errors in parsing the map
Mukesh Ojha (1): firmware: qcom: scm: Disable SDI and write no dump to dump mode
Mykyta Yatsenko (1): bpftool: Fix handling enum64 in btf dump sorting
Namhyung Kim (5): perf mem: Free the allocated sort string, fixing a leak perf lock contention: Change stack_id type to s32 perf dwarf-aux: Check allowed location expressions when collecting variables perf annotate-data: Fix off-by-one in location range check perf dwarf-aux: Handle bitfield members from pointer access
Namjae Jeon (2): ksmbd: allow write with FILE_APPEND_DATA ksmbd: handle caseless file creation
NeilBrown (1): nfsd: untangle code in nfsd4_deleg_getattr_conflict()
Nicholas Kazlauskas (1): drm/amd/display: Block dynamic IPS2 on DCN35 for incompatible FW versions
Nick Morrow (1): wifi: rtw88: 8821cu: Remove VID/PID 0bda:c82c
Nikita Zhandarovich (4): drm/radeon/evergreen_cs: fix int overflow errors in cs track offsets f2fs: fix several potential integer overflows in file offsets f2fs: prevent possible int overflow in dir_block_index() f2fs: avoid potential int overflow in sanity_check_area_boundary()
Niklas Cassel (1): ata: libata: Clear DID_TIME_OUT for ATA PT commands with sense data
Nishanth Menon (1): cpufreq: ti-cpufreq: Introduce quirks to handle syscon fails appropriately
Nuno Sa (1): Input: adp5588-keys - fix check on return code
Nícolas F. R. A. Prado (1): kselftest: dt: Ignore nodes that have ancestors disabled
Ojaswin Mujoo (1): ext4: check stripe size compatibility on remount as well
Olaf Hering (1): mount: handle OOM on mnt_warn_timestamp_expiry
Oleg Nesterov (1): bpf: Fix use-after-free in bpf_uprobe_multi_link_attach()
Oliver Neukum (5): usbnet: fix cyclical race on disconnect with work queue USB: appledisplay: close race between probe and completion handler USB: misc: cypress_cy7c63: check for short transfer USB: class: CDC-ACM: fix race between get_serial and set_serial USB: misc: yurex: fix race between read and write
P Praneesh (2): wifi: ath12k: fix BSS chan info request WMI command wifi: ath12k: match WMI BSS chan info structure with firmware definition
Pablo Neira Ayuso (7): netfilter: nf_tables: elements with timeout below CONFIG_HZ never expire netfilter: nf_tables: reject element expiration with no timeout netfilter: nf_tables: reject expiration higher than timeout netfilter: nf_tables: remove annotation to access set timeout while holding lock netfilter: nft_dynset: annotate data-races around set timeout netfilter: nf_tables: use rcu chain hook list iterator from netlink dump path netfilter: nf_tables: missing objects with no memcg accounting
Paolo Bonzini (1): Documentation: KVM: fix warning in "make htmldocs"
Patrisious Haddad (1): IB/core: Fix ib_cache_setup_one error flow cleanup
Paul Barker (2): net: ravb: Fix maximum TX frame size for GbEth devices net: ravb: Fix R-Car RX frame size limit
Paul Moore (1): lsm: add the inode_free_security_rcu() LSM implementation hook
Pavan Kumar Paluri (1): crypto: ccp - Properly unregister /dev/sev on sev PLATFORM_STATUS failure
Pawel Laszczak (2): usb: xhci: fix loss of data on Cadence xHC usb: cdnsp: Fix incorrect usb_request status
Peng Fan (5): clk: imx: composite-8m: Enable gate clk with mcore_booted clk: imx: imx8qxp: Register dc0_bypass0_clk before disp clk clk: imx: imx8qxp: Parent should be initialized earlier than the clock remoteproc: imx_rproc: Correct ddr alias for i.MX8M remoteproc: imx_rproc: Initialize workqueue earlier
Pengfei Li (1): clk: imx: fracn-gppll: fix fractional part of PLL getting lost
Peter Chiu (4): wifi: mt76: mt7996: use hweight16 to get correct tx antenna wifi: mt76: mt7996: fix traffic delay when switching back to working channel wifi: mt76: mt7996: fix wmm set of station interface to 3 wifi: mt76: connac: fix checksum offload fields of connac3 RXD
Phil Sutter (2): netfilter: nf_tables: Keep deleted flowtable hooks until after RCU selftests: netfilter: Avoid hanging ipvs.sh
Ping-Ke Shih (2): wifi: rtw89: remove unused C2H event ID RTW89_MAC_C2H_FUNC_READ_WOW_CAM to prevent out-of-bounds reading wifi: mac80211: don't use rate mask for offchannel TX either
Qingqing Zhou (1): arm64: dts: qcom: sa8775p: Mark APPS and PCIe SMMUs as DMA coherent
Qiu-ji Chen (1): drbd: Fix atomicity violation in drbd_uuid_set_bm()
Qiuxu Zhuo (1): EDAC/igen6: Fix conversion of system address to physical memory address
Qu Wenruo (2): btrfs: subpage: fix the bitmap dump which can cause bitmap corruption btrfs: tree-checker: fix the wrong output of data backref objectid
Rafael J. Wysocki (7): thermal: core: Fold two functions into their respective callers thermal: core: Fix rounding of delay jiffies thermal: gov_bang_bang: Adjust states of all uninitialized instances thermal: core: Store trip sysfs attributes in thermal_trip_desc thermal: sysfs: Get to trips via attribute pointers thermal: sysfs: Refine the handling of trip hysteresis changes thermal: sysfs: Add sanity checks for trip temperature and hysteresis
Rex Lu (1): wifi: mt76: mt7996: fix handling mbss enable/disable
Richard Zhu (2): PCI: imx6: Fix establish link failure in EP mode for i.MX8MM and i.MX8MP PCI: imx6: Fix i.MX8MP PCIe EP's occasional failure to trigger MSI
Riyan Dhiman (1): block: fix potential invalid pointer dereference in blk_add_partition
Rob Clark (1): iommu/arm-smmu: Un-demote unhandled-fault msg
Robert Hancock (1): i2c: xiic: Try re-initialization on bus busy timeout
Robin Chen (1): drm/amd/display: Round calculated vtotal
Robin Murphy (3): perf/arm-cmn: Refactor node ID handling. Again. perf/arm-cmn: Fix CCLA register offset perf/arm-cmn: Ensure dtm_idx is big enough
Rodrigo Siqueira (1): drm/amd/display: Improve FAM control for DCN401
Roi Azarzar (1): NFSv4.2: Fix detection of "Proxying of Times" server support
Roman Li (1): drm/amd/display: Update IPS default mode for DCN35/DCN351
Roman Smirnov (2): Revert "media: tuners: fix error return code of hybrid_tuner_request_state()" KEYS: prevent NULL pointer dereference in find_asymmetric_key()
Ryusuke Konishi (3): nilfs2: fix potential null-ptr-deref in nilfs_btree_insert() nilfs2: determine empty node blocks as corrupted nilfs2: fix potential oob read in nilfs_btree_check_delete()
Saleemkhan Jamadar (1): drm/amdgpu/vcn: enable AV1 on both instances
Samasth Norway Ananda (1): x86/PCI: Check pcie_find_root_port() return for NULL
Sandeep Dhavale (1): erofs: fix error handling in z_erofs_init_decompressor
Sean Anderson (5): PCI: xilinx-nwl: Fix register misspelling PCI: xilinx-nwl: Clean up clock on probe failure/removal net: xilinx: axienet: Schedule NAPI in two steps net: xilinx: axienet: Fix packet counting PCI: xilinx-nwl: Fix off-by-one in INTx IRQ handler
Sean Christopherson (4): KVM: x86: Enforce x2APIC's must-be-zero reserved ICR bits KVM: x86: Move x2APIC ICR helper above kvm_apic_write_nodecode() KVM: x86: Re-split x2APIC ICR into ICR+ICR2 for AMD (x2AVIC) KVM: Use dedicated mutex to protect kvm_usage_count to avoid deadlock
Sebastien Laveze (1): clk: imx: imx6ul: fix default parent for enet*_ref_sel
Selvin Xavier (1): RDMA/bnxt_re: Fix the table size for PSN/MSN entries
Shenghao Ding (1): ASoC: tas2781: Fix a compiling warning reported by robot kernel test due to adding tas2563_dvc_table
Shengjiu Wang (1): clk: imx: clk-audiomix: Correct parent clock for earc_phy and audpll
Sherry Yang (1): drm/msm: fix %s null argument error
Shin'ichiro Kawasaki (1): f2fs: check discard support for conventional zones
Shu Han (1): mm: call the security_mmap_file() LSM hook in remap_file_pages()
Shuah Khan (1): selftests:resctrl: Fix build failure on archs without __cpuid_count()
Shubhrajyoti Datta (1): EDAC/synopsys: Fix error injection on Zynq UltraScale+
Siddharth Vadapalli (2): PCI: dra7xx: Fix threaded IRQ request for "dra7xx-pcie-main" IRQ PCI: dra7xx: Fix error handling when IRQ request fails in probe
Simon Horman (2): eth: fbnic: select DEVLINK and PAGE_POOL netfilter: ctnetlink: compile ctnetlink_label_size with CONFIG_NF_CONNTRACK_EVENTS
Slark Xiao (1): bus: mhi: host: pci_generic: Update EDL firmware path for Foxconn modems
Snehal Koukuntla (1): KVM: arm64: Add memory length checks and remove inline in do_ffa_mem_xfer
Song Liu (1): bpf: lsm: Set bpf_lsm_blob_sizes.lbs_task to 0
Sreekant Somasekharan (1): drm/amdkfd: Add SDMA queue quantum support for GFX12
Srinivasan Shanmugam (1): drm/amd/display: Add null check for set_output_gamma in dcn30_set_output_transfer_func
Stefan Mätje (1): can: esd_usb: Remove CAN_CTRLMODE_3_SAMPLES for CAN-USB/3-FD
Stefan Wahren (1): drm/vc4: hdmi: Handle error case of pm_runtime_resume_and_get
Steven Rostedt (Google) (1): selftests/ftrace: Fix test to handle both old and new kernels
Su Hui (1): net: tipc: avoid possible garbage value
Sung Joon Kim (1): drm/amd/display: Disable SYMCLK32_LE root clock gating
Suzuki K Poulose (1): coresight: tmc: sg: Do not leak sg_table
Svyatoslav Pankratov (1): crypto: qat - fix "Full Going True" macro definition
Takashi Sakamoto (1): firewire: core: correct range of block for case of switch statement
Thadeu Lima de Souza Cascardo (2): ext4: return error on ext4_find_inline_entry ext4: avoid OOB when system.data xattr changes underneath the filesystem
Thomas Weißschuh (3): net: ipv6: select DST_CACHE from IPV6_RPL_LWTUNNEL ACPI: sysfs: validate return type of _STR method tools/nolibc: include arch.h from string.h
Tianchen Ding (1): sched/fair: Make SCHED_IDLE entity be preempted in strict hierarchy
Tiezhu Yang (2): objtool: Handle frame pointer related instructions compiler.h: specify correct attribute for .rodata..c_jump_table
Toke Høiland-Jørgensen (1): wifi: ath9k: Remove error checks when creating debugfs entries
Tom Chung (1): drm/amd/display: Reset VRR config during resume
Tomas Marek (1): usb: dwc2: drd: fix clock gating on USB role switch
Tommy Huang (1): i2c: aspeed: Update the stop sw state when the bus recovery occurs
Tony Ambardar (27): selftests/bpf: Fix error linking uprobe_multi on mips selftests/bpf: Fix wrong binary in Makefile log output tools/runqslower: Fix LDFLAGS and add LDLIBS support selftests/bpf: Use pid_t consistently in test_progs.c selftests/bpf: Fix compile error from rlim_t in sk_storage_map.c selftests/bpf: Fix error compiling bpf_iter_setsockopt.c with musl libc selftests/bpf: Drop unneeded error.h includes selftests/bpf: Fix missing ARRAY_SIZE() definition in bench.c selftests/bpf: Fix missing UINT_MAX definitions in benchmarks selftests/bpf: Fix missing BUILD_BUG_ON() declaration selftests/bpf: Fix include of <sys/fcntl.h> selftests/bpf: Fix compiling parse_tcp_hdr_opt.c with musl-libc selftests/bpf: Fix compiling kfree_skb.c with musl-libc selftests/bpf: Fix compiling flow_dissector.c with musl-libc selftests/bpf: Fix compiling tcp_rtt.c with musl-libc selftests/bpf: Fix compiling core_reloc.c with musl-libc selftests/bpf: Fix errors compiling lwt_redirect.c with musl libc selftests/bpf: Fix errors compiling decap_sanity.c with musl libc selftests/bpf: Fix errors compiling crypto_sanity.c with musl libc selftests/bpf: Fix errors compiling cg_storage_multi.h with musl libc selftests/bpf: Fix arg parsing in veristat, test_progs selftests/bpf: Fix error compiling test_lru_map.c selftests/bpf: Fix C++ compile error from missing _Bool type selftests/bpf: Fix redefinition errors compiling lwt_reroute.c selftests/bpf: Fix compile if backtrace support missing in libc selftests/bpf: Fix error compiling tc_redirect.c with musl libc libbpf: Ensure new BTF objects inherit input endianness
Uros Bizjak (1): x86/boot/64: Strip percpu address space when setting up GDT descriptors
Uwe Kleine-König (1): media: staging: media: starfive: camss: Drop obsolete return value documentation
VanGiang Nguyen (1): padata: use integer wrap around to prevent deadlock on seq_nr overflow
Varadarajan Narayanan (1): clk: qcom: ipq5332: Register gcc_qdss_tsctr_clk_src
Vasant Hegde (1): iommu/amd: Handle error path in amd_iommu_probe_device()
Vasileios Amoiridis (1): iio: chemical: bme680: Fix read/write ops to device by adding mutexes
Vasily Gorbik (1): s390/ftrace: Avoid calling unwinder in ftrace_return_address()
Vasily Khoruzhick (2): ACPICA: Implement ACPI_WARNING_ONCE and ACPI_ERROR_ONCE ACPICA: executer/exsystem: Don't nag user about every Stall() violating the spec
Vishal Moola (Oracle) (2): mm/hugetlb.c: fix UAF of vma in hugetlb fault pathway mm: change vmf_anon_prepare() to __vmf_anon_prepare()
Vitaliy Shevtsov (1): RDMA/irdma: fix error message in irdma_modify_qp_roce()
Vladimir Lypak (4): drm/msm/a5xx: disable preemption in submits by default drm/msm/a5xx: properly clear preemption records on resume drm/msm/a5xx: fix races in preemption evaluation stage drm/msm/a5xx: workaround early ring-buffer emptiness check
Vladimir Oltean (1): net: phy: aquantia: fix -ETIMEDOUT PHY probe failure when firmware not present
Wang Jianzheng (1): pinctrl: mvebu: Fix devinit_dove_pinctrl_probe function
WangYuli (2): drm/amd/amdgpu: Properly tune the size of struct usb: xHCI: add XHCI_RESET_ON_RESUME quirk for Phytium xHCI host
Weili Qian (3): crypto: hisilicon/hpre - mask cluster timeout error crypto: hisilicon/qm - reset device before enabling it crypto: hisilicon/qm - inject error before stopping queue
Wenbo Li (1): virtio_net: Fix mismatched buf address when unmapping for small packets
Werner Sembach (4): Input: i8042 - add TUXEDO Stellaris 16 Gen5 AMD to i8042 quirk table Input: i8042 - add TUXEDO Stellaris 15 Slim Gen6 AMD to i8042 quirk table Input: i8042 - add another board name for TUXEDO Stellaris Gen5 AMD line ACPI: resource: Add another DMI match for the TongFang GMxXGxx
Wolfram Sang (1): ipmi: docs: don't advertise deprecated sysfs entries
Wouter Verhelst (1): nbd: correct the maximum value for discard sectors
Xiu Jianfeng (1): cgroup/pids: Avoid spurious event notification
Xu Kuohai (2): bpf, lsm: Add check for BPF LSM return value bpf: Fix compare error in function retval_range_within
Yanfei Xu (1): cxl/pci: Fix to record only non-zero ranges
Yang Jihong (2): perf sched timehist: Fix missing free of session in perf_sched__timehist() perf sched timehist: Fixed timestamp error when unable to confirm event sched_in time
Yang Yingliang (1): pinctrl: single: fix missing error code in pcs_probe()
Yanteng Si (1): net: stmmac: dwmac-loongson: Init ref and PTP clocks rate
Ye Li (1): clk: imx: composite-7ulp: Check the PCC present bit
Yeongjin Gil (2): f2fs: Create COW inode from parent dentry for atomic write f2fs: compress: don't redirty sparse cluster during {,de}compress
Yicong Yang (3): drivers/perf: hisi_pcie: Record hardware counts correctly drivers/perf: hisi_pcie: Fix TLP headers bandwidth counting perf stat: Display iostat headers correctly
Yihan Zhu (1): drm/amd/display: Enable DML2 override_det_buffer_size_kbytes
Yonghong Song (1): bpf: Fail verification for sign-extension of packet data/data_end/data_meta
Yosry Ahmed (1): x86/mm: Use IPIs to synchronize LAM enablement
Youssef Samir (1): net: qrtr: Update packets cloning when broadcasting
Yu Kuai (6): block, bfq: fix possible UAF for bfqq->bic with merge chain block, bfq: choose the last bfqq from merge chain in bfq_setup_cooperator() block, bfq: don't break merge chain in bfq_split_bfqq() block, bfq: fix uaf for accessing waker_bfqq after splitting block, bfq: fix procress reference leakage for bfqq in merge chain md: Don't flush sync_work in md_write_start()
Yu Zhao (1): mm/hugetlb_vmemmap: batch HVO work when demoting
Yuesong Li (1): drivers:drm:exynos_drm_gsc:Fix wrong assignment in gsc_bind()
Yujie Liu (1): sched/numa: Fix the vma scan starving issue
Yunfei Dong (3): media: mediatek: vcodec: Fix H264 multi stateless decoder smatch warning media: mediatek: vcodec: Fix VP8 stateless decoder smatch warning media: mediatek: vcodec: Fix H264 stateless decoder smatch warning
Yuntao Liu (3): ALSA: hda: cs35l41: fix module autoloading hwmon: (ntc_thermistor) fix module autoloading clk: starfive: Use pm_runtime_resume_and_get to fix pm_runtime_get_sync() usage
Zhang Changzhong (1): can: j1939: use correct function name in comment
Zhen Lei (1): debugobjects: Fix conditions in fill_pool()
Zhiguo Niu (1): lockdep: fix deadlock issue between lockdep and rcu
Zhikai Zhai (1): drm/amd/display: Skip to enable dsc if it has been off
Zhipeng Wang (1): clk: imx: imx8mp: fix clock tree update of TF-A managed clocks
Zhu Yanjun (1): RDMA/iwcm: Fix WARNING:at_kernel/workqueue.c:#check_flush_dependency
Zijun Hu (1): driver core: Fix error handling in driver API device_rename()
Zong-Zhe Yang (2): wifi: mac80211_hwsim: correct MODULE_PARM_DESC of multi_radio wifi: rtw89: wow: fix wait condition for AOAC report request
hhorace (1): wifi: cfg80211: fix bug of mapping AF3x to incorrect User Priority
tangbin (1): ASoC: loongson: fix error release
wenglianfa (2): RDMA/hns: Fix Use-After-Free of rsv_qp on HIP08 RDMA/hns: Fix the overflow risk of hem_list_calc_ba_range()
yangerkun (1): ext4: clear EXT4_GROUP_INFO_WAS_TRIMMED_BIT even mount with discard
yangyun (1): fuse: use exclusive lock when FUSE_I_CACHE_IO_MODE is set
diff --git a/.gitignore b/.gitignore index 7902adf4f7f1..58fdbb35e2f1 100644 --- a/.gitignore +++ b/.gitignore @@ -142,7 +142,6 @@ GTAGS # id-utils files ID
-*.orig *~ #*#
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 index 31dbb390573f..c431f0a13cf5 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 +++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 @@ -3,7 +3,7 @@ KernelVersion: Contact: linux-iio@vger.kernel.org Description: Reading this returns the valid values that can be written to the - on_altvoltage0_mode attribute: + filter_mode attribute:
- auto -> Adjust bandpass filter to track changes in input clock rate. - manual -> disable/unregister the clock rate notifier / input clock tracking. diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst index 50327c05be8d..39c52385f11f 100644 --- a/Documentation/arch/arm64/silicon-errata.rst +++ b/Documentation/arch/arm64/silicon-errata.rst @@ -55,6 +55,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 | +----------------+-----------------+-----------------+-----------------------------+ +| Ampere | AmpereOne AC04 | AC04_CPU_10 | AMPERE_ERRATUM_AC03_CPU_38 | ++----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | +----------------+-----------------+-----------------+-----------------------------+ diff --git a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml index 9790f75fc669..fe5145d3b73c 100644 --- a/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml +++ b/Documentation/devicetree/bindings/iio/magnetometer/asahi-kasei,ak8975.yaml @@ -23,7 +23,6 @@ properties: - ak8963 - ak09911 - ak09912 - - ak09916 deprecated: true
reg: diff --git a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml index 793986c5af7f..daeab5c0758d 100644 --- a/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,layerscape-pcie.yaml @@ -22,18 +22,20 @@ description:
properties: compatible: - enum: - - fsl,ls1021a-pcie - - fsl,ls2080a-pcie - - fsl,ls2085a-pcie - - fsl,ls2088a-pcie - - fsl,ls1088a-pcie - - fsl,ls1046a-pcie - - fsl,ls1043a-pcie - - fsl,ls1012a-pcie - - fsl,ls1028a-pcie - - fsl,lx2160a-pcie - + oneOf: + - enum: + - fsl,ls1012a-pcie + - fsl,ls1021a-pcie + - fsl,ls1028a-pcie + - fsl,ls1043a-pcie + - fsl,ls1046a-pcie + - fsl,ls1088a-pcie + - fsl,ls2080a-pcie + - fsl,ls2085a-pcie + - fsl,ls2088a-pcie + - items: + - const: fsl,lx2160ar2-pcie + - const: fsl,ls2088a-pcie reg: maxItems: 2
diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml index 4a5f41bde00f..902db92da832 100644 --- a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml +++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml @@ -21,6 +21,7 @@ properties: - nxp,imx8mm-fspi - nxp,imx8mp-fspi - nxp,imx8qxp-fspi + - nxp,imx8ulp-fspi - nxp,lx2160a-fspi - items: - enum: diff --git a/Documentation/driver-api/ipmi.rst b/Documentation/driver-api/ipmi.rst index e224e47b6b09..dfa021eacd63 100644 --- a/Documentation/driver-api/ipmi.rst +++ b/Documentation/driver-api/ipmi.rst @@ -540,7 +540,7 @@ at module load time (for a module) with:: alerts_broken
The addresses are normal I2C addresses. The adapter is the string -name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name. +name of the adapter, as shown in /sys/bus/i2c/devices/i2c-<n>/name. It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring spaces, so if the name is "This is an I2C chip" you can say adapter_name=ThisisanI2cchip. This is because it's hard to pass in diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst index 02880d5552d5..198a5a8f26c2 100644 --- a/Documentation/virt/kvm/locking.rst +++ b/Documentation/virt/kvm/locking.rst @@ -9,7 +9,7 @@ KVM Lock Overview
The acquisition orders for mutexes are as follows:
-- cpus_read_lock() is taken outside kvm_lock +- cpus_read_lock() is taken outside kvm_lock and kvm_usage_lock
- kvm->lock is taken outside vcpu->mutex
@@ -24,6 +24,13 @@ The acquisition orders for mutexes are as follows: are taken on the waiting side when modifying memslots, so MMU notifiers must not take either kvm->slots_lock or kvm->slots_arch_lock.
+cpus_read_lock() vs kvm_lock: + +- Taking cpus_read_lock() outside of kvm_lock is problematic, despite that + being the official ordering, as it is quite easy to unknowingly trigger + cpus_read_lock() while holding kvm_lock. Use caution when walking vm_list, + e.g. avoid complex operations when possible. + For SRCU:
- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections @@ -227,10 +234,17 @@ time it will be set using the Dirty tracking mechanism described above. :Type: mutex :Arch: any :Protects: - vm_list - - kvm_usage_count + +``kvm_usage_lock`` +^^^^^^^^^^^^^^^^^^ + +:Type: mutex +:Arch: any +:Protects: - kvm_usage_count - hardware virtualization enable/disable -:Comment: KVM also disables CPU hotplug via cpus_read_lock() during - enable/disable. +:Comment: Exists because using kvm_lock leads to deadlock (see earlier comment + on cpus_read_lock() vs kvm_lock). Note, KVM also disables CPU hotplug via + cpus_read_lock() when enabling/disabling virtualization.
``kvm->mn_invalidate_lock`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -290,11 +304,12 @@ time it will be set using the Dirty tracking mechanism described above. wakeup.
``vendor_module_lock`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^ :Type: mutex :Arch: x86 :Protects: loading a vendor module (kvm_amd or kvm_intel) -:Comment: Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is - taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and - many operations need to take cpu_hotplug_lock when loading a vendor module, - e.g. updating static calls. +:Comment: Exists because using kvm_lock leads to deadlock. kvm_lock is taken + in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while + cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many + operations need to take cpu_hotplug_lock when loading a vendor module, e.g. + updating static calls. diff --git a/Makefile b/Makefile index df9c90cdd1c5..7bcf0c32ea5e 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 11 -SUBLEVEL = 1 +SUBLEVEL = 2 EXTRAVERSION = NAME = Baby Opossum Posse
diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi index 291540e5d81e..d077afd5024d 100644 --- a/arch/arm/boot/dts/microchip/sam9x60.dtsi +++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi @@ -1312,7 +1312,7 @@ rtt: rtc@fffffe20 { compatible = "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; reg = <0xfffffe20 0x20>; interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; };
pit: timer@fffffe40 { @@ -1338,7 +1338,7 @@ rtc: rtc@fffffea8 { compatible = "microchip,sam9x60-rtc", "atmel,at91sam9x5-rtc"; reg = <0xfffffea8 0x100>; interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; };
watchdog: watchdog@ffffff80 { diff --git a/arch/arm/boot/dts/microchip/sama7g5.dtsi b/arch/arm/boot/dts/microchip/sama7g5.dtsi index 75778be126a3..17bcdcf0cf4a 100644 --- a/arch/arm/boot/dts/microchip/sama7g5.dtsi +++ b/arch/arm/boot/dts/microchip/sama7g5.dtsi @@ -272,7 +272,7 @@ rtt: rtc@e001d020 { compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; reg = <0xe001d020 0x30>; interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clk32k 0>; + clocks = <&clk32k 1>; };
clk32k: clock-controller@e001d050 { diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts index cdbb8c435cd6..601d89b904cd 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts @@ -365,7 +365,7 @@ MX6UL_PAD_ENET1_RX_ER__PWM8_OUT 0x110b0 };
pinctrl_tsc: tscgrp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0 MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0 MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0 diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi index 6bb12e0bbc7e..50654dbf62e0 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ull-seeed-npi-dev-board.dtsi @@ -339,14 +339,14 @@ MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x120b0 };
pinctrl_uart1: uart1grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1 MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1 >; };
pinctrl_uart2: uart2grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x1b0b1 MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x1b0b1 MX6UL_PAD_UART2_CTS_B__UART2_DCE_CTS 0x1b0b1 @@ -355,7 +355,7 @@ MX6UL_PAD_UART2_RTS_B__UART2_DCE_RTS 0x1b0b1 };
pinctrl_uart3: uart3grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX 0x1b0b1 MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX 0x1b0b1 MX6UL_PAD_UART3_CTS_B__UART3_DCE_CTS 0x1b0b1 @@ -364,21 +364,21 @@ MX6UL_PAD_UART3_RTS_B__UART3_DCE_RTS 0x1b0b1 };
pinctrl_uart4: uart4grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART4_TX_DATA__UART4_DCE_TX 0x1b0b1 MX6UL_PAD_UART4_RX_DATA__UART4_DCE_RX 0x1b0b1 >; };
pinctrl_uart5: uart5grp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_UART5_TX_DATA__UART5_DCE_TX 0x1b0b1 MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x1b0b1 >; };
pinctrl_usb_otg1_id: usbotg1idgrp { - fsl,pin = < + fsl,pins = < MX6UL_PAD_GPIO1_IO00__ANATOP_OTG1_ID 0x17059 >; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts index 521493342fe9..8f5566027c25 100644 --- a/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts +++ b/arch/arm/boot/dts/nxp/imx/imx7d-zii-rmu2.dts @@ -350,7 +350,7 @@ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x59
&iomuxc_lpsr { pinctrl_enet1_phy_interrupt: enet1phyinterruptgrp { - fsl,phy = < + fsl,pins = < MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08 >; }; diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c index 85a496ddc619..e9f72a529b50 100644 --- a/arch/arm/mach-ep93xx/clock.c +++ b/arch/arm/mach-ep93xx/clock.c @@ -359,7 +359,7 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw, u32 val = __raw_readl(psc->reg); u8 index = (val & psc->mask) >> psc->shift;
- if (index > psc->num_div) + if (index >= psc->num_div) return 0;
return DIV_ROUND_UP_ULL(parent_rate, psc->div[index]); diff --git a/arch/arm/mach-versatile/platsmp-realview.c b/arch/arm/mach-versatile/platsmp-realview.c index 6965a1de727b..d38b2e174257 100644 --- a/arch/arm/mach-versatile/platsmp-realview.c +++ b/arch/arm/mach-versatile/platsmp-realview.c @@ -70,6 +70,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus) return; } map = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(map)) { pr_err("PLATSMP: No syscon regmap\n"); return; diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h index 3c7938fd40aa..32090b0fb250 100644 --- a/arch/arm/vfp/vfpinstr.h +++ b/arch/arm/vfp/vfpinstr.h @@ -64,33 +64,37 @@
#ifdef CONFIG_AS_VFP_VMRS_FPINST
-#define fmrx(_vfp_) ({ \ - u32 __v; \ - asm(".fpu vfpv2\n" \ - "vmrs %0, " #_vfp_ \ - : "=r" (__v) : : "cc"); \ - __v; \ - }) - -#define fmxr(_vfp_,_var_) \ - asm(".fpu vfpv2\n" \ - "vmsr " #_vfp_ ", %0" \ - : : "r" (_var_) : "cc") +#define fmrx(_vfp_) ({ \ + u32 __v; \ + asm volatile (".fpu vfpv2\n" \ + "vmrs %0, " #_vfp_ \ + : "=r" (__v) : : "cc"); \ + __v; \ +}) + +#define fmxr(_vfp_, _var_) ({ \ + asm volatile (".fpu vfpv2\n" \ + "vmsr " #_vfp_ ", %0" \ + : : "r" (_var_) : "cc"); \ +})
#else
#define vfpreg(_vfp_) #_vfp_
-#define fmrx(_vfp_) ({ \ - u32 __v; \ - asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \ - : "=r" (__v) : : "cc"); \ - __v; \ - }) - -#define fmxr(_vfp_,_var_) \ - asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \ - : : "r" (_var_) : "cc") +#define fmrx(_vfp_) ({ \ + u32 __v; \ + asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \ + "cr0, 0 @ fmrx %0, " #_vfp_ \ + : "=r" (__v) : : "cc"); \ + __v; \ +}) + +#define fmxr(_vfp_, _var_) ({ \ + asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \ + "cr0, 0 @ fmxr " #_vfp_ ", %0" \ + : : "r" (_var_) : "cc"); \ +})
#endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a2f8ff354ca6..c8cba20a4d11 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -423,7 +423,7 @@ config AMPERE_ERRATUM_AC03_CPU_38 default y help This option adds an alternative code sequence to work around Ampere - erratum AC03_CPU_38 on AmpereOne. + errata AC03_CPU_38 and AC04_CPU_10 on AmpereOne.
The affected design reports FEAT_HAFDBS as not implemented in ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0 diff --git a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts index 47a389d9ff7d..9d74fa6bfed9 100644 --- a/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts +++ b/arch/arm64/boot/dts/exynos/exynos7885-jackpotlte.dts @@ -32,7 +32,7 @@ memory@80000000 { device_type = "memory"; reg = <0x0 0x80000000 0x3da00000>, <0x0 0xc0000000 0x40000000>, - <0x8 0x80000000 0x40000000>; + <0x8 0x80000000 0x80000000>; };
gpio-keys { diff --git a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi index afdab5724eaa..eccbd7b597bb 100644 --- a/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8186-corsola.dtsi @@ -353,7 +353,8 @@ &dpi { pinctrl-names = "default", "sleep"; pinctrl-0 = <&dpi_pins_default>; pinctrl-1 = <&dpi_pins_sleep>; - status = "okay"; + /* TODO Re-enable after DP to Type-C port muxing can be described */ + status = "disabled"; };
&dpi_out { diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi index 4763ed5dc86c..d63a9defe73e 100644 --- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi @@ -731,7 +731,7 @@ opp-850000000 { opp-900000000-3 { opp-hz = /bits/ 64 <900000000>; opp-microvolt = <850000>; - opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; };
opp-900000000-4 { @@ -743,13 +743,13 @@ opp-900000000-4 { opp-900000000-5 { opp-hz = /bits/ 64 <900000000>; opp-microvolt = <825000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; };
opp-950000000-3 { opp-hz = /bits/ 64 <950000000>; opp-microvolt = <900000>; - opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; };
opp-950000000-4 { @@ -761,13 +761,13 @@ opp-950000000-4 { opp-950000000-5 { opp-hz = /bits/ 64 <950000000>; opp-microvolt = <850000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; };
opp-1000000000-3 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <950000>; - opp-supported-hw = <0x8>; + opp-supported-hw = <0xcf>; };
opp-1000000000-4 { @@ -779,7 +779,7 @@ opp-1000000000-4 { opp-1000000000-5 { opp-hz = /bits/ 64 <1000000000>; opp-microvolt = <875000>; - opp-supported-hw = <0x30>; + opp-supported-hw = <0x20>; }; };
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi index fe5400e17b0f..d3a52acbe48a 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi @@ -1404,6 +1404,7 @@ &xhci1 { rx-fifo-depth = <3072>; vusb33-supply = <&mt6359_vusb_ldo_reg>; vbus-supply = <&usb_vbus>; + mediatek,u3p-dis-msk = <1>; };
&xhci2 { diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index 2ee45752583c..98c15eb68589 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -3251,10 +3251,10 @@ dp_intf0: dp-intf@1c015000 { compatible = "mediatek,mt8195-dp-intf"; reg = <0 0x1c015000 0 0x1000>; interrupts = <GIC_SPI 657 IRQ_TYPE_LEVEL_HIGH 0>; - clocks = <&vdosys0 CLK_VDO0_DP_INTF0>, - <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>, + clocks = <&vdosys0 CLK_VDO0_DP_INTF0_DP_INTF>, + <&vdosys0 CLK_VDO0_DP_INTF0>, <&apmixedsys CLK_APMIXED_TVDPLL1>; - clock-names = "engine", "pixel", "pll"; + clock-names = "pixel", "engine", "pll"; status = "disabled"; };
@@ -3521,10 +3521,10 @@ dp_intf1: dp-intf@1c113000 { reg = <0 0x1c113000 0 0x1000>; interrupts = <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH 0>; power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>; - clocks = <&vdosys1 CLK_VDO1_DP_INTF0_MM>, - <&vdosys1 CLK_VDO1_DPINTF>, + clocks = <&vdosys1 CLK_VDO1_DPINTF>, + <&vdosys1 CLK_VDO1_DP_INTF0_MM>, <&apmixedsys CLK_APMIXED_TVDPLL2>; - clock-names = "engine", "pixel", "pll"; + clock-names = "pixel", "engine", "pll"; status = "disabled"; };
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts index 4b5f6cf16f70..096fa999aa59 100644 --- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts +++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts @@ -898,6 +898,7 @@ &xhci1 { usb2-lpm-disable; vusb33-supply = <&mt6359_vusb_ldo_reg>; vbus-supply = <&vsys>; + mediatek,u3p-dis-msk = <1>; status = "okay"; };
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi index 553fa4ba1cd4..62c4fdad0b60 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3701-0008.dtsi @@ -44,39 +44,6 @@ i2c@c240000 { status = "okay"; };
- i2c@c250000 { - power-sensor@41 { - compatible = "ti,ina3221"; - reg = <0x41>; - #address-cells = <1>; - #size-cells = <0>; - - input@0 { - reg = <0x0>; - label = "CVB_ATX_12V"; - shunt-resistor-micro-ohms = <2000>; - }; - - input@1 { - reg = <0x1>; - label = "CVB_ATX_3V3"; - shunt-resistor-micro-ohms = <2000>; - }; - - input@2 { - reg = <0x2>; - label = "CVB_ATX_5V"; - shunt-resistor-micro-ohms = <2000>; - }; - }; - - power-sensor@44 { - compatible = "ti,ina219"; - reg = <0x44>; - shunt-resistor = <2000>; - }; - }; - rtc@c2a0000 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi index 527f2f3aee3a..377f518bd3e5 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002.dtsi @@ -183,6 +183,39 @@ usb@3610000 { phy-names = "usb2-0", "usb2-1", "usb2-2", "usb2-3", "usb3-0", "usb3-1", "usb3-2"; }; + + i2c@c250000 { + power-sensor@41 { + compatible = "ti,ina3221"; + reg = <0x41>; + #address-cells = <1>; + #size-cells = <0>; + + input@0 { + reg = <0x0>; + label = "CVB_ATX_12V"; + shunt-resistor-micro-ohms = <2000>; + }; + + input@1 { + reg = <0x1>; + label = "CVB_ATX_3V3"; + shunt-resistor-micro-ohms = <2000>; + }; + + input@2 { + reg = <0x2>; + label = "CVB_ATX_5V"; + shunt-resistor-micro-ohms = <2000>; + }; + }; + + power-sensor@44 { + compatible = "ti,ina219"; + reg = <0x44>; + shunt-resistor = <2000>; + }; + }; };
vdd_3v3_dp: regulator-vdd-3v3-dp { diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi index 23f1b2e5e624..95691ab58a23 100644 --- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi @@ -3070,6 +3070,7 @@ apps_smmu: iommu@15000000 { reg = <0x0 0x15000000 0x0 0x100000>; #iommu-cells = <2>; #global-interrupts = <2>; + dma-coherent;
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>, @@ -3208,6 +3209,7 @@ pcie_smmu: iommu@15200000 { reg = <0x0 0x15200000 0x0 0x80000>; #iommu-cells = <2>; #global-interrupts = <2>; + dma-coherent;
interrupts = <GIC_SPI 920 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 921 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi index cd732ef88cd8..6174160b56c4 100644 --- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi +++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi @@ -4402,14 +4402,14 @@ mdss_dp2: displayport-controller@ae9a000 {
assigned-clocks = <&dispcc DISP_CC_MDSS_DPTX2_LINK_CLK_SRC>, <&dispcc DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC>; - assigned-clock-parents = <&mdss_dp2_phy 0>, - <&mdss_dp2_phy 1>; + assigned-clock-parents = <&usb_1_ss2_qmpphy QMP_USB43DP_DP_LINK_CLK>, + <&usb_1_ss2_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
operating-points-v2 = <&mdss_dp2_opp_table>;
power-domains = <&rpmhpd RPMHPD_MMCX>;
- phys = <&mdss_dp2_phy>; + phys = <&usb_1_ss2_qmpphy QMP_USB43DP_DP_PHY>; phy-names = "dp";
#sound-dai-cells = <0>; @@ -4597,8 +4597,8 @@ dispcc: clock-controller@af00000 { <&usb_1_ss0_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>, <&usb_1_ss1_qmpphy QMP_USB43DP_DP_LINK_CLK>, /* dp1 */ <&usb_1_ss1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>, - <&mdss_dp2_phy 0>, /* dp2 */ - <&mdss_dp2_phy 1>, + <&usb_1_ss2_qmpphy QMP_USB43DP_DP_LINK_CLK>, /* dp2 */ + <&usb_1_ss2_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>, <&mdss_dp3_phy 0>, /* dp3 */ <&mdss_dp3_phy 1>; power-domains = <&rpmhpd RPMHPD_MMCX>; diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi index 18ef297db933..20fb5e41c598 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi @@ -210,8 +210,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi index d3838e5820fc..c9b9b60a3a36 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi @@ -1043,8 +1043,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; };
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi index 1de2e5f0917d..8a9b61bd759a 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi @@ -1051,8 +1051,8 @@ gic: interrupt-controller@11900000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; + reg = <0x0 0x11900000 0 0x20000>, + <0x0 0x11940000 0 0x40000>; interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; };
diff --git a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi index 0d5c47a65e46..34e29463a672 100644 --- a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi @@ -269,8 +269,8 @@ gic: interrupt-controller@12400000 { #interrupt-cells = <3>; #address-cells = <0>; interrupt-controller; - reg = <0x0 0x12400000 0 0x40000>, - <0x0 0x12440000 0 0x60000>; + reg = <0x0 0x12400000 0 0x20000>, + <0x0 0x12440000 0 0x40000>; interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; };
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts index 294eb2de263d..f5e124b235c8 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts @@ -32,12 +32,12 @@ chosen { backlight: edp-backlight { compatible = "pwm-backlight"; power-supply = <&vcc_12v>; - pwms = <&pwm0 0 740740 0>; + pwms = <&pwm0 0 125000 0>; };
bat: battery { compatible = "simple-battery"; - charge-full-design-microamp-hours = <9800000>; + charge-full-design-microamp-hours = <10000000>; voltage-max-design-microvolt = <4350000>; voltage-min-design-microvolt = <3000000>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts index a337f547caf5..6a02db4f073f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts @@ -13,7 +13,7 @@
/ { model = "Hardkernel ODROID-M1"; - compatible = "rockchip,rk3568-odroid-m1", "rockchip,rk3568"; + compatible = "hardkernel,odroid-m1", "rockchip,rk3568";
aliases { ethernet0 = &gmac0; diff --git a/arch/arm64/boot/dts/ti/k3-am654-idk.dtso b/arch/arm64/boot/dts/ti/k3-am654-idk.dtso index 8bdb87fcbde0..1674ad564be1 100644 --- a/arch/arm64/boot/dts/ti/k3-am654-idk.dtso +++ b/arch/arm64/boot/dts/ti/k3-am654-idk.dtso @@ -58,9 +58,7 @@ icssg0_eth: icssg0-eth { <&main_udmap 0xc107>, /* egress slice 1 */
<&main_udmap 0x4100>, /* ingress slice 0 */ - <&main_udmap 0x4101>, /* ingress slice 1 */ - <&main_udmap 0x4102>, /* mgmnt rsp slice 0 */ - <&main_udmap 0x4103>; /* mgmnt rsp slice 1 */ + <&main_udmap 0x4101>; /* ingress slice 1 */ dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3", "tx1-0", "tx1-1", "tx1-2", "tx1-3", "rx0", "rx1"; @@ -126,9 +124,7 @@ icssg1_eth: icssg1-eth { <&main_udmap 0xc207>, /* egress slice 1 */
<&main_udmap 0x4200>, /* ingress slice 0 */ - <&main_udmap 0x4201>, /* ingress slice 1 */ - <&main_udmap 0x4202>, /* mgmnt rsp slice 0 */ - <&main_udmap 0x4203>; /* mgmnt rsp slice 1 */ + <&main_udmap 0x4201>; /* ingress slice 1 */ dma-names = "tx0-0", "tx0-1", "tx0-2", "tx0-3", "tx1-0", "tx1-1", "tx1-2", "tx1-3", "rx0", "rx1"; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts index a2925555fe81..fb899c99753e 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-beagleboneai64.dts @@ -123,7 +123,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 { no-map; };
- c66_1_dma_memory_region: c66-dma-memory@a6000000 { + c66_0_dma_memory_region: c66-dma-memory@a6000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa6000000 0x00 0x100000>; no-map; @@ -135,7 +135,7 @@ c66_0_memory_region: c66-memory@a6100000 { no-map; };
- c66_0_dma_memory_region: c66-dma-memory@a7000000 { + c66_1_dma_memory_region: c66-dma-memory@a7000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa7000000 0x00 0x100000>; no-map; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts index 89fbfb21e5d3..e709edeb95cf 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts @@ -120,7 +120,7 @@ main_r5fss1_core1_memory_region: r5f-memory@a5100000 { no-map; };
- c66_1_dma_memory_region: c66-dma-memory@a6000000 { + c66_0_dma_memory_region: c66-dma-memory@a6000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa6000000 0x00 0x100000>; no-map; @@ -132,7 +132,7 @@ c66_0_memory_region: c66-memory@a6100000 { no-map; };
- c66_0_dma_memory_region: c66-dma-memory@a7000000 { + c66_1_dma_memory_region: c66-dma-memory@a7000000 { compatible = "shared-dma-pool"; reg = <0x00 0xa7000000 0x00 0x100000>; no-map; diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 5fd7caea4419..5a7dfeb8e8eb 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -143,6 +143,7 @@ #define APPLE_CPU_PART_M2_AVALANCHE_MAX 0x039
#define AMPERE_CPU_PART_AMPERE1 0xAC3 +#define AMPERE_CPU_PART_AMPERE1A 0xAC4
#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
@@ -212,6 +213,7 @@ #define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX) #define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX) #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) +#define MIDR_AMPERE1A MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1A) #define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 56c148890daf..2f3d56857a97 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -10,63 +10,63 @@ #include <asm/memory.h> #include <asm/sysreg.h>
-#define ESR_ELx_EC_UNKNOWN (0x00) -#define ESR_ELx_EC_WFx (0x01) +#define ESR_ELx_EC_UNKNOWN UL(0x00) +#define ESR_ELx_EC_WFx UL(0x01) /* Unallocated EC: 0x02 */ -#define ESR_ELx_EC_CP15_32 (0x03) -#define ESR_ELx_EC_CP15_64 (0x04) -#define ESR_ELx_EC_CP14_MR (0x05) -#define ESR_ELx_EC_CP14_LS (0x06) -#define ESR_ELx_EC_FP_ASIMD (0x07) -#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */ -#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */ +#define ESR_ELx_EC_CP15_32 UL(0x03) +#define ESR_ELx_EC_CP15_64 UL(0x04) +#define ESR_ELx_EC_CP14_MR UL(0x05) +#define ESR_ELx_EC_CP14_LS UL(0x06) +#define ESR_ELx_EC_FP_ASIMD UL(0x07) +#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */ +#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */ /* Unallocated EC: 0x0A - 0x0B */ -#define ESR_ELx_EC_CP14_64 (0x0C) -#define ESR_ELx_EC_BTI (0x0D) -#define ESR_ELx_EC_ILL (0x0E) +#define ESR_ELx_EC_CP14_64 UL(0x0C) +#define ESR_ELx_EC_BTI UL(0x0D) +#define ESR_ELx_EC_ILL UL(0x0E) /* Unallocated EC: 0x0F - 0x10 */ -#define ESR_ELx_EC_SVC32 (0x11) -#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */ -#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */ +#define ESR_ELx_EC_SVC32 UL(0x11) +#define ESR_ELx_EC_HVC32 UL(0x12) /* EL2 only */ +#define ESR_ELx_EC_SMC32 UL(0x13) /* EL2 and above */ /* Unallocated EC: 0x14 */ -#define ESR_ELx_EC_SVC64 (0x15) -#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */ -#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ -#define ESR_ELx_EC_SYS64 (0x18) -#define ESR_ELx_EC_SVE (0x19) -#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ +#define ESR_ELx_EC_SVC64 UL(0x15) +#define ESR_ELx_EC_HVC64 UL(0x16) /* EL2 and above */ +#define ESR_ELx_EC_SMC64 UL(0x17) /* EL2 and above */ +#define ESR_ELx_EC_SYS64 UL(0x18) +#define ESR_ELx_EC_SVE UL(0x19) +#define ESR_ELx_EC_ERET UL(0x1a) /* EL2 only */ /* Unallocated EC: 0x1B */ -#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */ -#define ESR_ELx_EC_SME (0x1D) +#define ESR_ELx_EC_FPAC UL(0x1C) /* EL1 and above */ +#define ESR_ELx_EC_SME UL(0x1D) /* Unallocated EC: 0x1E */ -#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ -#define ESR_ELx_EC_IABT_LOW (0x20) -#define ESR_ELx_EC_IABT_CUR (0x21) -#define ESR_ELx_EC_PC_ALIGN (0x22) +#define ESR_ELx_EC_IMP_DEF UL(0x1f) /* EL3 only */ +#define ESR_ELx_EC_IABT_LOW UL(0x20) +#define ESR_ELx_EC_IABT_CUR UL(0x21) +#define ESR_ELx_EC_PC_ALIGN UL(0x22) /* Unallocated EC: 0x23 */ -#define ESR_ELx_EC_DABT_LOW (0x24) -#define ESR_ELx_EC_DABT_CUR (0x25) -#define ESR_ELx_EC_SP_ALIGN (0x26) -#define ESR_ELx_EC_MOPS (0x27) -#define ESR_ELx_EC_FP_EXC32 (0x28) +#define ESR_ELx_EC_DABT_LOW UL(0x24) +#define ESR_ELx_EC_DABT_CUR UL(0x25) +#define ESR_ELx_EC_SP_ALIGN UL(0x26) +#define ESR_ELx_EC_MOPS UL(0x27) +#define ESR_ELx_EC_FP_EXC32 UL(0x28) /* Unallocated EC: 0x29 - 0x2B */ -#define ESR_ELx_EC_FP_EXC64 (0x2C) +#define ESR_ELx_EC_FP_EXC64 UL(0x2C) /* Unallocated EC: 0x2D - 0x2E */ -#define ESR_ELx_EC_SERROR (0x2F) -#define ESR_ELx_EC_BREAKPT_LOW (0x30) -#define ESR_ELx_EC_BREAKPT_CUR (0x31) -#define ESR_ELx_EC_SOFTSTP_LOW (0x32) -#define ESR_ELx_EC_SOFTSTP_CUR (0x33) -#define ESR_ELx_EC_WATCHPT_LOW (0x34) -#define ESR_ELx_EC_WATCHPT_CUR (0x35) +#define ESR_ELx_EC_SERROR UL(0x2F) +#define ESR_ELx_EC_BREAKPT_LOW UL(0x30) +#define ESR_ELx_EC_BREAKPT_CUR UL(0x31) +#define ESR_ELx_EC_SOFTSTP_LOW UL(0x32) +#define ESR_ELx_EC_SOFTSTP_CUR UL(0x33) +#define ESR_ELx_EC_WATCHPT_LOW UL(0x34) +#define ESR_ELx_EC_WATCHPT_CUR UL(0x35) /* Unallocated EC: 0x36 - 0x37 */ -#define ESR_ELx_EC_BKPT32 (0x38) +#define ESR_ELx_EC_BKPT32 UL(0x38) /* 
Unallocated EC: 0x39 */ -#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */ +#define ESR_ELx_EC_VECTOR32 UL(0x3A) /* EL2 only */ /* Unallocated EC: 0x3B */ -#define ESR_ELx_EC_BRK64 (0x3C) +#define ESR_ELx_EC_BRK64 UL(0x3C) /* Unallocated EC: 0x3D - 0x3F */ -#define ESR_ELx_EC_MAX (0x3F) +#define ESR_ELx_EC_MAX UL(0x3F)
#define ESR_ELx_EC_SHIFT (26) #define ESR_ELx_EC_WIDTH (6) diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h index 8a45b7a411e0..57f76d82077e 100644 --- a/arch/arm64/include/uapi/asm/sigcontext.h +++ b/arch/arm64/include/uapi/asm/sigcontext.h @@ -320,10 +320,10 @@ struct zt_context { ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
-#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES)) +#define ZA_SIG_REGS_SIZE(vq) (((vq) * __SVE_VQ_BYTES) * ((vq) * __SVE_VQ_BYTES))
#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \ - (SVE_SIG_ZREG_SIZE(vq) * n)) + (SVE_SIG_ZREG_SIZE(vq) * (n)))
#define ZA_SIG_CONTEXT_SIZE(vq) \ (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq)) @@ -334,7 +334,7 @@ struct zt_context {
#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
-#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n) +#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * (n))
#define ZT_SIG_CONTEXT_SIZE(n) \ (sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n)) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index f6b6b4507357..dfefbdf4073a 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -456,6 +456,14 @@ static const struct midr_range erratum_spec_ssbs_list[] = { }; #endif
+#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38 +static const struct midr_range erratum_ac03_cpu_38_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + MIDR_ALL_VERSIONS(MIDR_AMPERE1A), + {}, +}; +#endif + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -772,7 +780,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .desc = "AmpereOne erratum AC03_CPU_38", .capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38, - ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1), + ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list), }, #endif { diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index f01f0fd7b7fe..3b3f6b56e733 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -68,7 +68,7 @@ enum ipi_msg_type { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, - IPI_CPU_CRASH_STOP, + IPI_CPU_STOP_NMI, IPI_TIMER, IPI_IRQ_WORK, NR_IPI, @@ -85,6 +85,8 @@ static int ipi_irq_base __ro_after_init; static int nr_ipi __ro_after_init = NR_IPI; static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
+static bool crash_stop; + static void ipi_setup(int cpu);
#ifdef CONFIG_HOTPLUG_CPU @@ -823,7 +825,7 @@ static const char *ipi_types[MAX_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNC] = "Function call interrupts", [IPI_CPU_STOP] = "CPU stop interrupts", - [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", + [IPI_CPU_STOP_NMI] = "CPU stop NMIs", [IPI_TIMER] = "Timer broadcast interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", @@ -867,9 +869,9 @@ void arch_irq_work_raise(void) } #endif
-static void __noreturn local_cpu_stop(void) +static void __noreturn local_cpu_stop(unsigned int cpu) { - set_cpu_online(smp_processor_id(), false); + set_cpu_online(cpu, false);
local_daif_mask(); sdei_mask_local_cpu(); @@ -883,21 +885,26 @@ static void __noreturn local_cpu_stop(void) */ void __noreturn panic_smp_self_stop(void) { - local_cpu_stop(); + local_cpu_stop(smp_processor_id()); }
-#ifdef CONFIG_KEXEC_CORE -static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0); -#endif - static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) { #ifdef CONFIG_KEXEC_CORE + /* + * Use local_daif_mask() instead of local_irq_disable() to make sure + * that pseudo-NMIs are disabled. The "crash stop" code starts with + * an IRQ and falls back to NMI (which might be pseudo). If the IRQ + * finally goes through right as we're timing out then the NMI could + * interrupt us. It's better to prevent the NMI and let the IRQ + * finish since the pt_regs will be better. + */ + local_daif_mask(); + crash_save_cpu(regs, cpu);
- atomic_dec(&waiting_for_crash_ipi); + set_cpu_online(cpu, false);
- local_irq_disable(); sdei_mask_local_cpu();
if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) @@ -962,14 +969,12 @@ static void do_handle_IPI(int ipinr) break;
case IPI_CPU_STOP: - local_cpu_stop(); - break; - - case IPI_CPU_CRASH_STOP: - if (IS_ENABLED(CONFIG_KEXEC_CORE)) { + case IPI_CPU_STOP_NMI: + if (IS_ENABLED(CONFIG_KEXEC_CORE) && crash_stop) { ipi_cpu_crash_stop(cpu, get_irq_regs()); - unreachable(); + } else { + local_cpu_stop(cpu); } break;
@@ -1024,8 +1029,7 @@ static bool ipi_should_be_nmi(enum ipi_msg_type ipi) return false;
switch (ipi) { - case IPI_CPU_STOP: - case IPI_CPU_CRASH_STOP: + case IPI_CPU_STOP_NMI: case IPI_CPU_BACKTRACE: case IPI_KGDB_ROUNDUP: return true; @@ -1138,79 +1142,109 @@ static inline unsigned int num_other_online_cpus(void)
void smp_send_stop(void) { + static unsigned long stop_in_progress; + cpumask_t mask; unsigned long timeout;
- if (num_other_online_cpus()) { - cpumask_t mask; + /* + * If this cpu is the only one alive at this point in time, online or + * not, there are no stop messages to be sent around, so just back out. + */ + if (num_other_online_cpus() == 0) + goto skip_ipi;
- cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); + /* Only proceed if this is the first CPU to reach this code */ + if (test_and_set_bit(0, &stop_in_progress)) + return;
- if (system_state <= SYSTEM_RUNNING) - pr_crit("SMP: stopping secondary CPUs\n"); - smp_cross_call(&mask, IPI_CPU_STOP); - } + /* + * Send an IPI to all currently online CPUs except the CPU running + * this code. + * + * NOTE: we don't do anything here to prevent other CPUs from coming + * online after we snapshot `cpu_online_mask`. Ideally, the calling code + * should do something to prevent other CPUs from coming up. This code + * can be called in the panic path and thus it doesn't seem wise to + * grab the CPU hotplug mutex ourselves. Worst case: + * - If a CPU comes online as we're running, we'll likely notice it + * during the 1 second wait below and then we'll catch it when we try + * with an NMI (assuming NMIs are enabled) since we re-snapshot the + * mask before sending an NMI. + * - If we leave the function and see that CPUs are still online we'll + * at least print a warning. Especially without NMIs this function + * isn't foolproof anyway so calling code will just have to accept + * the fact that there could be cases where a CPU can't be stopped. + */ + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask);
- /* Wait up to one second for other CPUs to stop */ + if (system_state <= SYSTEM_RUNNING) + pr_crit("SMP: stopping secondary CPUs\n"); + + /* + * Start with a normal IPI and wait up to one second for other CPUs to + * stop. We do this first because it gives other processors a chance + * to exit critical sections / drop locks and makes the rest of the + * stop process (especially console flush) more robust. + */ + smp_cross_call(&mask, IPI_CPU_STOP); timeout = USEC_PER_SEC; while (num_other_online_cpus() && timeout--) udelay(1);
- if (num_other_online_cpus()) + /* + * If CPUs are still online, try an NMI. There's no excuse for this to + * be slow, so we only give them an extra 10 ms to respond. + */ + if (num_other_online_cpus() && ipi_should_be_nmi(IPI_CPU_STOP_NMI)) { + smp_rmb(); + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + pr_info("SMP: retry stop with NMI for CPUs %*pbl\n", + cpumask_pr_args(&mask)); + + smp_cross_call(&mask, IPI_CPU_STOP_NMI); + timeout = USEC_PER_MSEC * 10; + while (num_other_online_cpus() && timeout--) + udelay(1); + } + + if (num_other_online_cpus()) { + smp_rmb(); + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", - cpumask_pr_args(cpu_online_mask)); + cpumask_pr_args(&mask)); + }
+skip_ipi: sdei_mask_local_cpu(); }
#ifdef CONFIG_KEXEC_CORE void crash_smp_send_stop(void) { - static int cpus_stopped; - cpumask_t mask; - unsigned long timeout; - /* * This function can be called twice in panic path, but obviously * we execute this only once. + * + * We use this same boolean to tell whether the IPI we send was a + * stop or a "crash stop". */ - if (cpus_stopped) + if (crash_stop) return; + crash_stop = 1;
- cpus_stopped = 1; + smp_send_stop();
- /* - * If this cpu is the only one alive at this point in time, online or - * not, there are no stop messages to be sent around, so just back out. - */ - if (num_other_online_cpus() == 0) - goto skip_ipi; - - cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); - - atomic_set(&waiting_for_crash_ipi, num_other_online_cpus()); - - pr_crit("SMP: stopping secondary CPUs\n"); - smp_cross_call(&mask, IPI_CPU_CRASH_STOP); - - /* Wait up to one second for other CPUs to stop */ - timeout = USEC_PER_SEC; - while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--) - udelay(1); - - if (atomic_read(&waiting_for_crash_ipi) > 0) - pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", - cpumask_pr_args(&mask)); - -skip_ipi: - sdei_mask_local_cpu(); sdei_handler_abort(); }
bool smp_crash_stop_failed(void) { - return (atomic_read(&waiting_for_crash_ipi) > 0); + return num_other_online_cpus() != 0; } #endif
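The smp_send_stop() rework above stops secondaries in two stages: a normal IPI with a one-second budget, then an NMI retry with a 10 ms budget for any CPUs that ignored the first request, re-snapshotting the online mask before each attempt. A standalone C sketch of that retry pattern follows; the names and the "one CPU ignores the plain IPI" model are illustrative, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000UL
#define USEC_PER_MSEC	1000UL

static unsigned int cpus_left = 3;	/* stand-in for num_other_online_cpus() */
static bool nmi_supported = true;	/* stand-in for ipi_should_be_nmi() */

/* Model: one CPU sits in a critical section and only reacts to the NMI. */
static void send_stop(bool nmi)
{
	cpus_left = nmi ? 0 : 1;
}

static void udelay(unsigned long usec) { (void)usec; }

static void send_stop_all(void)
{
	unsigned long timeout;

	if (cpus_left == 0)
		return;

	/* Stage 1: ordinary IPI, generous one-second budget. */
	send_stop(false);
	for (timeout = USEC_PER_SEC; cpus_left && timeout; timeout--)
		udelay(1);

	/* Stage 2: NMI retry, short 10 ms budget for the stragglers. */
	if (cpus_left && nmi_supported) {
		send_stop(true);
		for (timeout = 10 * USEC_PER_MSEC; cpus_left && timeout; timeout--)
			udelay(1);
	}

	if (cpus_left)
		printf("failed to stop %u secondary CPUs\n", cpus_left);
}

int main(void)
{
	send_stop_all();
	printf("CPUs still online: %u\n", cpus_left);	/* 0 with the NMI fallback */
	return 0;
}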
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index e715c157c2c4..e433dfab882a 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -426,9 +426,9 @@ static void do_ffa_mem_frag_tx(struct arm_smccc_res *res, return; }
-static __always_inline void do_ffa_mem_xfer(const u64 func_id, - struct arm_smccc_res *res, - struct kvm_cpu_context *ctxt) +static void __do_ffa_mem_xfer(const u64 func_id, + struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) { DECLARE_REG(u32, len, ctxt, 1); DECLARE_REG(u32, fraglen, ctxt, 2); @@ -440,9 +440,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, u32 offset, nr_ranges; int ret = 0;
- BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE && - func_id != FFA_FN64_MEM_LEND); - if (addr_mbz || npages_mbz || fraglen > len || fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) { ret = FFA_RET_INVALID_PARAMETERS; @@ -461,6 +458,11 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, goto out_unlock; }
+ if (len > ffa_desc_buf.len) { + ret = FFA_RET_NO_MEMORY; + goto out_unlock; + } + buf = hyp_buffers.tx; memcpy(buf, host_buffers.tx, fraglen);
@@ -512,6 +514,13 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, goto out_unlock; }
+#define do_ffa_mem_xfer(fid, res, ctxt) \ + do { \ + BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE && \ + (fid) != FFA_FN64_MEM_LEND); \ + __do_ffa_mem_xfer((fid), (res), (ctxt)); \ + } while (0); + static void do_ffa_mem_reclaim(struct arm_smccc_res *res, struct kvm_cpu_context *ctxt) { diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index dd0bb069df4b..59e05a7aea56 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -26,7 +26,7 @@
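The do_ffa_mem_xfer() wrapper above keeps the BUILD_BUG_ON() in a macro because, once the worker becomes a plain out-of-line function, func_id is no longer a compile-time constant inside its body; the check can only stay compile-time at the call sites. A minimal standalone illustration of that pattern using C11 static_assert, where MEM_SHARE, MEM_LEND and __mem_xfer() are made-up stand-ins:

#include <assert.h>
#include <stdio.h>

#define MEM_SHARE 1
#define MEM_LEND  2

static void __mem_xfer(int func_id) { printf("xfer %d\n", func_id); }

/* The out-of-line worker cannot check its argument at compile time, so a
 * macro wrapper does it where the argument is still a constant expression. */
#define mem_xfer(fid)							\
	do {								\
		static_assert((fid) == MEM_SHARE || (fid) == MEM_LEND,	\
			      "unsupported function id");		\
		__mem_xfer(fid);					\
	} while (0)

int main(void)
{
	mem_xfer(MEM_SHARE);
	mem_xfer(MEM_LEND);
	/* mem_xfer(3); would fail to compile, like BUILD_BUG_ON() */
	return 0;
}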
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) -#define TCALL_CNT (MAX_BPF_JIT_REG + 2) +#define TCCNT_PTR (MAX_BPF_JIT_REG + 2) #define TMP_REG_3 (MAX_BPF_JIT_REG + 3) #define FP_BOTTOM (MAX_BPF_JIT_REG + 4) #define ARENA_VM_START (MAX_BPF_JIT_REG + 5) @@ -63,8 +63,8 @@ static const int bpf2a64[] = { [TMP_REG_1] = A64_R(10), [TMP_REG_2] = A64_R(11), [TMP_REG_3] = A64_R(12), - /* tail_call_cnt */ - [TCALL_CNT] = A64_R(26), + /* tail_call_cnt_ptr */ + [TCCNT_PTR] = A64_R(26), /* temporary register for blinding constants */ [BPF_REG_AX] = A64_R(9), [FP_BOTTOM] = A64_R(27), @@ -282,13 +282,35 @@ static bool is_lsi_offset(int offset, int scale) * mov x29, sp * stp x19, x20, [sp, #-16]! * stp x21, x22, [sp, #-16]! - * stp x25, x26, [sp, #-16]! + * stp x26, x25, [sp, #-16]! + * stp x26, x25, [sp, #-16]! * stp x27, x28, [sp, #-16]! * mov x25, sp * mov tcc, #0 * // PROLOGUE_OFFSET */
+static void prepare_bpf_tail_call_cnt(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->prog; + const bool is_main_prog = !bpf_is_subprog(prog); + const u8 ptr = bpf2a64[TCCNT_PTR]; + const u8 fp = bpf2a64[BPF_REG_FP]; + const u8 tcc = ptr; + + emit(A64_PUSH(ptr, fp, A64_SP), ctx); + if (is_main_prog) { + /* Initialize tail_call_cnt. */ + emit(A64_MOVZ(1, tcc, 0, 0), ctx); + emit(A64_PUSH(tcc, fp, A64_SP), ctx); + emit(A64_MOV(1, ptr, A64_SP), ctx); + } else { + emit(A64_PUSH(ptr, fp, A64_SP), ctx); + emit(A64_NOP, ctx); + emit(A64_NOP, ctx); + } +} + #define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0) #define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
@@ -296,7 +318,7 @@ static bool is_lsi_offset(int offset, int scale) #define POKE_OFFSET (BTI_INSNS + 1)
/* Tail call offset to jump into */ -#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8) +#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 10)
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, bool is_exception_cb, u64 arena_vm_start) @@ -308,7 +330,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, const u8 r8 = bpf2a64[BPF_REG_8]; const u8 r9 = bpf2a64[BPF_REG_9]; const u8 fp = bpf2a64[BPF_REG_FP]; - const u8 tcc = bpf2a64[TCALL_CNT]; const u8 fpb = bpf2a64[FP_BOTTOM]; const u8 arena_vm_base = bpf2a64[ARENA_VM_START]; const int idx0 = ctx->idx; @@ -359,7 +380,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, /* Save callee-saved registers */ emit(A64_PUSH(r6, r7, A64_SP), ctx); emit(A64_PUSH(r8, r9, A64_SP), ctx); - emit(A64_PUSH(fp, tcc, A64_SP), ctx); + prepare_bpf_tail_call_cnt(ctx); emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx); } else { /* @@ -372,18 +393,15 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf, * callee-saved registers. The exception callback will not push * anything and re-use the main program's stack. * - * 10 registers are on the stack + * 12 registers are on the stack */ - emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx); + emit(A64_SUB_I(1, A64_SP, A64_FP, 96), ctx); }
/* Set up BPF prog stack base register */ emit(A64_MOV(1, fp, A64_SP), ctx);
if (!ebpf_from_cbpf && is_main_prog) { - /* Initialize tail_call_cnt */ - emit(A64_MOVZ(1, tcc, 0, 0), ctx); - cur_offset = ctx->idx - idx0; if (cur_offset != PROLOGUE_OFFSET) { pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", @@ -432,7 +450,8 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
const u8 tmp = bpf2a64[TMP_REG_1]; const u8 prg = bpf2a64[TMP_REG_2]; - const u8 tcc = bpf2a64[TCALL_CNT]; + const u8 tcc = bpf2a64[TMP_REG_3]; + const u8 ptr = bpf2a64[TCCNT_PTR]; const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) #define jmp_offset (out_offset - (cur_offset)) @@ -449,11 +468,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
/* - * if (tail_call_cnt >= MAX_TAIL_CALL_CNT) + * if ((*tail_call_cnt_ptr) >= MAX_TAIL_CALL_CNT) * goto out; - * tail_call_cnt++; + * (*tail_call_cnt_ptr)++; */ emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); + emit(A64_LDR64I(tcc, ptr, 0), ctx); emit(A64_CMP(1, tcc, tmp), ctx); emit(A64_B_(A64_COND_CS, jmp_offset), ctx); emit(A64_ADD_I(1, tcc, tcc, 1), ctx); @@ -469,6 +489,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_LDR64(prg, tmp, prg), ctx); emit(A64_CBZ(1, prg, jmp_offset), ctx);
+ /* Update tail_call_cnt if the slot is populated. */ + emit(A64_STR64I(tcc, ptr, 0), ctx); + /* goto *(prog->bpf_func + prologue_offset); */ off = offsetof(struct bpf_prog, bpf_func); emit_a64_mov_i64(tmp, off, ctx); @@ -721,6 +744,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb) const u8 r8 = bpf2a64[BPF_REG_8]; const u8 r9 = bpf2a64[BPF_REG_9]; const u8 fp = bpf2a64[BPF_REG_FP]; + const u8 ptr = bpf2a64[TCCNT_PTR]; const u8 fpb = bpf2a64[FP_BOTTOM];
/* We're done with BPF stack */ @@ -738,7 +762,8 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb) /* Restore x27 and x28 */ emit(A64_POP(fpb, A64_R(28), A64_SP), ctx); /* Restore fs (x25) and x26 */ - emit(A64_POP(fp, A64_R(26), A64_SP), ctx); + emit(A64_POP(ptr, fp, A64_SP), ctx); + emit(A64_POP(ptr, fp, A64_SP), ctx);
/* Restore callee-saved register */ emit(A64_POP(r8, r9, A64_SP), ctx); diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 2584e94e2134..fda7eac23f87 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -117,7 +117,7 @@ asmlinkage int m68k_clone(struct pt_regs *regs) { /* regs will be equal to current_pt_regs() */ struct kernel_clone_args args = { - .flags = regs->d1 & ~CSIGNAL, + .flags = (u32)(regs->d1) & ~CSIGNAL, .pidfd = (int __user *)regs->d3, .child_tid = (int __user *)regs->d4, .parent_tid = (int __user *)regs->d3, diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 09ebcbdfb34f..46a4c85e85e2 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -107,6 +107,7 @@ config CRYPTO_AES_PPC_SPE
config CRYPTO_AES_GCM_P10 tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)" + depends on BROKEN depends on PPC64 && CPU_LITTLE_ENDIAN && VSX select CRYPTO_LIB_AES select CRYPTO_ALGAPI diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index 2bc53c646ccd..83848b534cb1 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h @@ -39,6 +39,12 @@ #define STDX_BE stringify_in_c(stdbrx) #endif
+#ifdef CONFIG_CC_IS_CLANG +#define DS_FORM_CONSTRAINT "Z<>" +#else +#define DS_FORM_CONSTRAINT "YZ<>" +#endif + #else /* 32-bit */
/* operations for longs and pointers */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 5bf6a4d49268..d1ea554c33ed 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -11,6 +11,7 @@ #include <asm/cmpxchg.h> #include <asm/barrier.h> #include <asm/asm-const.h> +#include <asm/asm-compat.h>
/* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with @@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v) if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter)); else - __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
return t; } @@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i) if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter)); else - __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); + __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i)); }
#define ATOMIC64_OP(op, asm_op) \ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index fd594bf6c6a9..4f5a46a77fa2 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -6,6 +6,7 @@ #include <asm/page.h> #include <asm/extable.h> #include <asm/kup.h> +#include <asm/asm-compat.h>
#ifdef __powerpc64__ /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */ @@ -92,12 +93,6 @@ __pu_failed: \ : label) #endif
-#ifdef CONFIG_CC_IS_CLANG -#define DS_FORM_CONSTRAINT "Z<>" -#else -#define DS_FORM_CONSTRAINT "YZ<>" -#endif - #ifdef __powerpc64__ #ifdef CONFIG_PPC_KERNEL_PREFIXED #define __put_user_asm2_goto(x, ptr, label) \ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index ac74321b1192..c955a8196d55 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -41,12 +41,12 @@ #include "head_32.h"
.macro compare_to_kernel_boundary scratch, addr -#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000 +#if CONFIG_TASK_SIZE <= 0x80000000 && MODULES_VADDR >= 0x80000000 /* By simply checking Address >= 0x80000000, we know if its a kernel address */ not. \scratch, \addr #else rlwinm \scratch, \addr, 16, 0xfff8 - cmpli cr0, \scratch, PAGE_OFFSET@h + cmpli cr0, \scratch, TASK_SIZE@h #endif .endm
@@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */ mfspr r10, SPRN_SRR0 mtspr SPRN_MD_EPN, r10 rlwinm r11, r10, 16, 0xfff8 - cmpli cr1, r11, PAGE_OFFSET@h + cmpli cr1, r11, TASK_SIZE@h mfspr r11, SPRN_M_TWB /* Get level 1 table */ blt+ cr1, 3f
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S index 48fc6658053a..894cb939cd2b 100644 --- a/arch/powerpc/kernel/vdso/gettimeofday.S +++ b/arch/powerpc/kernel/vdso/gettimeofday.S @@ -38,11 +38,7 @@ .else addi r4, r5, VDSO_DATA_OFFSET .endif -#ifdef __powerpc64__ bl CFUNC(DOTSYM(\funct)) -#else - bl \funct -#endif PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) #ifdef __powerpc64__ PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1) diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c index 388bba0ab3e7..15d918dce27d 100644 --- a/arch/powerpc/mm/nohash/8xx.c +++ b/arch/powerpc/mm/nohash/8xx.c @@ -150,11 +150,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
mmu_mapin_immr();
- mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true); if (debug_pagealloc_enabled_or_kfence()) { top = boundary; } else { - mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true); + mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true); mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true); }
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h index fa0f535bbbf0..1d85b6617508 100644 --- a/arch/riscv/include/asm/kvm_vcpu_pmu.h +++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h @@ -10,6 +10,7 @@ #define __KVM_VCPU_RISCV_PMU_H
#include <linux/perf/riscv_pmu.h> +#include <asm/kvm_vcpu_insn.h> #include <asm/sbi.h>
#ifdef CONFIG_RISCV_PMU_SBI @@ -64,11 +65,11 @@ struct kvm_pmu {
#if defined(CONFIG_32BIT) #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ -{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \ +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, #else #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, +{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, #endif
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid); @@ -104,8 +105,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu); struct kvm_pmu { };
+static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask) +{ + if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) { + *val = 0; + return KVM_INSN_CONTINUE_NEXT_SEPC; + } else { + return KVM_INSN_ILLEGAL_TRAP; + } +} + #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \ -{.base = 0, .count = 0, .func = NULL }, +{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {} static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid) diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c index 3348a61de7d9..2932791e9388 100644 --- a/arch/riscv/kernel/perf_callchain.c +++ b/arch/riscv/kernel/perf_callchain.c @@ -62,7 +62,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra); - while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) + while (fp && !(fp & 0x7) && entry->nr < entry->max_stack) fp = user_backtrace(entry, fp, 0); }
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c index bcf41d6e0df0..2707a51b082c 100644 --- a/arch/riscv/kvm/vcpu_pmu.c +++ b/arch/riscv/kvm/vcpu_pmu.c @@ -391,19 +391,9 @@ int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num, static void kvm_pmu_clear_snapshot_area(struct kvm_vcpu *vcpu) { struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); - int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
- if (kvpmu->sdata) { - if (kvpmu->snapshot_addr != INVALID_GPA) { - memset(kvpmu->sdata, 0, snapshot_area_size); - kvm_vcpu_write_guest(vcpu, kvpmu->snapshot_addr, - kvpmu->sdata, snapshot_area_size); - } else { - pr_warn("snapshot address invalid\n"); - } - kfree(kvpmu->sdata); - kvpmu->sdata = NULL; - } + kfree(kvpmu->sdata); + kvpmu->sdata = NULL; kvpmu->snapshot_addr = INVALID_GPA; }
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 62f409d4176e..7de128be8db9 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -127,8 +127,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) run->riscv_sbi.args[3] = cp->a3; run->riscv_sbi.args[4] = cp->a4; run->riscv_sbi.args[5] = cp->a5; - run->riscv_sbi.ret[0] = cp->a0; - run->riscv_sbi.ret[1] = cp->a1; + run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED; + run->riscv_sbi.ret[1] = 0; }
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu, diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index fbadca645af7..406746666eb7 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -6,8 +6,23 @@ #define MCOUNT_INSN_SIZE 6
#ifndef __ASSEMBLY__ +#include <asm/stacktrace.h>
-unsigned long return_address(unsigned int n); +static __always_inline unsigned long return_address(unsigned int n) +{ + struct stack_frame *sf; + + if (!n) + return (unsigned long)__builtin_return_address(0); + + sf = (struct stack_frame *)current_frame_address(); + do { + sf = (struct stack_frame *)sf->back_chain; + if (!sf) + return 0; + } while (--n); + return sf->gprs[8]; +} #define ftrace_return_address(n) return_address(n)
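return_address() above is now an inline walk of the s390 stack back chain: skip n frames by following back_chain pointers, then take the saved return address (the gprs[8] slot) from the frame reached. A userspace model of that walk over a hand-built chain; the struct layout and addresses are illustrative only.

#include <stdio.h>

struct stack_frame {
	struct stack_frame *back_chain;	/* caller's frame, NULL at the top */
	unsigned long ret_addr;		/* models the gprs[8] save slot */
};

static unsigned long return_address(struct stack_frame *sf, unsigned int n)
{
	if (!n)
		return 0xf00;		/* would be __builtin_return_address(0) */

	do {
		sf = sf->back_chain;
		if (!sf)
			return 0;	/* walked past the oldest frame */
	} while (--n);
	return sf->ret_addr;
}

int main(void)
{
	struct stack_frame top  = { NULL, 0xaaaa };
	struct stack_frame mid  = { &top, 0xbbbb };
	struct stack_frame leaf = { &mid, 0xcccc };

	printf("%#lx %#lx %#lx\n",
	       return_address(&leaf, 1),	/* 0xbbbb */
	       return_address(&leaf, 2),	/* 0xaaaa */
	       return_address(&leaf, 3));	/* 0 */
	return 0;
}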
void ftrace_caller(void); diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index e47a4be54ff8..a70f25e9c17d 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -36,7 +36,7 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls CFLAGS_dumpstack.o += -fno-optimize-sibling-calls CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
-obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o +obj-y := head64.o traps.o time.o process.o early.o setup.o idle.o vtime.o obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o obj-y += sysinfo.o lgr.o os_info.o ctlreg.o diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 14d324865e33..ee051ad81c71 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -183,12 +183,15 @@ void __do_early_pgm_check(struct pt_regs *regs)
static noinline __init void setup_lowcore_early(void) { + struct lowcore *lc = get_lowcore(); psw_t psw;
psw.addr = (unsigned long)early_pgm_check_handler; psw.mask = PSW_KERNEL_BITS; - get_lowcore()->program_new_psw = psw; - get_lowcore()->preempt_count = INIT_PREEMPT_COUNT; + lc->program_new_psw = psw; + lc->preempt_count = INIT_PREEMPT_COUNT; + lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); + lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); }
static __init void detect_diag9c(void) diff --git a/arch/s390/kernel/earlypgm.S b/arch/s390/kernel/earlypgm.S deleted file mode 100644 index c634871f0d90..000000000000 --- a/arch/s390/kernel/earlypgm.S +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright IBM Corp. 2006, 2007 - * Author(s): Michael Holzheu holzheu@de.ibm.com - */ - -#include <linux/linkage.h> -#include <asm/asm-offsets.h> - -SYM_CODE_START(early_pgm_check_handler) - stmg %r8,%r15,__LC_SAVE_AREA_SYNC - aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) - la %r11,STACK_FRAME_OVERHEAD(%r15) - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - stmg %r0,%r7,__PT_R0(%r11) - mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC - lgr %r2,%r11 - brasl %r14,__do_early_pgm_check - mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) - lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) - lpswe __LC_RETURN_PSW -SYM_CODE_END(early_pgm_check_handler) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 749410cfdbc0..6539ec4800cd 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -599,6 +599,22 @@ SYM_CODE_START(restart_int_handler) 3: j 3b SYM_CODE_END(restart_int_handler)
+SYM_CODE_START(early_pgm_check_handler) + STMG_LC %r8,%r15,__LC_SAVE_AREA_SYNC + GET_LC %r13 + aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) + la %r11,STACK_FRAME_OVERHEAD(%r15) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC(%r13) + lgr %r2,%r11 + brasl %r14,__do_early_pgm_check + mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) + LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE +SYM_CODE_END(early_pgm_check_handler) + .section .kprobes.text, "ax"
#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK) diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 640363b2a105..9f59837d159e 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -162,22 +162,3 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, { arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false); } - -unsigned long return_address(unsigned int n) -{ - struct unwind_state state; - unsigned long addr; - - /* Increment to skip current stack entry */ - n++; - - unwind_for_each_frame(&state, NULL, NULL, 0) { - addr = unwind_get_return_address(&state); - if (!addr) - break; - if (!n--) - return addr; - } - return 0; -} -EXPORT_SYMBOL_GPL(return_address); diff --git a/arch/um/Kconfig b/arch/um/Kconfig index dca84fd6d00a..c89575d05021 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -11,7 +11,6 @@ config UML select ARCH_HAS_KCOV select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER - select ARCH_NO_PREEMPT_DYNAMIC select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_KASAN if X86_64 select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index da8b66dce0da..327c45c5013f 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -16,6 +16,7 @@ #include <asm/insn-eval.h> #include <asm/pgtable.h> #include <asm/set_memory.h> +#include <asm/traps.h>
/* MMIO direction */ #define EPT_READ 0 @@ -433,6 +434,11 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve) return -EINVAL; }
+ if (!fault_in_kernel_space(ve->gla)) { + WARN_ONCE(1, "Access to userspace address is not supported"); + return -EINVAL; + } + /* * Reject EPT violation #VEs that split pages. * diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index cd37de5ec404..d63ba9eaba3e 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags) err = skcipher_walk_aead_encrypt(&walk, req, false); else err = skcipher_walk_aead_decrypt(&walk, req, false); + if (err) + return err;
/* * Since the AES-GCM assembly code requires that at least three assembly @@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags) gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags);
/* En/decrypt the data and pass the ciphertext through GHASH. */ - while ((nbytes = walk.nbytes) != 0) { - if (unlikely(nbytes < walk.total)) { - /* - * Non-last segment. In this case, the assembly - * function requires that the length be a multiple of 16 - * (AES_BLOCK_SIZE) bytes. The needed buffering of up - * to 16 bytes is handled by the skcipher_walk. Here we - * just need to round down to a multiple of 16. - */ - nbytes = round_down(nbytes, AES_BLOCK_SIZE); - aes_gcm_update(key, le_ctr, ghash_acc, - walk.src.virt.addr, walk.dst.virt.addr, - nbytes, flags); - le_ctr[0] += nbytes / AES_BLOCK_SIZE; - kernel_fpu_end(); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - kernel_fpu_begin(); - } else { - /* Last segment: process all remaining data. */ - aes_gcm_update(key, le_ctr, ghash_acc, - walk.src.virt.addr, walk.dst.virt.addr, - nbytes, flags); - err = skcipher_walk_done(&walk, 0); - /* - * The low word of the counter isn't used by the - * finalize, so there's no need to increment it here. - */ - } + while (unlikely((nbytes = walk.nbytes) < walk.total)) { + /* + * Non-last segment. In this case, the assembly function + * requires that the length be a multiple of 16 (AES_BLOCK_SIZE) + * bytes. The needed buffering of up to 16 bytes is handled by + * the skcipher_walk. Here we just need to round down to a + * multiple of 16. + */ + nbytes = round_down(nbytes, AES_BLOCK_SIZE); + aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr, + walk.dst.virt.addr, nbytes, flags); + le_ctr[0] += nbytes / AES_BLOCK_SIZE; + kernel_fpu_end(); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + if (err) + return err; + kernel_fpu_begin(); } - if (err) - goto out; + /* Last segment: process all remaining data. */ + aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr, + walk.dst.virt.addr, nbytes, flags); + /* + * The low word of the counter isn't used by the finalize, so there's no + * need to increment it here. + */
/* Finalize */ taglen = crypto_aead_authsize(tfm); @@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags) datalen, tag, taglen, flags)) err = -EBADMSG; } -out: kernel_fpu_end(); + if (nbytes) + skcipher_walk_done(&walk, 0); return err; }
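The gcm_crypt() restructuring above splits the walk into two cases: non-final segments are rounded down to a whole number of AES blocks (the skcipher walk buffers the remainder), the final segment takes whatever is left including a partial block, and skcipher_walk_done() errors are checked as they happen. A standalone sketch of that rounding and termination logic with made-up byte counts:

#include <stdio.h>

#define AES_BLOCK_SIZE 16

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int round_down_block(unsigned int x) { return x & ~(AES_BLOCK_SIZE - 1); }

int main(void)
{
	unsigned int total = 70;	/* bytes still to process (walk.total) */
	unsigned int step = 33;		/* bytes the walk hands over per iteration */
	unsigned int nbytes;

	/* Non-final segments: trim to a block multiple; the walk keeps the tail. */
	while ((nbytes = min_u(step, total)) < total) {
		nbytes = round_down_block(nbytes);
		printf("update   %2u bytes\n", nbytes);
		total -= nbytes;
	}

	/* Final segment: process everything that remains. */
	printf("finalize %2u bytes\n", nbytes);
	total -= nbytes;
	printf("left: %u\n", total);	/* 0 */
	return 0;
}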
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9e519d8a810a..d879478db3f5 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3972,8 +3972,12 @@ static int intel_pmu_hw_config(struct perf_event *event) x86_pmu.pebs_aliases(event); }
- if (needs_branch_stack(event) && is_sampling_event(event)) - event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + if (needs_branch_stack(event)) { + /* Avoid branch stack setup for counting events in SAMPLE READ */ + if (is_sampling_event(event) || + !(event->attr.sample_type & PERF_SAMPLE_READ)) + event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + }
if (branch_sample_counters(event)) { struct perf_event *leader, *sibling; diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index b4aa8daa4773..2959970dd10e 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1606,6 +1606,7 @@ static void pt_event_stop(struct perf_event *event, int mode) * see comment in intel_pt_interrupt(). */ WRITE_ONCE(pt->handle_nmi, 0); + barrier();
pt_config_stop(event);
@@ -1657,11 +1658,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, return 0;
/* - * Here, handle_nmi tells us if the tracing is on + * There is no PT interrupt in this mode, so stop the trace and it will + * remain stopped while the buffer is copied. */ - if (READ_ONCE(pt->handle_nmi)) - pt_config_stop(event); - + pt_config_stop(event); pt_read_offset(buf); pt_update_head(pt);
@@ -1673,11 +1673,10 @@ static long pt_event_snapshot_aux(struct perf_event *event, ret = perf_output_copy_aux(&pt->handle, handle, from, to);
/* - * If the tracing was on when we turned up, restart it. - * Compiler barrier not needed as we couldn't have been - * preempted by anything that touches pt->handle_nmi. + * Here, handle_nmi tells us if the tracing was on. + * If the tracing was on, restart it. */ - if (pt->handle_nmi) + if (READ_ONCE(pt->handle_nmi)) pt_config_start(event);
return ret; diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 21bc53f5ed0c..5ab1a4598d00 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -174,6 +174,14 @@ void acpi_generic_reduced_hw_init(void); void x86_default_set_root_pointer(u64 addr); u64 x86_default_get_root_pointer(void);
+#ifdef CONFIG_XEN_PV +/* A Xen PV domain needs a special acpi_os_ioremap() handling. */ +extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, + acpi_size size); +void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size); +#define acpi_os_ioremap acpi_os_ioremap +#endif + #else /* !CONFIG_ACPI */
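The acpi.h and acpi/boot.c hunks above (together with the Xen p2m change later in this series) turn acpi_os_ioremap into a function pointer with x86_acpi_os_ioremap() as the default, so a Xen PV domain can substitute a variant that rebases remapped non-RAM regions before mapping them. A minimal sketch of that override pattern; all names below are illustrative stand-ins:

#include <stdio.h>

typedef void *(*ioremap_fn)(unsigned long phys, unsigned long size);

static void *default_ioremap(unsigned long phys, unsigned long size)
{
	printf("default map %#lx (+%lu)\n", phys, size);
	return (void *)phys;
}

static void *xen_ioremap(unsigned long phys, unsigned long size)
{
	/* A real implementation would first translate addresses that fall
	 * inside a remapped non-RAM region, then use the default path. */
	printf("xen map     %#lx (+%lu)\n", phys, size);
	return default_ioremap(phys, size);
}

/* Default provider; a platform may override it before ACPI tables are mapped. */
static ioremap_fn acpi_ioremap_hook = default_ioremap;

int main(void)
{
	acpi_ioremap_hook(0x7f000000UL, 4096);
	acpi_ioremap_hook = xen_ioremap;	/* platform-specific override */
	acpi_ioremap_hook(0x7f000000UL, 4096);
	return 0;
}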
#define acpi_lapic 0 diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index c67fa6ad098a..6ffa8b75f4cd 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -69,7 +69,11 @@ extern u64 arch_irq_stat(void); #define local_softirq_pending_ref pcpu_hot.softirq_pending
#if IS_ENABLED(CONFIG_KVM_INTEL) -static inline void kvm_set_cpu_l1tf_flush_l1d(void) +/* + * This function is called from noinstr interrupt contexts + * and must be inlined to not get instrumentation. + */ +static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1); } @@ -84,7 +88,7 @@ static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void) return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d); } #else /* !IS_ENABLED(CONFIG_KVM_INTEL) */ -static inline void kvm_set_cpu_l1tf_flush_l1d(void) { } +static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { } #endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
#endif /* _ASM_X86_HARDIRQ_H */ diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index d4f24499b256..ad5c68f0509d 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -212,8 +212,8 @@ __visible noinstr void func(struct pt_regs *regs, \ irqentry_state_t state = irqentry_enter(regs); \ u32 vector = (u32)(u8)error_code; \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ - kvm_set_cpu_l1tf_flush_l1d(); \ run_irq_on_irqstack_cond(__##func, regs, vector); \ instrumentation_end(); \ irqentry_exit(regs, state); \ @@ -250,7 +250,6 @@ static void __##func(struct pt_regs *regs); \ \ static __always_inline void instr_##func(struct pt_regs *regs) \ { \ - kvm_set_cpu_l1tf_flush_l1d(); \ run_sysvec_on_irqstack_cond(__##func, regs); \ } \ \ @@ -258,6 +257,7 @@ __visible noinstr void func(struct pt_regs *regs) \ { \ irqentry_state_t state = irqentry_enter(regs); \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ instr_##func (regs); \ instrumentation_end(); \ @@ -288,7 +288,6 @@ static __always_inline void __##func(struct pt_regs *regs); \ static __always_inline void instr_##func(struct pt_regs *regs) \ { \ __irq_enter_raw(); \ - kvm_set_cpu_l1tf_flush_l1d(); \ __##func (regs); \ __irq_exit_raw(); \ } \ @@ -297,6 +296,7 @@ __visible noinstr void func(struct pt_regs *regs) \ { \ irqentry_state_t state = irqentry_enter(regs); \ \ + kvm_set_cpu_l1tf_flush_l1d(); \ instrumentation_begin(); \ instr_##func (regs); \ instrumentation_end(); \ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4a68cb3eba78..b4bcd5108079 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1727,6 +1727,8 @@ struct kvm_x86_ops { void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); + + const bool x2apic_icr_is_split; const unsigned long required_apicv_inhibits; bool allow_apicv_in_x2apic_without_x2apic_virtualization; void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 9f4618dcd704..4efecac49863 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1778,3 +1778,14 @@ u64 x86_default_get_root_pointer(void) { return boot_params.acpi_rsdp_addr; } + +#ifdef CONFIG_XEN_PV +void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size) +{ + return ioremap_cache(phys, size); +} + +void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys, acpi_size size) = + x86_acpi_os_ioremap; +EXPORT_SYMBOL_GPL(acpi_os_ioremap); +#endif diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 27892e57c4ef..6aeeb43dd3f6 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -475,24 +475,25 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void) { struct sgx_epc_page *page; int nid_of_current = numa_node_id(); - int nid = nid_of_current; + int nid_start, nid;
- if (node_isset(nid_of_current, sgx_numa_mask)) { - page = __sgx_alloc_epc_page_from_node(nid_of_current); - if (page) - return page; - } - - /* Fall back to the non-local NUMA nodes: */ - while (true) { - nid = next_node_in(nid, sgx_numa_mask); - if (nid == nid_of_current) - break; + /* + * Try local node first. If it doesn't have an EPC section, + * fall back to the non-local NUMA nodes. + */ + if (node_isset(nid_of_current, sgx_numa_mask)) + nid_start = nid_of_current; + else + nid_start = next_node_in(nid_of_current, sgx_numa_mask);
+ nid = nid_start; + do { page = __sgx_alloc_epc_page_from_node(nid); if (page) return page; - } + + nid = next_node_in(nid, sgx_numa_mask); + } while (nid != nid_start);
return ERR_PTR(-ENOMEM); } diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index a817ed0724d1..4b9d4557fc94 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -559,10 +559,11 @@ void early_setup_idt(void) */ void __head startup_64_setup_gdt_idt(void) { + struct desc_struct *gdt = (void *)(__force unsigned long)init_per_cpu_var(gdt_page.gdt); void *handler = NULL;
struct desc_ptr startup_gdt_descr = { - .address = (unsigned long)&RIP_REL_REF(init_per_cpu_var(gdt_page.gdt)), + .address = (unsigned long)&RIP_REL_REF(*gdt), .size = GDT_SIZE - 1, };
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index df337860612d..cd8ed1edbf9e 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/serial_8250.h> +#include <linux/acpi.h> #include <asm/apic.h> #include <asm/io_apic.h> #include <asm/acpi.h> diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index c94dec6a1834..1f54eedc3015 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c @@ -9,6 +9,7 @@ #include <linux/pci.h> #include <linux/dmi.h> #include <linux/range.h> +#include <linux/acpi.h>
#include <asm/pci-direct.h> #include <linux/sort.h> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 6d3d20e3e43a..d8d582b750d4 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -798,6 +798,27 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
#define LAM_U57_BITS 6
+static void enable_lam_func(void *__mm) +{ + struct mm_struct *mm = __mm; + + if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) { + write_cr3(__read_cr3() | mm->context.lam_cr3_mask); + set_tlbstate_lam_mode(mm); + } +} + +static void mm_enable_lam(struct mm_struct *mm) +{ + /* + * Even though the process must still be single-threaded at this + * point, kernel threads may be using the mm. IPI those kernel + * threads if they exist. + */ + on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true); + set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); +} + static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) { if (!cpu_feature_enabled(X86_FEATURE_LAM)) @@ -814,6 +835,10 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) if (mmap_write_lock_killable(mm)) return -EINTR;
+ /* + * MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from + * being enabled unless the process is single threaded: + */ if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { mmap_write_unlock(mm); return -EBUSY; @@ -830,9 +855,7 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) return -EINVAL; }
- write_cr3(__read_cr3() | mm->context.lam_cr3_mask); - set_tlbstate_lam_mode(mm); - set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); + mm_enable_lam(mm);
mmap_write_unlock(mm);
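mm_enable_lam() above pushes the new CR3 LAM bits to every CPU in the mm's cpumask via on_each_cpu_mask(); the callback only acts on CPUs that actually have this mm loaded (for example kernel threads that borrowed it), and the LOCK_LAM bit is set afterwards. A userspace model of that "apply only where the mm is loaded" step; the CPU count, structures and mask bit are simplified stand-ins.

#include <stdio.h>

#define NR_CPUS 4
#define LAM_CR3_BIT (1UL << 62)	/* illustrative bit, not the real encoding */

struct mm { unsigned long lam_cr3_mask; };

static struct mm *loaded_mm[NR_CPUS];	/* what each CPU currently runs with */
static unsigned long cr3[NR_CPUS];

/* Would run on each CPU in mm_cpumask(mm): only update CPUs that still have
 * this mm loaded; others pick the mask up on their next context switch. */
static void enable_lam_func(int cpu, struct mm *mm)
{
	if (loaded_mm[cpu] == mm)
		cr3[cpu] |= mm->lam_cr3_mask;
}

static void mm_enable_lam(struct mm *mm)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)	/* stands in for on_each_cpu_mask() */
		enable_lam_func(cpu, mm);
}

int main(void)
{
	struct mm a = { .lam_cr3_mask = LAM_CR3_BIT }, b = { 0 };

	loaded_mm[0] = &a;	/* the calling task */
	loaded_mm[1] = &a;	/* a kernel thread using the same mm */
	loaded_mm[2] = &b;
	loaded_mm[3] = &b;

	mm_enable_lam(&a);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d lam=%d\n", cpu, !!(cr3[cpu] & LAM_CR3_BIT));
	return 0;
}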
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 0c35207320cb..390e4fe7433e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -60,6 +60,7 @@ #include <linux/stackprotector.h> #include <linux/cpuhotplug.h> #include <linux/mc146818rtc.h> +#include <linux/acpi.h>
#include <asm/acpi.h> #include <asm/cacheinfo.h> diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 82b128d3f309..0a2bbd674a6d 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -8,6 +8,7 @@ #include <linux/ioport.h> #include <linux/export.h> #include <linux/pci.h> +#include <linux/acpi.h>
#include <asm/acpi.h> #include <asm/bios_ebda.h> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5bb481aefcbc..b1269e2bb761 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2453,6 +2453,43 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13)) + +int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) +{ + if (data & X2APIC_ICR_RESERVED_BITS) + return 1; + + /* + * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but + * only AMD requires it to be zero, Intel essentially just ignores the + * bit. And if IPI virtualization (Intel) or x2AVIC (AMD) is enabled, + * the CPU performs the reserved bits checks, i.e. the underlying CPU + * behavior will "win". Arbitrarily clear the BUSY bit, as there is no + * sane way to provide consistent behavior with respect to hardware. + */ + data &= ~APIC_ICR_BUSY; + + kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); + if (kvm_x86_ops.x2apic_icr_is_split) { + kvm_lapic_set_reg(apic, APIC_ICR, data); + kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32); + } else { + kvm_lapic_set_reg64(apic, APIC_ICR, data); + } + trace_kvm_apic_write(APIC_ICR, data); + return 0; +} + +static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic) +{ + if (kvm_x86_ops.x2apic_icr_is_split) + return (u64)kvm_lapic_get_reg(apic, APIC_ICR) | + (u64)kvm_lapic_get_reg(apic, APIC_ICR2) << 32; + + return kvm_lapic_get_reg64(apic, APIC_ICR); +} + /* emulate APIC access in a trap manner */ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) { @@ -2470,7 +2507,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) * maybe-unecessary write, and both are in the noise anyways. */ if (apic_x2apic_mode(apic) && offset == APIC_ICR) - kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR)); + WARN_ON_ONCE(kvm_x2apic_icr_write(apic, kvm_x2apic_icr_read(apic))); else kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset)); } @@ -2990,18 +3027,22 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
/* * In x2APIC mode, the LDR is fixed and based on the id. And - * ICR is internally a single 64-bit register, but needs to be - * split to ICR+ICR2 in userspace for backwards compatibility. + * if the ICR is _not_ split, ICR is internally a single 64-bit + * register, but needs to be split to ICR+ICR2 in userspace for + * backwards compatibility. */ - if (set) { + if (set) *ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);
- icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) | - (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32; - __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr); - } else { - icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR); - __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32); + if (!kvm_x86_ops.x2apic_icr_is_split) { + if (set) { + icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) | + (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32; + __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr); + } else { + icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR); + __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32); + } } }
@@ -3194,22 +3235,12 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) return 0; }
-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) -{ - data &= ~APIC_ICR_BUSY; - - kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32)); - kvm_lapic_set_reg64(apic, APIC_ICR, data); - trace_kvm_apic_write(APIC_ICR, data); - return 0; -} - static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data) { u32 low;
if (reg == APIC_ICR) { - *data = kvm_lapic_get_reg64(apic, APIC_ICR); + *data = kvm_x2apic_icr_read(apic); return 0; }
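The new x2apic_icr_is_split flag above lets the common lapic code keep the ICR in whichever layout the vendor code expects: split across the 32-bit ICR and ICR2 registers for AMD, or a single 64-bit ICR register for Intel, with kvm_x2apic_icr_read()/write() converting consistently. A small standalone model of the two layouts; the register offsets are the usual APIC ones and the backing store is just a byte buffer.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define APIC_ICR	0x300
#define APIC_ICR2	0x310

static uint8_t regs[0x400];
static int icr_is_split;	/* mirrors kvm_x86_ops.x2apic_icr_is_split */

static void set32(int off, uint32_t v)  { memcpy(&regs[off], &v, 4); }
static uint32_t get32(int off)          { uint32_t v; memcpy(&v, &regs[off], 4); return v; }
static void set64(int off, uint64_t v)  { memcpy(&regs[off], &v, 8); }
static uint64_t get64(int off)          { uint64_t v; memcpy(&v, &regs[off], 8); return v; }

static void icr_write(uint64_t data)
{
	if (icr_is_split) {		/* AMD layout: ICR + ICR2 hold 32 bits each */
		set32(APIC_ICR, (uint32_t)data);
		set32(APIC_ICR2, (uint32_t)(data >> 32));
	} else {			/* Intel layout: ICR is one 64-bit register */
		set64(APIC_ICR, data);
	}
}

static uint64_t icr_read(void)
{
	if (icr_is_split)
		return (uint64_t)get32(APIC_ICR) | (uint64_t)get32(APIC_ICR2) << 32;
	return get64(APIC_ICR);
}

int main(void)
{
	for (icr_is_split = 0; icr_is_split <= 1; icr_is_split++) {
		icr_write(0x00000002000000fdULL);
		printf("split=%d icr=%#llx\n", icr_is_split,
		       (unsigned long long)icr_read());
	}
	return 0;
}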
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5ab2c92c7331..22513133925e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5062,6 +5062,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .enable_nmi_window = svm_enable_nmi_window, .enable_irq_window = svm_enable_irq_window, .update_cr8_intercept = svm_update_cr8_intercept, + + .x2apic_icr_is_split = true, .set_virtual_apic_mode = avic_refresh_virtual_apic_mode, .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl, .apicv_post_state_restore = avic_apicv_post_state_restore, diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c index 0bf35ebe8a1b..a70699665e11 100644 --- a/arch/x86/kvm/vmx/main.c +++ b/arch/x86/kvm/vmx/main.c @@ -89,6 +89,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = { .enable_nmi_window = vmx_enable_nmi_window, .enable_irq_window = vmx_enable_irq_window, .update_cr8_intercept = vmx_update_cr8_intercept, + + .x2apic_icr_is_split = false, .set_virtual_apic_mode = vmx_set_virtual_apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 44ac64f3a047..a041d2ecd838 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -503,9 +503,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, { struct mm_struct *prev = this_cpu_read(cpu_tlbstate.loaded_mm); u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); - unsigned long new_lam = mm_lam_cr3_mask(next); bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy); unsigned cpu = smp_processor_id(); + unsigned long new_lam; u64 next_tlb_gen; bool need_flush; u16 new_asid; @@ -619,9 +619,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, cpumask_clear_cpu(cpu, mm_cpumask(prev)); }
- /* - * Start remote flushes and then read tlb_gen. - */ + /* Start receiving IPIs and then read tlb_gen (and LAM below) */ if (next != &init_mm) cpumask_set_cpu(cpu, mm_cpumask(next)); next_tlb_gen = atomic64_read(&next->context.tlb_gen); @@ -633,6 +631,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, barrier(); }
+ new_lam = mm_lam_cr3_mask(next); set_tlbstate_lam_mode(next); if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index d25d81c8ecc0..074b41fafbe3 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -273,7 +273,7 @@ struct jit_context { /* Number of bytes emit_patch() needs to generate instructions */ #define X86_PATCH_SIZE 5 /* Number of bytes that will be skipped on tailcall */ -#define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE) +#define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE)
static void push_r12(u8 **pprog) { @@ -403,6 +403,37 @@ static void emit_cfi(u8 **pprog, u32 hash) *pprog = prog; }
+static void emit_prologue_tail_call(u8 **pprog, bool is_subprog) +{ + u8 *prog = *pprog; + + if (!is_subprog) { + /* cmp rax, MAX_TAIL_CALL_CNT */ + EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT); + EMIT2(X86_JA, 6); /* ja 6 */ + /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT. + * case1: entry of main prog. + * case2: tail callee of main prog. + */ + EMIT1(0x50); /* push rax */ + /* Make rax as tail_call_cnt_ptr. */ + EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */ + EMIT2(0xEB, 1); /* jmp 1 */ + /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT. + * case: tail callee of subprog. + */ + EMIT1(0x50); /* push rax */ + /* push tail_call_cnt_ptr */ + EMIT1(0x50); /* push rax */ + } else { /* is_subprog */ + /* rax is tail_call_cnt_ptr. */ + EMIT1(0x50); /* push rax */ + EMIT1(0x50); /* push rax */ + } + + *pprog = prog; +} + /* * Emit x86-64 prologue code for BPF program. * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes @@ -424,10 +455,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, /* When it's the entry of the whole tailcall context, * zeroing rax means initialising tail_call_cnt. */ - EMIT2(0x31, 0xC0); /* xor eax, eax */ + EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */ else /* Keep the same instruction layout. */ - EMIT2(0x66, 0x90); /* nop2 */ + emit_nops(&prog, 3); /* nop3 */ } /* Exception callback receives FP as third parameter */ if (is_exception_cb) { @@ -453,7 +484,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, if (stack_depth) EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); if (tail_call_reachable) - EMIT1(0x50); /* push rax */ + emit_prologue_tail_call(&prog, is_subprog); *pprog = prog; }
@@ -589,13 +620,15 @@ static void emit_return(u8 **pprog, u8 *ip) *pprog = prog; }
+#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8)) + /* * Generate the following code: * * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... * if (index >= array->map.max_entries) * goto out; - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; * prog = array->ptrs[index]; * if (prog == NULL) @@ -608,7 +641,7 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, u32 stack_depth, u8 *ip, struct jit_context *ctx) { - int tcc_off = -4 - round_up(stack_depth, 8); + int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); u8 *prog = *pprog, *start = *pprog; int offset;
@@ -630,16 +663,14 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, EMIT2(X86_JBE, offset); /* jbe out */
/* - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ - EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ + EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ + EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JAE, offset); /* jae out */ - EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
/* prog = array->ptrs[index]; */ EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ @@ -654,6 +685,9 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JE, offset); /* je out */
+ /* Inc tail_call_cnt if the slot is populated. */ + EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ + if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); @@ -663,6 +697,11 @@ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, pop_r12(&prog); }
+ /* Pop tail_call_cnt_ptr. */ + EMIT1(0x58); /* pop rax */ + /* Pop tail_call_cnt, if it's main prog. + * Pop tail_call_cnt_ptr, if it's subprog. + */ EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ @@ -691,21 +730,19 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, bool *callee_regs_used, u32 stack_depth, struct jit_context *ctx) { - int tcc_off = -4 - round_up(stack_depth, 8); + int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); u8 *prog = *pprog, *start = *pprog; int offset;
/* - * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) + * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) * goto out; */ - EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ - EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ + EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ + EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
offset = ctx->tail_call_direct_label - (prog + 2 - start); EMIT2(X86_JAE, offset); /* jae out */ - EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ - EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
poke->tailcall_bypass = ip + (prog - start); poke->adj_off = X86_TAIL_CALL_OFFSET; @@ -715,6 +752,9 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, poke->tailcall_bypass);
+ /* Inc tail_call_cnt if the slot is populated. */ + EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ + if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); @@ -724,6 +764,11 @@ static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, pop_r12(&prog); }
+ /* Pop tail_call_cnt_ptr. */ + EMIT1(0x58); /* pop rax */ + /* Pop tail_call_cnt, if it's main prog. + * Pop tail_call_cnt_ptr, if it's subprog. + */ EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); @@ -1311,9 +1356,11 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
-/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ -#define RESTORE_TAIL_CALL_CNT(stack) \ - EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) +#define __LOAD_TCC_PTR(off) \ + EMIT3_off32(0x48, 0x8B, 0x85, off) +/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */ +#define LOAD_TAIL_CALL_CNT_PTR(stack) \ + __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding) @@ -2031,7 +2078,7 @@ st: if (is_imm8(insn->off))
func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { - RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); + LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth); ip += 7; } if (!imm32) @@ -2706,6 +2753,10 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, return 0; }
+/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ +#define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \ + __LOAD_TCC_PTR(-round_up(stack, 8) - 8) + /* Example: * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); * its 'struct btf_func_model' will be nr_args=2 @@ -2826,7 +2877,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im * [ ... ] * [ stack_arg2 ] * RBP - arg_stack_off [ stack_arg1 ] - * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX + * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX */
/* room for return value of orig_call or fentry prog */ @@ -2955,10 +3006,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im save_args(m, &prog, arg_stack_off, true);
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { - /* Before calling the original function, restore the - * tail_call_cnt from stack to rax. + /* Before calling the original function, load the + * tail_call_cnt_ptr from stack to rax. */ - RESTORE_TAIL_CALL_CNT(stack_size); + LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); }
if (flags & BPF_TRAMP_F_ORIG_STACK) { @@ -3017,10 +3068,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im goto cleanup; } } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { - /* Before running the original function, restore the - * tail_call_cnt from stack to rax. + /* Before running the original function, load the + * tail_call_cnt_ptr from stack to rax. */ - RESTORE_TAIL_CALL_CNT(stack_size); + LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); }
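The x86 JIT changes here, like the arm64 ones earlier, replace the per-frame tail_call_cnt with a tail_call_cnt_ptr: the counter lives once in the main program's stack frame and every subprogram frame only carries a pointer to it, so tail calls made from subprograms all bump the same counter and MAX_TAIL_CALL_CNT is enforced for the whole call hierarchy. A plain-C sketch of that sharing; this shows the idea, not the emitted machine code.

#include <stdio.h>

#define MAX_TAIL_CALL_CNT 33

/* Each frame used to carry its own counter; now every frame carries a
 * pointer to the counter owned by the main program's frame. */
struct frame {
	unsigned long *tcc_ptr;	/* shared tail_call_cnt */
};

static int tail_call(struct frame *f)
{
	if (*f->tcc_ptr >= MAX_TAIL_CALL_CNT)
		return -1;		/* limit reached: fall through */
	(*f->tcc_ptr)++;
	return 0;			/* would jump into the target program */
}

static void subprog(struct frame *caller)
{
	/* A subprogram inherits the pointer, not a fresh counter. */
	struct frame f = { .tcc_ptr = caller->tcc_ptr };

	while (tail_call(&f) == 0)
		;
}

int main(void)
{
	unsigned long tcc = 0;			/* lives in the main frame */
	struct frame main_frame = { .tcc_ptr = &tcc };

	subprog(&main_frame);
	subprog(&main_frame);			/* still capped overall */
	printf("tail calls taken: %lu\n", tcc);	/* 33, not 66 */
	return 0;
}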
/* restore return value of orig_call or fentry prog back into RAX */ diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index b33afb240601..98a9bb92d75c 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -980,7 +980,7 @@ static void amd_rp_pme_suspend(struct pci_dev *dev) return;
rp = pcie_find_root_port(dev); - if (!rp->pm_cap) + if (!rp || !rp->pm_cap) return;
rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot|PCI_PM_CAP_PME_D3cold) >> @@ -994,7 +994,7 @@ static void amd_rp_pme_resume(struct pci_dev *dev) u16 pmc;
rp = pcie_find_root_port(dev); - if (!rp->pm_cap) + if (!rp || !rp->pm_cap) return;
pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc); diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index f1ce39d6d32c..839e6613753d 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -2018,10 +2018,7 @@ void __init xen_reserve_special_pages(void)
void __init xen_pt_check_e820(void) { - if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) { - xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n"); - BUG(); - } + xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table"); }
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 7c735b730acd..b52d3e17e2c1 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -70,6 +70,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/acpi.h>
#include <asm/cache.h> #include <asm/setup.h> @@ -80,6 +81,7 @@ #include <asm/xen/hypervisor.h> #include <xen/balloon.h> #include <xen/grant_table.h> +#include <xen/hvc-console.h>
#include "xen-ops.h"
@@ -792,6 +794,102 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, return ret; }
+/* Remapped non-RAM areas */ +#define NR_NONRAM_REMAP 4 +static struct nonram_remap { + phys_addr_t maddr; + phys_addr_t paddr; + size_t size; +} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init; +static unsigned int nr_nonram_remap __ro_after_init; + +/* + * Do the real remapping of non-RAM regions as specified in the + * xen_nonram_remap[] array. + * In case of an error just crash the system. + */ +void __init xen_do_remap_nonram(void) +{ + unsigned int i; + unsigned int remapped = 0; + const struct nonram_remap *remap = xen_nonram_remap; + unsigned long pfn, mfn, end_pfn; + + for (i = 0; i < nr_nonram_remap; i++) { + end_pfn = PFN_UP(remap->paddr + remap->size); + pfn = PFN_DOWN(remap->paddr); + mfn = PFN_DOWN(remap->maddr); + while (pfn < end_pfn) { + if (!set_phys_to_machine(pfn, mfn)) + panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n", + pfn, mfn); + + pfn++; + mfn++; + remapped++; + } + + remap++; + } + + pr_info("Remapped %u non-RAM page(s)\n", remapped); +} + +#ifdef CONFIG_ACPI +/* + * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM + * regions into account. + * Any attempt to map an area crossing a remap boundary will produce a + * WARN() splat. + * phys is related to remap->maddr on input and will be rebased to remap->paddr. + */ +static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys, + acpi_size size) +{ + unsigned int i; + const struct nonram_remap *remap = xen_nonram_remap; + + for (i = 0; i < nr_nonram_remap; i++) { + if (phys + size > remap->maddr && + phys < remap->maddr + remap->size) { + WARN_ON(phys < remap->maddr || + phys + size > remap->maddr + remap->size); + phys += remap->paddr - remap->maddr; + break; + } + } + + return x86_acpi_os_ioremap(phys, size); +} +#endif /* CONFIG_ACPI */ + +/* + * Add a new non-RAM remap entry. + * In case of no free entry found, just crash the system. + */ +void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr, + unsigned long size) +{ + BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK)); + + if (nr_nonram_remap == NR_NONRAM_REMAP) { + xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n"); + BUG(); + } + +#ifdef CONFIG_ACPI + /* Switch to the Xen acpi_os_ioremap() variant. */ + if (nr_nonram_remap == 0) + acpi_os_ioremap = xen_acpi_os_ioremap; +#endif + + xen_nonram_remap[nr_nonram_remap].maddr = maddr; + xen_nonram_remap[nr_nonram_remap].paddr = paddr; + xen_nonram_remap[nr_nonram_remap].size = size; + + nr_nonram_remap++; +} + #ifdef CONFIG_XEN_DEBUG_FS #include <linux/debugfs.h> static int p2m_dump_show(struct seq_file *m, void *v) diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 806ddb2391d9..c3db71d96c43 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -15,12 +15,12 @@ #include <linux/cpuidle.h> #include <linux/cpufreq.h> #include <linux/memory_hotplug.h> +#include <linux/acpi.h>
 #include <asm/elf.h>
 #include <asm/vdso.h>
 #include <asm/e820/api.h>
 #include <asm/setup.h>
-#include <asm/acpi.h>
 #include <asm/numa.h>
 #include <asm/idtentry.h>
 #include <asm/xen/hypervisor.h>
@@ -46,6 +46,9 @@ bool xen_pv_pci_possible;
 /* E820 map used during setting up memory. */
 static struct e820_table xen_e820_table __initdata;
+/* Number of initially usable memory pages. */ +static unsigned long ini_nr_pages __initdata; + /* * Buffer used to remap identity mapped pages. We only need the virtual space. * The physical page behind this address is remapped as needed to different @@ -212,7 +215,7 @@ static int __init xen_free_mfn(unsigned long mfn) * as a fallback if the remapping fails. */ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, - unsigned long end_pfn, unsigned long nr_pages) + unsigned long end_pfn) { unsigned long pfn, end; int ret; @@ -220,7 +223,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, WARN_ON(start_pfn > end_pfn);
/* Release pages first. */ - end = min(end_pfn, nr_pages); + end = min(end_pfn, ini_nr_pages); for (pfn = start_pfn; pfn < end; pfn++) { unsigned long mfn = pfn_to_mfn(pfn);
@@ -341,15 +344,14 @@ static void __init xen_do_set_identity_and_remap_chunk( * to Xen and not remapped. */ static unsigned long __init xen_set_identity_and_remap_chunk( - unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, - unsigned long remap_pfn) + unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn) { unsigned long pfn; unsigned long i = 0; unsigned long n = end_pfn - start_pfn;
 	if (remap_pfn == 0)
-		remap_pfn = nr_pages;
+		remap_pfn = ini_nr_pages;
while (i < n) { unsigned long cur_pfn = start_pfn + i; @@ -358,19 +360,19 @@ static unsigned long __init xen_set_identity_and_remap_chunk( unsigned long remap_range_size;
/* Do not remap pages beyond the current allocation */ - if (cur_pfn >= nr_pages) { + if (cur_pfn >= ini_nr_pages) { /* Identity map remaining pages */ set_phys_range_identity(cur_pfn, cur_pfn + size); break; } - if (cur_pfn + size > nr_pages) - size = nr_pages - cur_pfn; + if (cur_pfn + size > ini_nr_pages) + size = ini_nr_pages - cur_pfn;
remap_range_size = xen_find_pfn_range(&remap_pfn); if (!remap_range_size) { pr_warn("Unable to find available pfn range, not remapping identity pages\n"); xen_set_identity_and_release_chunk(cur_pfn, - cur_pfn + left, nr_pages); + cur_pfn + left); break; } /* Adjust size to fit in current e820 RAM region */ @@ -397,18 +399,18 @@ static unsigned long __init xen_set_identity_and_remap_chunk( }
static unsigned long __init xen_count_remap_pages( - unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, + unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pages) { - if (start_pfn >= nr_pages) + if (start_pfn >= ini_nr_pages) return remap_pages;
-	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+	return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
 }
-static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, +static unsigned long __init xen_foreach_remap_area( unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn, - unsigned long nr_pages, unsigned long last_val)) + unsigned long last_val)) { phys_addr_t start = 0; unsigned long ret_val = 0; @@ -436,8 +438,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, end_pfn = PFN_UP(entry->addr);
if (start_pfn < end_pfn) - ret_val = func(start_pfn, end_pfn, nr_pages, - ret_val); + ret_val = func(start_pfn, end_pfn, ret_val); start = end; } } @@ -494,6 +495,8 @@ void __init xen_remap_memory(void) set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
pr_info("Remapped %ld page(s)\n", remapped); + + xen_do_remap_nonram(); }
static unsigned long __init xen_get_pages_limit(void) @@ -567,7 +570,7 @@ static void __init xen_ignore_unusable(void) } }
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) +static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) { struct e820_entry *entry; unsigned mapcnt; @@ -624,6 +627,111 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size) return 0; }
+/* + * Swap a non-RAM E820 map entry with RAM above ini_nr_pages. + * Note that the E820 map is modified accordingly, but the P2M map isn't yet. + * The adaption of the P2M must be deferred until page allocation is possible. + */ +static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry) +{ + struct e820_entry *entry; + unsigned int mapcnt; + phys_addr_t mem_end = PFN_PHYS(ini_nr_pages); + phys_addr_t swap_addr, swap_size, entry_end; + + swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr); + swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size); + entry = xen_e820_table.entries; + + for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) { + entry_end = entry->addr + entry->size; + if (entry->type == E820_TYPE_RAM && entry->size >= swap_size && + entry_end - swap_size >= mem_end) { + /* Reduce RAM entry by needed space (whole pages). */ + entry->size -= swap_size; + + /* Add new entry at the end of E820 map. */ + entry = xen_e820_table.entries + + xen_e820_table.nr_entries; + xen_e820_table.nr_entries++; + + /* Fill new entry (keep size and page offset). */ + entry->type = swap_entry->type; + entry->addr = entry_end - swap_size + + swap_addr - swap_entry->addr; + entry->size = swap_entry->size; + + /* Convert old entry to RAM, align to pages. */ + swap_entry->type = E820_TYPE_RAM; + swap_entry->addr = swap_addr; + swap_entry->size = swap_size; + + /* Remember PFN<->MFN relation for P2M update. */ + xen_add_remap_nonram(swap_addr, entry_end - swap_size, + swap_size); + + /* Order E820 table and merge entries. */ + e820__update_table(&xen_e820_table); + + return; + } + + entry++; + } + + xen_raw_console_write("No suitable area found for required E820 entry remapping action\n"); + BUG(); +} + +/* + * Look for non-RAM memory types in a specific guest physical area and move + * those away if possible (ACPI NVS only for now). + */ +static void __init xen_e820_resolve_conflicts(phys_addr_t start, + phys_addr_t size) +{ + struct e820_entry *entry; + unsigned int mapcnt; + phys_addr_t end; + + if (!size) + return; + + end = start + size; + entry = xen_e820_table.entries; + + for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) { + if (entry->addr >= end) + return; + + if (entry->addr + entry->size > start && + entry->type == E820_TYPE_NVS) + xen_e820_swap_entry_with_ram(entry); + + entry++; + } +} + +/* + * Check for an area in physical memory to be usable for non-movable purposes. + * An area is considered to usable if the used E820 map lists it to be RAM or + * some other type which can be moved to higher PFNs while keeping the MFNs. + * In case the area is not usable, crash the system with an error message. + */ +void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size, + const char *component) +{ + xen_e820_resolve_conflicts(start, size); + + if (!xen_is_e820_reserved(start, size)) + return; + + xen_raw_console_write("Xen hypervisor allocated "); + xen_raw_console_write(component); + xen_raw_console_write(" memory conflicts with E820 map\n"); + BUG(); +} + /* * Like memcpy, but with physical addresses for dest and src. */ @@ -683,7 +791,7 @@ static void __init xen_reserve_xen_mfnlist(void) **/ char * __init xen_memory_setup(void) { - unsigned long max_pfn, pfn_s, n_pfns; + unsigned long pfn_s, n_pfns; phys_addr_t mem_end, addr, size, chunk_size; u32 type; int rc; @@ -695,9 +803,8 @@ char * __init xen_memory_setup(void) int op;
 	xen_parse_512gb();
-	max_pfn = xen_get_pages_limit();
-	max_pfn = min(max_pfn, xen_start_info->nr_pages);
-	mem_end = PFN_PHYS(max_pfn);
+	ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
+	mem_end = PFN_PHYS(ini_nr_pages);
memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); @@ -747,13 +854,35 @@ char * __init xen_memory_setup(void) /* Make sure the Xen-supplied memory map is well-ordered. */ e820__update_table(&xen_e820_table);
+ /* + * Check whether the kernel itself conflicts with the target E820 map. + * Failing now is better than running into weird problems later due + * to relocating (and even reusing) pages with kernel text or data. + */ + xen_chk_is_e820_usable(__pa_symbol(_text), + __pa_symbol(_end) - __pa_symbol(_text), + "kernel"); + + /* + * Check for a conflict of the xen_start_info memory with the target + * E820 map. + */ + xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info), + "xen_start_info"); + + /* + * Check for a conflict of the hypervisor supplied page tables with + * the target E820 map. + */ + xen_pt_check_e820(); + max_pages = xen_get_max_pages();
 	/* How many extra pages do we need due to remapping? */
-	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
+	max_pages += xen_foreach_remap_area(xen_count_remap_pages);
-	if (max_pages > max_pfn)
-		extra_pages += max_pages - max_pfn;
+	if (max_pages > ini_nr_pages)
+		extra_pages += max_pages - ini_nr_pages;
/* * Clamp the amount of extra memory to a EXTRA_MEM_RATIO @@ -762,8 +891,8 @@ char * __init xen_memory_setup(void) * Make sure we have no memory above max_pages, as this area * isn't handled by the p2m management. */ - maxmem_pages = EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)); - extra_pages = min3(maxmem_pages, extra_pages, max_pages - max_pfn); + maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM)); + extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages); i = 0; addr = xen_e820_table.entries[0].addr; size = xen_e820_table.entries[0].size; @@ -819,23 +948,6 @@ char * __init xen_memory_setup(void)
e820__update_table(e820_table);
- /* - * Check whether the kernel itself conflicts with the target E820 map. - * Failing now is better than running into weird problems later due - * to relocating (and even reusing) pages with kernel text or data. - */ - if (xen_is_e820_reserved(__pa_symbol(_text), - __pa_symbol(__bss_stop) - __pa_symbol(_text))) { - xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n"); - BUG(); - } - - /* - * Check for a conflict of the hypervisor supplied page tables with - * the target E820 map. - */ - xen_pt_check_e820(); - xen_reserve_xen_mfnlist();
/* Check for a conflict of the initrd with the target E820 map. */ @@ -863,7 +975,7 @@ char * __init xen_memory_setup(void) * Set identity map on non-RAM pages and prepare remapping the * underlying RAM. */ - xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk); + xen_foreach_remap_area(xen_set_identity_and_remap_chunk);
pr_info("Released %ld page(s)\n", xen_released_pages);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 0cf16fc79e0b..e1b782e823e6 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -47,8 +47,12 @@ void xen_mm_unpin_all(void);
 #ifdef CONFIG_X86_64
 void __init xen_relocate_p2m(void);
 #endif
+void __init xen_do_remap_nonram(void);
+void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
+				 unsigned long size);
-bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size);
+void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
+				   const char *component);
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn);
 void __init xen_inv_extra_mem(void);
 void __init xen_remap_memory(void);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 36a4998c4b37..1cc40a857fb8 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
 	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
+	new_bfqq = bfqq->new_bfqq;
+	if (new_bfqq) {
+		while (new_bfqq->new_bfqq)
+			new_bfqq = new_bfqq->new_bfqq;
+		return new_bfqq;
+	}
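The loop above follows the bfqq->new_bfqq links until it reaches the final queue of a merge chain. A toy stand-alone version of that chain walk, with a simplified struct standing in for the real bfq_queue, looks like this:

/* Toy illustration of walking a ->new_bfqq merge chain to its end. */
#include <stdio.h>

struct toy_queue {
	int pid;
	struct toy_queue *new_bfqq;	/* next queue in the merge chain */
};

static struct toy_queue *last_in_chain(struct toy_queue *q)
{
	while (q->new_bfqq)
		q = q->new_bfqq;
	return q;
}

int main(void)
{
	struct toy_queue c = { .pid = 3, .new_bfqq = NULL };
	struct toy_queue b = { .pid = 2, .new_bfqq = &c };
	struct toy_queue a = { .pid = 1, .new_bfqq = &b };

	/* a was merged into b, which was later merged into c. */
	printf("chain ends at pid %d\n", last_in_chain(&a)->pid);
	return 0;
}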
/* * Check delayed stable merge for rotational or non-queueing @@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_put_queue(bfqq); }
-static void -bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, - struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd, + struct bfq_io_cq *bic, + struct bfq_queue *bfqq) { + struct bfq_queue *new_bfqq = bfqq->new_bfqq; + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", (unsigned long)new_bfqq->pid); /* Save weight raising and idle window of the merged queues */ @@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, bfq_reassign_last_bfqq(bfqq, new_bfqq);
 	bfq_release_process_ref(bfqd, bfqq);
+
+	return new_bfqq;
 }
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, @@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, * fulfilled, i.e., bic can be redirected to new_bfqq * and bfqq can be put. */ - bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, - new_bfqq); - /* - * If we get here, bio will be queued into new_queue, - * so use new_bfqq to decide whether bio and rq can be - * merged. - */ - bfqq = new_bfqq; + while (bfqq != new_bfqq) + bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
/* * Change also bqfd->bio_bfqq, as @@ -5701,9 +5703,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq, * state before killing it. */ bfqq->bic = bic; - bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); - - return new_bfqq; + return bfq_merge_bfqqs(bfqd, bic, bfqq); }
/* @@ -6158,6 +6158,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) bool waiting, idle_timer_disabled = false;
if (new_bfqq) { + struct bfq_queue *old_bfqq = bfqq; /* * Release the request's reference to the old bfqq * and make sure one is taken to the shared queue. @@ -6174,18 +6175,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) * new_bfqq. */ if (bic_to_bfqq(RQ_BIC(rq), true, - bfq_actuator_index(bfqd, rq->bio)) == bfqq) - bfq_merge_bfqqs(bfqd, RQ_BIC(rq), - bfqq, new_bfqq); + bfq_actuator_index(bfqd, rq->bio)) == bfqq) { + while (bfqq != new_bfqq) + bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq); + }
- bfq_clear_bfqq_just_created(bfqq); + bfq_clear_bfqq_just_created(old_bfqq); /* * rq is about to be enqueued into new_bfqq, * release rq reference on bfqq */ - bfq_put_queue(bfqq); + bfq_put_queue(old_bfqq); rq->elv.priv[1] = new_bfqq; - bfqq = new_bfqq; }
bfq_update_io_thinktime(bfqd, bfqq); @@ -6723,7 +6724,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) { bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
- if (bfqq_process_refs(bfqq) == 1) { + if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) { bfqq->pid = current->pid; bfq_clear_bfqq_coop(bfqq); bfq_clear_bfqq_split_coop(bfqq); @@ -6821,6 +6822,31 @@ static void bfq_prepare_request(struct request *rq) rq->elv.priv[0] = rq->elv.priv[1] = NULL; }
+static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
+{
+	struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+	struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
+
+	if (!waker_bfqq)
+		return NULL;
+
+	while (new_bfqq) {
+		if (new_bfqq == waker_bfqq) {
+			/*
+			 * If waker_bfqq is in the merge chain, and current
+			 * is the only process.
+			 */
+			if (bfqq_process_refs(waker_bfqq) == 1)
+				return NULL;
+			break;
+		}
+
+		new_bfqq = new_bfqq->new_bfqq;
+	}
+
+	return waker_bfqq;
+}
+
 /*
  * If needed, init rq, allocate bfq data structures associated with
  * rq, and increment reference counters in the destination bfq_queue
@@ -6882,7 +6908,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 	/* If the queue was seeky for too long, break it apart. */
 	if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
 	    !bic->bfqq_data[a_idx].stably_merged) {
-		struct bfq_queue *old_bfqq = bfqq;
+		struct bfq_queue *waker_bfqq = bfq_waker_bfqq(bfqq);
/* Update bic before losing reference to bfqq */ if (bfq_bfqq_in_large_burst(bfqq)) @@ -6902,7 +6928,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) bfqq_already_existing = true;
if (!bfqq_already_existing) { - bfqq->waker_bfqq = old_bfqq->waker_bfqq; + bfqq->waker_bfqq = waker_bfqq; bfqq->tentative_waker_bfqq = NULL;
/* @@ -6912,7 +6938,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) * woken_list of the waker. See * bfq_check_waker for details. */ - if (bfqq->waker_bfqq) + if (waker_bfqq) hlist_add_head(&bfqq->woken_list_node, &bfqq->waker_bfqq->woken_list); } @@ -6934,7 +6960,8 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) * addition, if the queue has also just been split, we have to * resume its state. */ - if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { + if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq && + bfqq_process_refs(bfqq) == 1) { bfqq->bic = bic; if (split) { /* diff --git a/block/blk-core.c b/block/blk-core.c index 1217c2cd66dd..bc5e8c5eaac9 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -799,6 +799,7 @@ void submit_bio_noacct(struct bio *bio)
switch (bio_op(bio)) { case REQ_OP_READ: + break; case REQ_OP_WRITE: if (bio->bi_opf & REQ_ATOMIC) { status = blk_validate_atomic_write_op_size(q, bio); diff --git a/block/elevator.c b/block/elevator.c index c355b55d0107..4122026b11f1 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -715,7 +715,9 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf,
strscpy(elevator_name, buf, sizeof(elevator_name));
-	return request_module("%s-iosched", strstrip(elevator_name));
+	request_module("%s-iosched", strstrip(elevator_name));
+
+	return 0;
 }
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf, diff --git a/block/partitions/core.c b/block/partitions/core.c index ab76e64f0f6c..5bd7a603092e 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -555,9 +555,11 @@ static bool blk_add_partition(struct gendisk *disk,
part = add_partition(disk, p, from, size, state->parts[p].flags, &state->parts[p].info); - if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) { - printk(KERN_ERR " %s: p%d could not be added: %pe\n", - disk->disk_name, p, part); + if (IS_ERR(part)) { + if (PTR_ERR(part) != -ENXIO) { + printk(KERN_ERR " %s: p%d could not be added: %pe\n", + disk->disk_name, p, part); + } return true; }
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index a5da8ccd353e..43af5fa510c0 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -60,17 +60,18 @@ struct key *find_asymmetric_key(struct key *keyring, char *req, *p; int len;
- WARN_ON(!id_0 && !id_1 && !id_2); - if (id_0) { lookup = id_0->data; len = id_0->len; } else if (id_1) { lookup = id_1->data; len = id_1->len; - } else { + } else if (id_2) { lookup = id_2->data; len = id_2->len; + } else { + WARN_ON(1); + return ERR_PTR(-EINVAL); }
/* Construct an identifier "id:<keyid>". */ diff --git a/crypto/xor.c b/crypto/xor.c index a1363162978c..f39621a57bb3 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -83,33 +83,30 @@ static void __init do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) { int speed; - int i, j; - ktime_t min, start, diff; + unsigned long reps; + ktime_t min, start, t0;
tmpl->next = template_list; template_list = tmpl;
preempt_disable();
- min = (ktime_t)S64_MAX; - for (i = 0; i < 3; i++) { - start = ktime_get(); - for (j = 0; j < REPS; j++) { - mb(); /* prevent loop optimization */ - tmpl->do_2(BENCH_SIZE, b1, b2); - mb(); - } - diff = ktime_sub(ktime_get(), start); - if (diff < min) - min = diff; - } + reps = 0; + t0 = ktime_get(); + /* delay start until time has advanced */ + while ((start = ktime_get()) == t0) + cpu_relax(); + do { + mb(); /* prevent loop optimization */ + tmpl->do_2(BENCH_SIZE, b1, b2); + mb(); + } while (reps++ < REPS || (t0 = ktime_get()) == start); + min = ktime_sub(t0, start);
preempt_enable();
 	// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
-	if (!min)
-		min = 1;
-	speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
+	speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
 	tmpl->speed = speed;
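With the reworked loop, the speed is derived from however many repetitions completed in the measured interval rather than a fixed REPS count. A rough worked example of the bytes/ns to MB/s conversion above, with invented values for the repetition count, block size and elapsed time:

/* Rough illustration of the MB/s computation; all numbers are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long reps = 800;		/* iterations completed */
	unsigned long bench_size = 4096;	/* bytes XORed per iteration */
	unsigned long elapsed_ns = 500000;	/* measured interval */

	/* bytes/ns == GB/s, so multiply by 1000 to get MB/s */
	unsigned int speed = (1000 * reps * bench_size) / elapsed_ns;

	printf("%u MB/sec\n", speed);	/* prints 6553 */
	return 0;
}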
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed); diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index f665ffd9a396..2c384bd52b9c 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -133,14 +133,15 @@ acpi_status acpi_ex_system_do_stall(u32 how_long_us) * (ACPI specifies 100 usec as max, but this gives some slack in * order to support existing BIOSs) */ - ACPI_ERROR((AE_INFO, - "Time parameter is too large (%u)", how_long_us)); + ACPI_ERROR_ONCE((AE_INFO, + "Time parameter is too large (%u)", + how_long_us)); status = AE_AML_OPERAND_VALUE; } else { if (how_long_us > 100) { - ACPI_WARNING((AE_INFO, - "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.", - how_long_us)); + ACPI_WARNING_ONCE((AE_INFO, + "Time parameter %u us > 100 us violating ACPI spec, please fix the firmware.", + how_long_us)); } acpi_os_stall(how_long_us); } diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index dd3d3082c8c7..28adea68e1cd 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -171,8 +171,11 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); #define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
 /* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) &	\
+#define MASK_VAL_READ(reg, val) (((val) >> (reg)->bit_offset) &	\
 					GENMASK(((reg)->bit_width) - 1, 0))
+#define MASK_VAL_WRITE(reg, prev_val, val)				\
+	((((val) & GENMASK(((reg)->bit_width) - 1, 0)) << (reg)->bit_offset) |	\
+	((prev_val) & ~(GENMASK(((reg)->bit_width) - 1, 0) << (reg)->bit_offset)))	\
static ssize_t show_feedback_ctrs(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -859,6 +862,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */ cpc_ptr->cpu_id = pr->id; + spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */ ret = acpi_get_psd(cpc_ptr, handle); @@ -1064,7 +1068,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) }
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - *val = MASK_VAL(reg, *val); + *val = MASK_VAL_READ(reg, *val);
return 0; } @@ -1073,9 +1077,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) { int ret_val = 0; int size; + u64 prev_val; void __iomem *vaddr = NULL; int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); struct cpc_reg *reg = ®_res->cpc_entry.reg; + struct cpc_desc *cpc_desc;
size = GET_BIT_WIDTH(reg);
@@ -1108,8 +1114,34 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) return acpi_os_write_memory((acpi_physical_address)reg->address, val, size);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - val = MASK_VAL(reg, val); + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + cpc_desc = per_cpu(cpc_desc_ptr, cpu); + if (!cpc_desc) { + pr_debug("No CPC descriptor for CPU:%d\n", cpu); + return -ENODEV; + } + + spin_lock(&cpc_desc->rmw_lock); + switch (size) { + case 8: + prev_val = readb_relaxed(vaddr); + break; + case 16: + prev_val = readw_relaxed(vaddr); + break; + case 32: + prev_val = readl_relaxed(vaddr); + break; + case 64: + prev_val = readq_relaxed(vaddr); + break; + default: + spin_unlock(&cpc_desc->rmw_lock); + return -EFAULT; + } + val = MASK_VAL_WRITE(reg, prev_val, val); + val |= prev_val; + }
switch (size) { case 8: @@ -1136,6 +1168,9 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) break; }
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + spin_unlock(&cpc_desc->rmw_lock); + return ret_val; }
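The MASK_VAL_READ()/MASK_VAL_WRITE() helpers introduced above extract a register field on reads and, on writes, insert the new field value while preserving the surrounding bits taken from prev_val. A stand-alone sketch of the same masking, with an invented 8-bit field at bit offset 8 (offsets and widths are not from any real _CPC register):

/* Stand-alone illustration of the field read/write masking; values invented. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK(width)	((1ULL << (width)) - 1)

static uint64_t field_read(uint64_t reg, unsigned int off, unsigned int width)
{
	return (reg >> off) & FIELD_MASK(width);
}

static uint64_t field_write(uint64_t reg, unsigned int off, unsigned int width,
			    uint64_t val)
{
	/* Clear only the field, keep every other bit of the register. */
	return (reg & ~(FIELD_MASK(width) << off)) |
	       ((val & FIELD_MASK(width)) << off);
}

int main(void)
{
	uint64_t reg = 0xAABBCCDD;	/* previous register contents */

	printf("read : 0x%llx\n",
	       (unsigned long long)field_read(reg, 8, 8));	/* 0xcc */
	printf("write: 0x%llx\n",
	       (unsigned long long)field_write(reg, 8, 8, 0x11)); /* 0xaabb11dd */
	return 0;
}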
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 23373faa35ec..95a19e3569c8 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -540,8 +540,9 @@ int acpi_device_setup_files(struct acpi_device *dev) * If device has _STR, 'description' file is created */ if (acpi_has_method(dev->handle, "_STR")) { - status = acpi_evaluate_object(dev->handle, "_STR", - NULL, &buffer); + status = acpi_evaluate_object_typed(dev->handle, "_STR", + NULL, &buffer, + ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) buffer.pointer = NULL; dev->pnp.str_obj = buffer.pointer; diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c index ebd03e472955..0d1a82eeb4b0 100644 --- a/drivers/acpi/pmic/tps68470_pmic.c +++ b/drivers/acpi/pmic/tps68470_pmic.c @@ -376,10 +376,8 @@ static int tps68470_pmic_opregion_probe(struct platform_device *pdev) struct tps68470_pmic_opregion *opregion; acpi_status status;
-	if (!dev || !tps68470_regmap) {
-		dev_warn(dev, "dev or regmap is NULL\n");
-		return -EINVAL;
-	}
+	if (!tps68470_regmap)
+		return dev_err_probe(dev, -EINVAL, "regmap is missing\n");
if (!handle) { dev_warn(dev, "acpi handle is NULL\n"); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index df5d5a554b38..cb2aacbb9335 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -554,6 +554,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = { * to have a working keyboard. */ static const struct dmi_system_id irq1_edge_low_force_override[] = { + { + /* MECHREV Jiaolong17KS Series GM7XG0M */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM7XG0M"), + }, + }, { /* XMG APEX 17 (M23) */ .matches = { @@ -572,6 +578,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = { DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), }, }, + { + /* TongFang GMxXGxX/TUXEDO Polaris 15 Gen5 AMD */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), + }, + }, { /* TongFang GMxXGxx sold as Eluktronics Inc. RP-15 */ .matches = { diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 674b9db7a1ef..75a5f559402f 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -549,6 +549,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"), }, }, + { + .callback = video_detect_force_native, + /* Apple MacBook Pro 9,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,2"), + }, + }, { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 214b935c2ced..7df9ec9f924c 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -630,6 +630,14 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { struct ata_queued_cmd *qc;
+ /* + * If the scmd was added to EH, via ata_qc_schedule_eh() -> + * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will + * have set DID_TIME_OUT (since libata does not have an abort + * handler). Thus, to clear DID_TIME_OUT, clear the host byte. + */ + set_host_byte(scmd, DID_OK); + ata_qc_for_each_raw(ap, qc, i) { if (qc->flags & ATA_QCFLAG_ACTIVE && qc->scsicmd == scmd) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 473e00a58a8b..74f7053f01a1 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1691,9 +1691,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); } else if (is_error && !have_sense) { ata_gen_ata_sense(qc); - } else { - /* Keep the SCSI ML and status byte, clear host byte. */ - cmd->result &= 0x0000ffff; }
ata_qc_done(qc); @@ -2359,7 +2356,7 @@ static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf, case ALL_SUB_MPAGES: n = ata_msense_control_spg0(dev, buf, changeable); n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); - n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE); + n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE); n += ata_msense_control_ata_feature(dev, buf + n); return n; default: diff --git a/drivers/base/core.c b/drivers/base/core.c index 8c0733d3aad8..3b0f4b6153fc 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -4515,9 +4515,11 @@ EXPORT_SYMBOL_GPL(device_destroy); */ int device_rename(struct device *dev, const char *new_name) { + struct subsys_private *sp = NULL; struct kobject *kobj = &dev->kobj; char *old_device_name = NULL; int error; + bool is_link_renamed = false;
dev = get_device(dev); if (!dev) @@ -4532,7 +4534,7 @@ int device_rename(struct device *dev, const char *new_name) }
if (dev->class) { - struct subsys_private *sp = class_to_subsys(dev->class); + sp = class_to_subsys(dev->class);
if (!sp) { error = -EINVAL; @@ -4541,16 +4543,19 @@ int device_rename(struct device *dev, const char *new_name)
error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name, new_name, kobject_namespace(kobj)); - subsys_put(sp); if (error) goto out; + + is_link_renamed = true; }
error = kobject_rename(kobj, new_name); - if (error) - goto out; - out: + if (error && is_link_renamed) + sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name, + old_device_name, kobject_namespace(kobj)); + subsys_put(sp); + put_device(dev);
kfree(old_device_name); diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index a03ee4b11134..324a9a3c087a 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -849,6 +849,26 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, {} #endif
+/* + * Reject firmware file names with ".." path components. + * There are drivers that construct firmware file names from device-supplied + * strings, and we don't want some device to be able to tell us "I would like to + * be sent my firmware from ../../../etc/shadow, please". + * + * Search for ".." surrounded by either '/' or start/end of string. + * + * This intentionally only looks at the firmware name, not at the firmware base + * directory or at symlink contents. + */ +static bool name_contains_dotdot(const char *name) +{ + size_t name_len = strlen(name); + + return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 || + strstr(name, "/../") != NULL || + (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -869,6 +889,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name, goto out; }
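For reference, running the same string tests as name_contains_dotdot() above over a few arbitrary sample names shows what ends up refused; note that ".." is only rejected when it forms a whole path component, so a name like "fw..bin" still loads:

/* Stand-alone copy of the name_contains_dotdot() test, run on sample names. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool name_contains_dotdot(const char *name)
{
	size_t name_len = strlen(name);

	return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
	       strstr(name, "/../") != NULL ||
	       (name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0);
}

int main(void)
{
	const char *names[] = {
		"vendor/fw.bin",	/* accepted */
		"vendor/fw..bin",	/* accepted: ".." is not a path component */
		"../etc/shadow",	/* rejected */
		"vendor/../fw.bin",	/* rejected */
		"vendor/..",		/* rejected */
	};

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-20s %s\n", names[i],
		       name_contains_dotdot(names[i]) ? "rejected" : "accepted");
	return 0;
}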
+ if (name_contains_dotdot(name)) { + dev_warn(device, + "Firmware load for '%s' refused, path contains '..' component\n", + name); + ret = -EINVAL; + goto out; + } + ret = _request_firmware_prepare(&fw, name, device, buf, size, offset, opt_flags); if (ret <= 0) /* error or already assigned */ @@ -946,6 +974,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, * @name will be used as $FIRMWARE in the uevent environment and * should be distinctive enough not to be confused with any other * firmware image for this or any other device. + * It must not contain any ".." path components - "foo/bar..bin" is + * allowed, but "foo/../bar.bin" is not. * * Caller must hold the reference count of @device. * diff --git a/drivers/base/module.c b/drivers/base/module.c index f742ad2a21da..c4eaa1158d54 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c @@ -66,27 +66,31 @@ int module_add_driver(struct module *mod, const struct device_driver *drv) driver_name = make_driver_name(drv); if (!driver_name) { ret = -ENOMEM; - goto out; + goto out_remove_kobj; }
module_create_drivers_dir(mk); if (!mk->drivers_dir) { ret = -EINVAL; - goto out; + goto out_free_driver_name; }
ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name); if (ret) - goto out; + goto out_remove_drivers_dir;
kfree(driver_name);
return 0; -out: - sysfs_remove_link(&drv->p->kobj, "module"); + +out_remove_drivers_dir: sysfs_remove_link(mk->drivers_dir, driver_name); + +out_free_driver_name: kfree(driver_name);
+out_remove_kobj: + sysfs_remove_link(&drv->p->kobj, "module"); return ret; }
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a9e49b212341..abafc4edf9ed 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -3399,10 +3399,12 @@ void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local) void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local) { unsigned long flags; - if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) + spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); + if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) { + spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); return; + }
- spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); if (val == 0) { drbd_uuid_move_history(device); device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index e858e7e0383f..c2b6c4d9729d 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -876,7 +876,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) ns.disk == D_OUTDATED) rv = SS_CONNECTED_OUTDATES;
- else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && + else if (nc && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && (nc->verify_alg[0] == 0)) rv = SS_NO_VERIFY_ALG;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 41a90150b501..8b243144fd64 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -181,6 +181,17 @@ static void nbd_requeue_cmd(struct nbd_cmd *cmd) { struct request *req = blk_mq_rq_from_pdu(cmd);
+ lockdep_assert_held(&cmd->lock); + + /* + * Clear INFLIGHT flag so that this cmd won't be completed in + * normal completion path + * + * INFLIGHT flag will be set when the cmd is queued to nbd next + * time. + */ + __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); + if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) blk_mq_requeue_request(req, true); } @@ -339,7 +350,7 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim = queue_limits_start_update(nbd->disk->queue); if (nbd->config->flags & NBD_FLAG_SEND_TRIM) - lim.max_hw_discard_sectors = UINT_MAX; + lim.max_hw_discard_sectors = UINT_MAX >> SECTOR_SHIFT; else lim.max_hw_discard_sectors = 0; if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) { @@ -488,8 +499,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) nbd_mark_nsock_dead(nbd, nsock, 1); mutex_unlock(&nsock->tx_lock); } - mutex_unlock(&cmd->lock); nbd_requeue_cmd(cmd); + mutex_unlock(&cmd->lock); nbd_config_put(nbd); return BLK_EH_DONE; } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 1d53a3f48a0e..bca06bfb4bc3 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -71,9 +71,6 @@ struct ublk_rq_data { struct llist_node node;
struct kref ref; - __u64 sector; - __u32 operation; - __u32 nr_zones; };
struct ublk_uring_cmd_pdu { @@ -214,6 +211,33 @@ static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
#ifdef CONFIG_BLK_DEV_ZONED
+struct ublk_zoned_report_desc { + __u64 sector; + __u32 operation; + __u32 nr_zones; +}; + +static DEFINE_XARRAY(ublk_zoned_report_descs); + +static int ublk_zoned_insert_report_desc(const struct request *req, + struct ublk_zoned_report_desc *desc) +{ + return xa_insert(&ublk_zoned_report_descs, (unsigned long)req, + desc, GFP_KERNEL); +} + +static struct ublk_zoned_report_desc *ublk_zoned_erase_report_desc( + const struct request *req) +{ + return xa_erase(&ublk_zoned_report_descs, (unsigned long)req); +} + +static struct ublk_zoned_report_desc *ublk_zoned_get_report_desc( + const struct request *req) +{ + return xa_load(&ublk_zoned_report_descs, (unsigned long)req); +} + static int ublk_get_nr_zones(const struct ublk_device *ub) { const struct ublk_param_basic *p = &ub->params.basic; @@ -308,7 +332,7 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, unsigned int zones_in_request = min_t(unsigned int, remaining_zones, max_zones_per_request); struct request *req; - struct ublk_rq_data *pdu; + struct ublk_zoned_report_desc desc; blk_status_t status;
memset(buffer, 0, buffer_length); @@ -319,20 +343,23 @@ static int ublk_report_zones(struct gendisk *disk, sector_t sector, goto out; }
- pdu = blk_mq_rq_to_pdu(req); - pdu->operation = UBLK_IO_OP_REPORT_ZONES; - pdu->sector = sector; - pdu->nr_zones = zones_in_request; + desc.operation = UBLK_IO_OP_REPORT_ZONES; + desc.sector = sector; + desc.nr_zones = zones_in_request; + ret = ublk_zoned_insert_report_desc(req, &desc); + if (ret) + goto free_req;
ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length, GFP_KERNEL); - if (ret) { - blk_mq_free_request(req); - goto out; - } + if (ret) + goto erase_desc;
status = blk_execute_rq(req, 0); ret = blk_status_to_errno(status); +erase_desc: + ublk_zoned_erase_report_desc(req); +free_req: blk_mq_free_request(req); if (ret) goto out; @@ -366,7 +393,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, { struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); struct ublk_io *io = &ubq->ios[req->tag]; - struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req); + struct ublk_zoned_report_desc *desc; u32 ublk_op;
switch (req_op(req)) { @@ -389,12 +416,15 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq, ublk_op = UBLK_IO_OP_ZONE_RESET_ALL; break; case REQ_OP_DRV_IN: - ublk_op = pdu->operation; + desc = ublk_zoned_get_report_desc(req); + if (!desc) + return BLK_STS_IOERR; + ublk_op = desc->operation; switch (ublk_op) { case UBLK_IO_OP_REPORT_ZONES: iod->op_flags = ublk_op | ublk_req_build_flags(req); - iod->nr_zones = pdu->nr_zones; - iod->start_sector = pdu->sector; + iod->nr_zones = desc->nr_zones; + iod->start_sector = desc->sector; return BLK_STS_OK; default: return BLK_STS_IOERR; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 51d9d4532dda..1ec71a2fb63e 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1397,7 +1397,10 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) if (!urb) return -ENOMEM;
-	size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
+	/* Use maximum HCI Event size so the USB stack handles
+	 * ZPL/short-transfer automatically.
+	 */
+	size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags); if (!buf) { diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c index b715c8ab36e8..a65c79b08804 100644 --- a/drivers/bus/arm-integrator-lm.c +++ b/drivers/bus/arm-integrator-lm.c @@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev) return -ENODEV; } map = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(map)) { dev_err(dev, "could not find Integrator/AP system controller\n"); diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 14a11880bcea..71da8864841d 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -433,8 +433,7 @@ static const struct mhi_controller_config modem_foxconn_sdx72_config = {
static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .name = "foxconn-sdx55", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -444,8 +443,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = { .name = "foxconn-t99w175", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -455,8 +453,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = { .name = "foxconn-dw5930e", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -502,7 +499,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = { .name = "foxconn-t99w515", - .edl = "fox/sdx72m/edl.mbn", + .edl = "qcom/sdx72m/foxconn/edl.mbn", .edl_trigger = true, .config = &modem_foxconn_sdx72_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, @@ -513,7 +510,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = { .name = "foxconn-dw5934e", - .edl = "fox/sdx72m/edl.mbn", + .edl = "qcom/sdx72m/foxconn/edl.mbn", .edl_trigger = true, .config = &modem_foxconn_sdx72_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, @@ -680,6 +677,15 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = { .mru_default = 32768, };
+static const struct mhi_pci_dev_info mhi_telit_fe990a_info = { + .name = "telit-fe990a", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + /* Keep the list sorted based on the PID. New VID should be added as the last entry */ static const struct pci_device_id mhi_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), @@ -697,9 +703,9 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* Telit FN990 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, - /* Telit FE990 */ + /* Telit FE990A */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015), - .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, + .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309), diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 01e2e1ef82cf..ae5f3a01f554 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -555,6 +555,7 @@ config HW_RANDOM_ARM_SMCCC_TRNG config HW_RANDOM_CN10K tristate "Marvell CN10K Random Number Generator support" depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) + default HW_RANDOM if ARCH_THUNDER help This driver provides support for the True Random Number generator available in Marvell CN10K SoCs. diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index b03e80300627..aa2b135e3ee2 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng) return ret;
ret = reset_control_reset(priv->reset); - if (ret) + if (ret) { + clk_disable_unprepare(priv->clk); return ret; + }
if (priv->mask_interrupts) { /* mask the interrupt */ diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index c0d2f824769f..4c50efc46483 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -622,6 +622,7 @@ static int __maybe_unused cctrng_resume(struct device *dev) /* wait for Cryptocell reset completion */ if (!cctrng_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); + clk_disable_unprepare(drvdata->clk); return -EBUSY; }
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index aa993753ab12..1e3048f2bb38 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c @@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 30b4c288c1bb..c3fbbf4d3db7 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -47,6 +47,8 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (!ret) ret = tpm2_commit_space(chip, space, buf, &len); + else + tpm2_flush_space(chip);
out_rc: return ret ? ret : len; diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index d3521aadd43e..44f60730cff4 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -1362,4 +1362,5 @@ int tpm2_sessions_init(struct tpm_chip *chip)
return rc; } +EXPORT_SYMBOL(tpm2_sessions_init); #endif /* CONFIG_TCG_TPM2_HMAC */ diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 4892d491da8d..25a66870c165 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -169,6 +169,9 @@ void tpm2_flush_space(struct tpm_chip *chip) struct tpm_space *space = &chip->work_space; int i;
+ if (!space) + return; + for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) tpm2_flush_context(chip, space->context_tbl[i]); diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c index 91b5c6f14819..4e9594714b14 100644 --- a/drivers/clk/at91/sama7g5.c +++ b/drivers/clk/at91/sama7g5.c @@ -66,6 +66,7 @@ enum pll_component_id { PLL_COMPID_FRAC, PLL_COMPID_DIV0, PLL_COMPID_DIV1, + PLL_COMPID_MAX, };
/* @@ -165,7 +166,7 @@ static struct sama7g5_pll { u8 t; u8 eid; u8 safe_div; -} sama7g5_plls[][PLL_ID_MAX] = { +} sama7g5_plls[][PLL_COMPID_MAX] = { [PLL_ID_CPU] = { [PLL_COMPID_FRAC] = { .n = "cpupll_fracck", @@ -1038,7 +1039,7 @@ static void __init sama7g5_pmc_setup(struct device_node *np) sama7g5_pmc->chws[PMC_MAIN] = hw;
for (i = 0; i < PLL_ID_MAX; i++) { - for (j = 0; j < 3; j++) { + for (j = 0; j < PLL_COMPID_MAX; j++) { struct clk_hw *parent_hw;
if (!sama7g5_plls[i][j].n) diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c index e208ddc51133..db7f40b07d1a 100644 --- a/drivers/clk/imx/clk-composite-7ulp.c +++ b/drivers/clk/imx/clk-composite-7ulp.c @@ -14,6 +14,7 @@ #include "../clk-fractional-divider.h" #include "clk.h"
+#define PCG_PR_MASK BIT(31) #define PCG_PCS_SHIFT 24 #define PCG_PCS_MASK 0x7 #define PCG_CGC_SHIFT 30 @@ -78,6 +79,12 @@ static struct clk_hw *imx_ulp_clk_hw_composite(const char *name, struct clk_hw *hw; u32 val;
+ val = readl(reg); + if (!(val & PCG_PR_MASK)) { + pr_info("PCC PR is 0 for clk:%s, bypass\n", name); + return 0; + } + if (mux_present) { mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 8cc07d056a83..f187582ba491 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -204,6 +204,34 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = { .determine_rate = imx8m_clk_composite_mux_determine_rate, };
+static int imx8m_clk_composite_gate_enable(struct clk_hw *hw) +{ + struct clk_gate *gate = to_clk_gate(hw); + unsigned long flags; + u32 val; + + spin_lock_irqsave(gate->lock, flags); + + val = readl(gate->reg); + val |= BIT(gate->bit_idx); + writel(val, gate->reg); + + spin_unlock_irqrestore(gate->lock, flags); + + return 0; +} + +static void imx8m_clk_composite_gate_disable(struct clk_hw *hw) +{ + /* composite clk requires the disable hook */ +} + +static const struct clk_ops imx8m_clk_composite_gate_ops = { + .enable = imx8m_clk_composite_gate_enable, + .disable = imx8m_clk_composite_gate_disable, + .is_enabled = clk_gate_is_enabled, +}; + struct clk_hw *__imx8m_clk_hw_composite(const char *name, const char * const *parent_names, int num_parents, void __iomem *reg, @@ -217,6 +245,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, struct clk_mux *mux; const struct clk_ops *divider_ops; const struct clk_ops *mux_ops; + const struct clk_ops *gate_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) @@ -257,20 +286,22 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, div->flags = CLK_DIVIDER_ROUND_CLOSEST;
/* skip registering the gate ops if M4 is enabled */ - if (!mcore_booted) { - gate = kzalloc(sizeof(*gate), GFP_KERNEL); - if (!gate) - goto free_div; - - gate_hw = &gate->hw; - gate->reg = reg; - gate->bit_idx = PCG_CGC_SHIFT; - gate->lock = &imx_ccm_lock; - } + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + goto free_div; + + gate_hw = &gate->hw; + gate->reg = reg; + gate->bit_idx = PCG_CGC_SHIFT; + gate->lock = &imx_ccm_lock; + if (!mcore_booted) + gate_ops = &clk_gate_ops; + else + gate_ops = &imx8m_clk_composite_gate_ops;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, mux_ops, div_hw, - divider_ops, gate_hw, &clk_gate_ops, flags); + divider_ops, gate_hw, gate_ops, flags); if (IS_ERR(hw)) goto free_gate;
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c index 81164bdcd6cc..6c6c5a30f328 100644 --- a/drivers/clk/imx/clk-composite-93.c +++ b/drivers/clk/imx/clk-composite-93.c @@ -76,6 +76,13 @@ static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
 static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
 {
+	/*
+	 * Skip disabling the root clock gate if the mcore is enabled,
+	 * since the root clock may still be used by the mcore.
+	 */
+	if (mcore_booted)
+		return;
+
 	imx93_clk_composite_gate_endisable(hw, 0);
 }
@@ -222,7 +229,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, &clk_mux_ro_ops, div_hw, &clk_divider_ro_ops, NULL, NULL, flags); - } else if (!mcore_booted) { + } else { gate = kzalloc(sizeof(*gate), GFP_KERNEL); if (!gate) goto fail; @@ -238,12 +245,6 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p &imx93_clk_composite_divider_ops, gate_hw, &imx93_clk_composite_gate_ops, flags | CLK_SET_RATE_NO_REPARENT); - } else { - hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, - mux_hw, &imx93_clk_composite_mux_ops, div_hw, - &imx93_clk_composite_divider_ops, NULL, - &imx93_clk_composite_gate_ops, - flags | CLK_SET_RATE_NO_REPARENT); }
if (IS_ERR(hw)) diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c index 44462ab50e51..1becba2b62d0 100644 --- a/drivers/clk/imx/clk-fracn-gppll.c +++ b/drivers/clk/imx/clk-fracn-gppll.c @@ -291,6 +291,10 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw) if (val & POWERUP_MASK) return 0;
+ if (pll->flags & CLK_FRACN_GPPLL_FRACN) + writel_relaxed(readl_relaxed(pll->base + PLL_NUMERATOR), + pll->base + PLL_NUMERATOR); + val |= CLKMUX_BYPASS; writel_relaxed(val, pll->base + PLL_CTRL);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index f9394e94f69d..05c7a82b751f 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -542,8 +542,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk);
- clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET_REF]->clk); - clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF]->clk); + clk_set_parent(hws[IMX6UL_CLK_ENET1_REF_SEL]->clk, hws[IMX6UL_CLK_ENET1_REF_125M]->clk); + clk_set_parent(hws[IMX6UL_CLK_ENET2_REF_SEL]->clk, hws[IMX6UL_CLK_ENET2_REF_125M]->clk);
imx_register_uart_clocks(); } diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c index b381d6f784c8..0767d613b68b 100644 --- a/drivers/clk/imx/clk-imx8mp-audiomix.c +++ b/drivers/clk/imx/clk-imx8mp-audiomix.c @@ -154,6 +154,15 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = { PDM_SEL, 2, 0 \ }
+#define CLK_GATE_PARENT(gname, cname, pname) \ + { \ + gname"_cg", \ + IMX8MP_CLK_AUDIOMIX_##cname, \ + { .fw_name = pname, .name = pname }, NULL, 1, \ + CLKEN0 + 4 * !!(IMX8MP_CLK_AUDIOMIX_##cname / 32), \ + 1, IMX8MP_CLK_AUDIOMIX_##cname % 32 \ + } + struct clk_imx8mp_audiomix_sel { const char *name; int clkid; @@ -171,14 +180,14 @@ static struct clk_imx8mp_audiomix_sel sels[] = { CLK_GATE("earc", EARC_IPG), CLK_GATE("ocrama", OCRAMA_IPG), CLK_GATE("aud2htx", AUD2HTX_IPG), - CLK_GATE("earc_phy", EARC_PHY), + CLK_GATE_PARENT("earc_phy", EARC_PHY, "sai_pll_out_div2"), CLK_GATE("sdma2", SDMA2_ROOT), CLK_GATE("sdma3", SDMA3_ROOT), CLK_GATE("spba2", SPBA2_ROOT), CLK_GATE("dsp", DSP_ROOT), CLK_GATE("dspdbg", DSPDBG_ROOT), CLK_GATE("edma", EDMA_ROOT), - CLK_GATE("audpll", AUDPLL_ROOT), + CLK_GATE_PARENT("audpll", AUDPLL_ROOT, "osc_24m"), CLK_GATE("mu2", MU2_ROOT), CLK_GATE("mu3", MU3_ROOT), CLK_PDM, diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 670aa2bab301..e561ff7b135f 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -551,8 +551,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
- hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); - hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); + hws[IMX8MP_CLK_DRAM_ALT] = imx8m_clk_hw_fw_managed_composite("dram_alt", imx8mp_dram_alt_sels, ccm_base + 0xa000); + hws[IMX8MP_CLK_DRAM_APB] = imx8m_clk_hw_fw_managed_composite_critical("dram_apb", imx8mp_dram_apb_sels, ccm_base + 0xa080); hws[IMX8MP_CLK_VPU_G1] = imx8m_clk_hw_composite("vpu_g1", imx8mp_vpu_g1_sels, ccm_base + 0xa100); hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180); hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200); diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c index 7d8883916cac..83f2e8bd6d50 100644 --- a/drivers/clk/imx/clk-imx8qxp.c +++ b/drivers/clk/imx/clk-imx8qxp.c @@ -170,8 +170,8 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER); imx_clk_scu("elcdif_pll", IMX_SC_R_ELCDIF_PLL, IMX_SC_PM_CLK_PLL); imx_clk_scu2("lcd_clk", lcd_sels, ARRAY_SIZE(lcd_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER); - imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0); imx_clk_scu("lcd_pxl_bypass_div_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("lcd_pxl_clk", lcd_pxl_sels, ARRAY_SIZE(lcd_pxl_sels), IMX_SC_R_LCD_0, IMX_SC_PM_CLK_MISC0);
/* Audio SS */ imx_clk_scu("audio_pll0_clk", IMX_SC_R_AUDIO_PLL_0, IMX_SC_PM_CLK_PLL); @@ -206,18 +206,18 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
/* Display controller SS */ - imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0); - imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc0_pll0_clk", IMX_SC_R_DC_0_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc0_pll1_clk", IMX_SC_R_DC_0_PLL_1, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc0_bypass0_clk", IMX_SC_R_DC_0_VIDEO0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("dc0_disp0_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0); + imx_clk_scu2("dc0_disp1_clk", dc0_sels, ARRAY_SIZE(dc0_sels), IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc0_bypass1_clk", IMX_SC_R_DC_0_VIDEO1, IMX_SC_PM_CLK_BYPASS);
- imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0); - imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc1_pll0_clk", IMX_SC_R_DC_1_PLL_0, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc1_pll1_clk", IMX_SC_R_DC_1_PLL_1, IMX_SC_PM_CLK_PLL); imx_clk_scu("dc1_bypass0_clk", IMX_SC_R_DC_1_VIDEO0, IMX_SC_PM_CLK_BYPASS); + imx_clk_scu2("dc1_disp0_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC0); + imx_clk_scu2("dc1_disp1_clk", dc1_sels, ARRAY_SIZE(dc1_sels), IMX_SC_R_DC_1, IMX_SC_PM_CLK_MISC1); imx_clk_scu("dc1_bypass1_clk", IMX_SC_R_DC_1_VIDEO1, IMX_SC_PM_CLK_BYPASS);
/* MIPI-LVDS SS */ diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 31bf9d13f154..b59317e234c6 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -1832,6 +1832,58 @@ const struct clk_ops clk_alpha_pll_agera_ops = { }; EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
+/** + * clk_lucid_5lpe_pll_configure - configure the lucid 5lpe pll + * + * @pll: clk alpha pll + * @regmap: register map + * @config: configuration to apply for pll + */ +void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config) +{ + /* + * If the bootloader left the PLL enabled it's likely that there are + * RCGs that will lock up if we disable the PLL below. + */ + if (trion_pll_is_enabled(pll, regmap)) { + pr_debug("Lucid 5LPE PLL is already enabled, skipping configuration\n"); + return; + } + + clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l); + regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL); + clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), + config->config_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), + config->config_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), + config->config_ctl_hi1_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll), + config->user_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), + config->user_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll), + config->user_ctl_hi1_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), + config->test_ctl_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), + config->test_ctl_hi_val); + clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), + config->test_ctl_hi1_val); + + /* Disable PLL output */ + regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0); + + /* Set operation mode to OFF */ + regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); + + /* Place the PLL in STANDBY mode */ + regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); +} +EXPORT_SYMBOL_GPL(clk_lucid_5lpe_pll_configure); + static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw) { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index df8f0fe15531..e2cf5c7e501d 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -208,6 +208,8 @@ void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); +void clk_lucid_5lpe_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_lucid_ole_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c index 5a09009b7289..6665e064a555 100644 --- a/drivers/clk/qcom/dispcc-sm8250.c +++ b/drivers/clk/qcom/dispcc-sm8250.c @@ -1357,8 +1357,13 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev) disp_cc_sm8250_clocks[DISP_CC_MDSS_EDP_GTC_CLK_SRC] = NULL; }
-	clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
-	clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8350-dispcc")) {
+		clk_lucid_5lpe_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+		clk_lucid_5lpe_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+	} else {
+		clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+		clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+	}
/* Enable clock gating for MDP clocks */ regmap_update_bits(regmap, 0x8000, 0x10, 0x10); diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c index 31ae46f180a5..19066ea58224 100644 --- a/drivers/clk/qcom/dispcc-sm8550.c +++ b/drivers/clk/qcom/dispcc-sm8550.c @@ -196,7 +196,7 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = { static const struct parent_map disp_cc_parent_map_4[] = { { P_BI_TCXO, 0 }, { P_DP0_PHY_PLL_LINK_CLK, 1 }, - { P_DP1_PHY_PLL_VCO_DIV_CLK, 2 }, + { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, @@ -213,7 +213,7 @@ static const struct clk_parent_data disp_cc_parent_data_4[] = {
static const struct parent_map disp_cc_parent_map_5[] = { { P_BI_TCXO, 0 }, - { P_DSI0_PHY_PLL_OUT_BYTECLK, 4 }, + { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 }, };
@@ -400,7 +400,7 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = { .parent_data = disp_cc_parent_data_0, .num_parents = ARRAY_SIZE(disp_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_dp_ops, + .ops = &clk_rcg2_ops, }, };
@@ -562,7 +562,7 @@ static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = { .parent_data = disp_cc_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -577,7 +577,7 @@ static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = { .parent_data = disp_cc_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -1611,7 +1611,7 @@ static struct gdsc mdss_gdsc = { .name = "mdss_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, };
static struct gdsc mdss_int2_gdsc = { @@ -1620,7 +1620,7 @@ static struct gdsc mdss_int2_gdsc = { .name = "mdss_int2_gdsc", }, .pwrsts = PWRSTS_OFF_ON, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE, };
static struct clk_regmap *disp_cc_sm8550_clocks[] = { diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c index f98591148a97..6a4877d88829 100644 --- a/drivers/clk/qcom/gcc-ipq5332.c +++ b/drivers/clk/qcom/gcc-ipq5332.c @@ -3388,6 +3388,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = { [GCC_QDSS_DAP_DIV_CLK_SRC] = &gcc_qdss_dap_div_clk_src.clkr, [GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr, [GCC_QDSS_EUD_AT_CLK] = &gcc_qdss_eud_at_clk.clkr, + [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr, [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr, [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr, [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr, diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index a24a35553e13..7343d2d7676b 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -409,7 +409,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(29), 0, 3, DFLAGS), DIV(0, "sclk_vop_pre", "sclk_vop_src", 0, RK2928_CLKSEL_CON(27), 8, 8, DFLAGS), - MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0, + MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
FACTOR(0, "xin12m", "xin24m", 0, 1, 2), diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c index b30279a96dc8..3027379f2fdd 100644 --- a/drivers/clk/rockchip/clk-rk3588.c +++ b/drivers/clk/rockchip/clk-rk3588.c @@ -526,7 +526,7 @@ PNAME(pmu_200m_100m_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src" }; PNAME(pmu_300m_24m_p) = { "clk_300m_src", "xin24m" }; PNAME(pmu_400m_24m_p) = { "clk_400m_src", "xin24m" }; PNAME(pmu_100m_50m_24m_src_p) = { "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" }; -PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "32k", "clk_pmu1_100m_src" }; +PNAME(pmu_24m_32k_100m_src_p) = { "xin24m", "xin32k", "clk_pmu1_100m_src" }; PNAME(hclk_pmu1_root_p) = { "clk_pmu1_200m_src", "clk_pmu1_100m_src", "clk_pmu1_50m_src", "xin24m" }; PNAME(hclk_pmu_cm0_root_p) = { "clk_pmu1_400m_src", "clk_pmu1_200m_src", "clk_pmu1_100m_src", "xin24m" }; PNAME(mclk_pdm0_p) = { "clk_pmu1_300m_src", "clk_pmu1_200m_src" }; diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c index 53f7af234cc2..aabd0484ac23 100644 --- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c +++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c @@ -145,7 +145,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
 	/* enable power domain and clocks */
 	pm_runtime_enable(priv->dev);
-	ret = pm_runtime_get_sync(priv->dev);
+	ret = pm_runtime_resume_and_get(priv->dev);
 	if (ret < 0)
 		return dev_err_probe(priv->dev, ret, "failed to turn on power\n");
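A quick aside on the starfive vout change just above: pm_runtime_resume_and_get() differs from pm_runtime_get_sync() in that it drops the usage count itself when the resume fails, so the caller no longer needs a pm_runtime_put_noidle() on the error path. A minimal sketch (hypothetical probe function, not from this patch):

	static int example_probe(struct device *dev)
	{
		int ret;

		/*
		 * Old pattern: pm_runtime_get_sync() bumps the usage count
		 * even on failure, so the error path had to call
		 * pm_runtime_put_noidle() before returning.
		 */

		/* New pattern: the helper balances the count on failure. */
		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0)
			return ret;

		/* ... device setup ... */
		return 0;
	}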
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index d964e3affd42..0eab7f3e2eab 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -240,6 +240,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) }
clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); if (IS_ERR(clk)) { pr_err("%s: failed to get atl clock %d from provider\n", __func__, i); diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c index b4afe3a67583..eac4c95c6127 100644 --- a/drivers/clocksource/timer-qcom.c +++ b/drivers/clocksource/timer-qcom.c @@ -233,6 +233,7 @@ static int __init msm_dt_timer_init(struct device_node *np) }
if (of_property_read_u32(np, "clock-frequency", &freq)) { + iounmap(cpu0_base); pr_err("Unknown frequency\n"); return -EINVAL; } @@ -243,7 +244,11 @@ static int __init msm_dt_timer_init(struct device_node *np) freq /= 4; writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
- return msm_timer_init(freq, 32, irq, !!percpu_offset); + ret = msm_timer_init(freq, 32, irq, !!percpu_offset); + if (ret) + iounmap(cpu0_base); + + return ret; } TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 4d3f27958fbd..62dfa42570e4 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -90,6 +90,9 @@ struct ti_cpufreq_soc_data { unsigned long efuse_shift; unsigned long rev_offset; bool multi_regulator; +/* Backward compatibility hack: Might have missing syscon */ +#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1 + u8 quirks; };
struct ti_cpufreq_data { @@ -254,6 +257,7 @@ static struct ti_cpufreq_soc_data omap34xx_soc_data = { .efuse_mask = BIT(3), .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, };
/* @@ -281,6 +285,7 @@ static struct ti_cpufreq_soc_data omap36xx_soc_data = { .efuse_mask = BIT(9), .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = true, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, };
/* @@ -295,6 +300,7 @@ static struct ti_cpufreq_soc_data am3517_soc_data = { .efuse_mask = 0, .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE, .multi_regulator = false, + .quirks = TI_QUIRK_SYSCON_MAY_BE_MISSING, };
static struct ti_cpufreq_soc_data am625_soc_data = { @@ -340,7 +346,7 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset, &efuse); - if (ret == -EIO) { + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + opp_data->soc_data->efuse_offset, 4); @@ -381,7 +387,7 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset, &revision); - if (ret == -EIO) { + if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) { /* not a syscon register! */ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE + opp_data->soc_data->rev_offset, 4); diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index a6e123dfe394..5bb3401220d2 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -8,6 +8,7 @@
#define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
+#include <linux/cleanup.h> #include <linux/cpuhotplug.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> @@ -236,19 +237,16 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, { struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu); struct device_node *state_node; - struct device_node *cpu_node; u32 *states; int i, ret;
- cpu_node = of_cpu_device_node_get(cpu); + struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu); if (!cpu_node) return -ENODEV;
states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL); - if (!states) { - ret = -ENOMEM; - goto fail; - } + if (!states) + return -ENOMEM;
/* Parse SBI specific details from state DT nodes */ for (i = 1; i < state_count; i++) { @@ -264,10 +262,8 @@ static int sbi_cpuidle_dt_init_states(struct device *dev,
pr_debug("sbi-state %#x index %d\n", states[i], i); } - if (i != state_count) { - ret = -ENODEV; - goto fail; - } + if (i != state_count) + return -ENODEV;
/* Initialize optional data, used for the hierarchical topology. */ ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu); @@ -277,10 +273,7 @@ static int sbi_cpuidle_dt_init_states(struct device *dev, /* Store states in the per-cpu struct. */ data->states = states;
-fail: - of_node_put(cpu_node); - - return ret; + return 0; }
static void sbi_cpuidle_deinit_cpu(int cpu) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index fdd724228c2f..25c02e267258 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -708,6 +708,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, GFP_KERNEL : GFP_ATOMIC; struct ahash_edesc *edesc;
+ sg_num = pad_sg_nents(sg_num); edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); if (!edesc) return NULL; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 9810edbb272d..a2964fb31047 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -910,7 +910,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev->int_rcvd = 0;
-	reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
+	reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd);
+
+	/*
+	 * If invoked during panic handling, local interrupts are disabled so
+	 * the PSP command completion interrupt can't be used.
+	 * sev_wait_cmd_ioc() already checks for interrupts disabled and
+	 * polls for PSP command completion. Ensure we do not request an
+	 * interrupt from the PSP if irqs disabled.
+	 */
+	if (!irqs_disabled())
+		reg |= SEV_CMDRESP_IOC;
+
 	iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
 	/* wait for command completion */
@@ -2410,6 +2421,8 @@ void sev_pci_init(void)
 		return;

 err:
+	sev_dev_destroy(psp_master);
+
 	psp_master->sev_data = NULL;
 }
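The comment in the __sev_do_cmd_locked() hunk above captures the key idea: when the caller runs with IRQs off (for example in the panic path) the PSP completion interrupt can never be delivered, so the IOC bit is left clear and sev_wait_cmd_ioc() falls back to polling. A generic sketch of that interrupt-or-poll wait, with invented names (the real logic lives in sev_wait_cmd_ioc()):

	static int wait_for_done(void __iomem *status_reg, u32 done_bit,
				 wait_queue_head_t *wq, bool *done_flag,
				 unsigned int timeout_ms)
	{
		if (irqs_disabled()) {
			/* Panic path: the completion IRQ cannot fire, so poll. */
			while (timeout_ms--) {
				if (ioread32(status_reg) & done_bit)
					return 0;
				mdelay(1);
			}
			return -ETIMEDOUT;
		}

		/* Normal path: sleep until the ISR sets the flag and wakes us. */
		if (!wait_event_timeout(*wq, *done_flag,
					msecs_to_jiffies(timeout_ms)))
			return -ETIMEDOUT;

		return 0;
	}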
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 10aa4da93323..6b536ad2ada5 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -13,9 +13,7 @@ #include <linux/uacce.h> #include "hpre.h"
-#define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) -#define HPRE_COMM_CNT_CLR_CE 0x0 #define HPRE_CTRL_CNT_CLR_CE 0x301000 #define HPRE_FSM_MAX_CNT 0x301008 #define HPRE_VFG_AXQOS 0x30100c @@ -42,7 +40,6 @@ #define HPRE_HAC_INT_SET 0x301500 #define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 -#define HPRE_CORE_INT_DISABLE GENMASK(21, 0) #define HPRE_RDCHN_INI_ST 0x301a00 #define HPRE_CLSTR_BASE 0x302000 #define HPRE_CORE_EN_OFFSET 0x04 @@ -66,7 +63,6 @@ #define HPRE_CLSTR_ADDR_INTRVL 0x1000 #define HPRE_CLUSTER_INQURY 0x100 #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 -#define HPRE_TIMEOUT_ABNML_BIT 6 #define HPRE_PASID_EN_BIT 9 #define HPRE_REG_RD_INTVRL_US 10 #define HPRE_REG_RD_TMOUT_US 1000 @@ -203,9 +199,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, - {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE}, - {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, - {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, + {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E}, + {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E}, + {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E}, {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, @@ -358,6 +354,8 @@ static struct dfx_diff_registers hpre_diff_regs[] = { }, };
+static const struct hisi_qm_err_ini hpre_err_ini; + bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; @@ -654,11 +652,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE); writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
- /* HPRE need more time, we close this interrupt */ - val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); - val |= BIT(HPRE_TIMEOUT_ABNML_BIT); - writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); - if (qm->ver >= QM_HW_V3) writel(HPRE_RSA_ENB | HPRE_ECC_ENB, qm->io_base + HPRE_TYPES_ENB); @@ -667,9 +660,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); writel(0x0, qm->io_base + HPRE_BD_ENDIAN); - writel(0x0, qm->io_base + HPRE_INT_MASK); writel(0x0, qm->io_base + HPRE_POISON_BYPASS); - writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE); writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); @@ -759,7 +750,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm)
static void hpre_hw_error_enable(struct hisi_qm *qm) { - u32 ce, nfe; + u32 ce, nfe, err_en;
ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); @@ -776,7 +767,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm) hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */ - writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); + err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE; + writel(~err_en, qm->io_base + HPRE_INT_MASK); }
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) @@ -1161,6 +1153,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; + qm->err_ini = &hpre_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } @@ -1350,8 +1343,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
hpre_open_sva_prefetch(qm);
- qm->err_ini = &hpre_err_ini; - qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) @@ -1380,6 +1371,18 @@ static int hpre_probe_init(struct hpre *hpre) return 0; }
+static void hpre_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hpre_cnt_regs_clear(qm); + qm->debug.curr_qm_qp_num = 0; + hpre_show_last_regs_uninit(qm); + hpre_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); +} + static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_qm *qm; @@ -1405,7 +1408,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm); if (ret) - goto err_with_err_init; + goto err_with_probe_init;
ret = hpre_debugfs_init(qm); if (ret) @@ -1444,9 +1447,8 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL);
-err_with_err_init: - hpre_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); +err_with_probe_init: + hpre_probe_uninit(qm);
err_with_qm_init: hisi_qm_uninit(qm); @@ -1468,13 +1470,7 @@ static void hpre_remove(struct pci_dev *pdev) hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL);
-	if (qm->fun_type == QM_HW_PF) {
-		hpre_cnt_regs_clear(qm);
-		qm->debug.curr_qm_qp_num = 0;
-		hpre_show_last_regs_uninit(qm);
-		hisi_qm_dev_err_uninit(qm);
-	}
-
+	hpre_probe_uninit(qm);
 	hisi_qm_uninit(qm);
 }
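One detail from the hpre_main.c changes worth spelling out: hpre_hw_error_enable() no longer unmasks everything with a fixed HPRE_CORE_INT_ENABLE write, it builds the set of wanted error sources from the CE/NFE capability values and writes the complement into HPRE_INT_MASK, which is a mask register (a 1 means the source is masked). A small sketch of that write-the-complement idiom, with invented names:

	#define EXAMPLE_INT_MASK	0x400	/* invented register offset */

	/* Enable exactly the advertised error sources in a mask register. */
	static void example_hw_error_enable(void __iomem *base, u32 ce, u32 nfe,
					    u32 fe)
	{
		u32 err_en = ce | nfe | fe;	/* sources we want reported */

		/* 1 = masked, so clear only the bits being enabled. */
		writel(~err_en, base + EXAMPLE_INT_MASK);
	}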
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f614fd228b56..07983af9e3e2 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -450,6 +450,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { };
static void qm_irqs_unregister(struct hisi_qm *qm); +static int qm_reset_device(struct hisi_qm *qm);
static u32 qm_get_hw_error_status(struct hisi_qm *qm) { @@ -4014,6 +4015,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set) return -ETIMEDOUT; }
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +{ + u32 nfe_enb = 0; + + /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ + if (qm->ver >= QM_HW_V3) + return; + + if (!qm->err_status.is_dev_ecc_mbit && + qm->err_status.is_qm_ecc_mbit && + qm->err_ini->close_axi_master_ooo) { + qm->err_ini->close_axi_master_ooo(qm); + } else if (qm->err_status.is_dev_ecc_mbit && + !qm->err_status.is_qm_ecc_mbit && + !qm->err_ini->close_axi_master_ooo) { + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + } +} + static int qm_vf_reset_prepare(struct hisi_qm *qm, enum qm_stop_reason stop_reason) { @@ -4078,6 +4101,8 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return ret; }
+ qm_dev_ecc_mbit_handle(qm); + /* PF obtains the information of VF by querying the register. */ qm_cmd_uninit(qm);
@@ -4108,33 +4133,26 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) return 0; }
-static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +static int qm_master_ooo_check(struct hisi_qm *qm) { - u32 nfe_enb = 0; + u32 val; + int ret;
- /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ - if (qm->ver >= QM_HW_V3) - return; + /* Check the ooo register of the device before resetting the device. */ + writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, + val, (val == ACC_MASTER_TRANS_RETURN_RW), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) + pci_warn(qm->pdev, "Bus lock! Please reset system.\n");
- if (!qm->err_status.is_dev_ecc_mbit && - qm->err_status.is_qm_ecc_mbit && - qm->err_ini->close_axi_master_ooo) { - qm->err_ini->close_axi_master_ooo(qm); - } else if (qm->err_status.is_dev_ecc_mbit && - !qm->err_status.is_qm_ecc_mbit && - !qm->err_ini->close_axi_master_ooo) { - nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); - writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, - qm->io_base + QM_RAS_NFE_ENABLE); - writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); - } + return ret; }
-static int qm_soft_reset(struct hisi_qm *qm) +static int qm_soft_reset_prepare(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val;
/* Ensure all doorbells and mailboxes received by QM */ ret = qm_check_req_recv(qm); @@ -4155,30 +4173,23 @@ static int qm_soft_reset(struct hisi_qm *qm) return ret; }
- qm_dev_ecc_mbit_handle(qm); - - /* OOO register set and check */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - /* If bus lock, reset chip */ - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - }
if (qm->err_ini->close_sva_prefetch) qm->err_ini->close_sva_prefetch(qm);
ret = qm_set_pf_mse(qm, false); - if (ret) { + if (ret) pci_err(pdev, "Fails to disable pf MSE bit.\n"); - return ret; - } + + return ret; +} + +static int qm_reset_device(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev;
/* The reset related sub-control registers are not in PCI BAR */ if (ACPI_HANDLE(&pdev->dev)) { @@ -4197,12 +4208,23 @@ static int qm_soft_reset(struct hisi_qm *qm) pci_err(pdev, "Reset step %llu failed!\n", value); return -EIO; } - } else { - pci_err(pdev, "No reset method!\n"); - return -EINVAL; + + return 0; }
- return 0; + pci_err(pdev, "No reset method!\n"); + return -EINVAL; +} + +static int qm_soft_reset(struct hisi_qm *qm) +{ + int ret; + + ret = qm_soft_reset_prepare(qm); + if (ret) + return ret; + + return qm_reset_device(qm); }
static int qm_vf_reset_done(struct hisi_qm *qm) @@ -5155,6 +5177,35 @@ static int qm_get_pci_res(struct hisi_qm *qm) return ret; }
+static int qm_clear_device(struct hisi_qm *qm) +{ + acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + /* Device does not support reset, return */ + if (!qm->err_ini->err_info_init) + return 0; + qm->err_ini->err_info_init(qm); + + if (!handle) + return 0; + + /* No reset method, return */ + if (!acpi_has_method(handle, qm->err_info.acpi_rst)) + return 0; + + ret = qm_master_ooo_check(qm); + if (ret) { + writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); + return ret; + } + + return qm_reset_device(qm); +} + static int hisi_qm_pci_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5184,8 +5235,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) goto err_get_pci_res; }
+ ret = qm_clear_device(qm); + if (ret) + goto err_free_vectors; + return 0;
+err_free_vectors: + pci_free_irq_vectors(pdev); err_get_pci_res: qm_put_pci_res(qm); err_disable_pcidev: @@ -5486,7 +5543,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; - u32 val;
ret = qm->ops->set_msi(qm, false); if (ret) { @@ -5494,18 +5550,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm) return ret; }
- /* shutdown OOO register */ - writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, - qm->io_base + ACC_MASTER_GLOBAL_CTRL); - - ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, - val, - (val == ACC_MASTER_TRANS_RETURN_RW), - POLL_PERIOD, POLL_TIMEOUT); - if (ret) { - pci_emerg(pdev, "Bus lock! Please reset system.\n"); + ret = qm_master_ooo_check(qm); + if (ret) return ret; - }
ret = qm_set_pf_mse(qm, false); if (ret) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 75aad04ffe5e..c35533d8930b 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1065,9 +1065,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret;
- qm->err_ini = &sec_err_ini; - qm->err_ini->err_info_init(qm); - ret = sec_set_user_domain_and_cache(qm); if (ret) return ret; @@ -1122,6 +1119,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &sec_devices; + qm->err_ini = &sec_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1186,6 +1184,12 @@ static int sec_probe_init(struct sec_dev *sec)
static void sec_probe_uninit(struct hisi_qm *qm) { + if (qm->fun_type == QM_HW_VF) + return; + + sec_debug_regs_clear(qm); + sec_show_last_regs_uninit(qm); + sec_close_sva_prefetch(qm); hisi_qm_dev_err_uninit(qm); }
@@ -1274,7 +1278,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: - sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); err_qm_uninit: sec_qm_uninit(qm); @@ -1296,11 +1299,6 @@ static void sec_remove(struct pci_dev *pdev) sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL); - - if (qm->fun_type == QM_HW_PF) - sec_debug_regs_clear(qm); - sec_show_last_regs_uninit(qm); - sec_probe_uninit(qm);
sec_qm_uninit(qm); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 7c2d803886fd..d07e47b48be0 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1141,8 +1141,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl; ctrl->hisi_zip = hisi_zip; - qm->err_ini = &hisi_zip_err_ini; - qm->err_ini->err_info_init(qm);
ret = hisi_zip_set_user_domain_and_cache(qm); if (ret) @@ -1203,6 +1201,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &zip_devices; + qm->err_ini = &hisi_zip_err_ini; if (pf_q_num_flag) set_bit(QM_MODULE_PARAM, &qm->misc_ctl); } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { @@ -1269,6 +1268,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) return 0; }
+static void hisi_zip_probe_uninit(struct hisi_qm *qm) +{ + if (qm->fun_type == QM_HW_VF) + return; + + hisi_zip_show_last_regs_uninit(qm); + hisi_zip_close_sva_prefetch(qm); + hisi_qm_dev_err_uninit(qm); +} + static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_zip *hisi_zip; @@ -1295,7 +1304,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm); if (ret) - goto err_dev_err_uninit; + goto err_probe_uninit;
ret = hisi_zip_debugfs_init(qm); if (ret) @@ -1334,9 +1343,8 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL);
-err_dev_err_uninit: - hisi_zip_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); +err_probe_uninit: + hisi_zip_probe_uninit(qm);
err_qm_uninit: hisi_zip_qm_uninit(qm); @@ -1358,8 +1366,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); - hisi_zip_show_last_regs_uninit(qm); - hisi_qm_dev_err_uninit(qm); + hisi_zip_probe_uninit(qm); hisi_zip_qm_uninit(qm); }
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index e810d286ee8c..237f87000070 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -495,10 +495,10 @@ static void remove_device_compression_modes(struct iaa_device *iaa_device) if (!device_mode) continue;
-		free_device_compression_mode(iaa_device, device_mode);
-		iaa_device->compression_modes[i] = NULL;
 		if (iaa_compression_modes[i]->free)
 			iaa_compression_modes[i]->free(device_mode);
+		free_device_compression_mode(iaa_device, device_mode);
+		iaa_device->compression_modes[i] = NULL;
 	}
 }
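The iaa_crypto reordering above is the usual use-after-free fix: run the per-mode ->free() callback while the device_mode object is still valid, and only then release it and clear the slot. A self-contained sketch of the rule, kernel-style, with invented names (kfree() needs <linux/slab.h>):

	struct example_mode {
		void (*free_cb)(struct example_mode *m);	/* optional hook */
		/* ... per-mode state ... */
	};

	static void example_remove_mode(struct example_mode **slot)
	{
		struct example_mode *m = *slot;

		if (!m)
			return;

		if (m->free_cb)
			m->free_cb(m);	/* object must still be alive here */

		kfree(m);		/* only now tear it down */
		*slot = NULL;
	}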
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 8b10926cedba..e8c53bd76f1b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -83,7 +83,7 @@ #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
/* Ring interrupt */ -#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2) +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0) #define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0) #define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4 #define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC) diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 74f0818c0703..55f1ff1e0b32 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -323,6 +323,8 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) if (hw_data->stop_timer) hw_data->stop_timer(accel_dev);
+ hw_data->disable_iov(accel_dev); + if (wait) msleep(100);
@@ -386,8 +388,6 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
adf_tl_shutdown(accel_dev);
- hw_data->disable_iov(accel_dev); - if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { hw_data->free_irq(accel_dev); clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c index 0e31f4b41844..0cee3b23dee9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c @@ -18,14 +18,17 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
 	dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
 	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
-		vf->restarting = false;
+		if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
+			vf->restarting = true;
+		else
+			vf->restarting = false;
+
 		if (!vf->init)
 			continue;
+
 		if (adf_send_pf2vf_msg(accel_dev, i, msg))
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to send restarting msg to VF%d\n", i);
-		else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
-			vf->restarting = true;
 	}
 }
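Side note on the loop above: the new if/else collapses to a single boolean assignment, so an equivalent (purely illustrative) form of the first statement would be:

	vf->restarting = vf->init &&
			 vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK;

The visible behavioural change is that the flag is now decided before the notification is sent, and only VFs that are both initialised and protocol-compatible are expected to acknowledge the restart, instead of the flag being set only after a successful send.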
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c index 1141258db4b6..10c91e56d6be 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.c @@ -48,6 +48,20 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
+void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE }; + + /* Check compatibility version */ + if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK) + return; + + if (adf_send_vf2pf_msg(accel_dev, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send Restarting complete event to PF\n"); +} +EXPORT_SYMBOL_GPL(adf_vf2pf_notify_restart_complete); + int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) { u8 pf_version; diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h index 71bc0e3f1d93..d79340ab3134 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_msg.h @@ -6,6 +6,7 @@ #if defined(CONFIG_PCI_IOV) int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev); void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev); +void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev); int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev); int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev); int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index cdbb2d687b1b..4ab9ac331519 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -13,6 +13,7 @@ #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_cfg_common.h" +#include "adf_pfvf_vf_msg.h" #include "adf_transport_access_macros.h" #include "adf_transport_internal.h"
@@ -75,6 +76,7 @@ static void adf_dev_stop_async(struct work_struct *work)
/* Re-enable PF2VF interrupts */ adf_enable_pf2vf_interrupts(accel_dev); + adf_vf2pf_notify_restart_complete(accel_dev); kfree(stop_data); }
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 251e088a53df..b11545cc5cb7 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1353,6 +1353,7 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) ahash->setkey = n2_hmac_async_setkey;
base = &ahash->halg.base; + err = -EINVAL; if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg) >= CRYPTO_MAX_ALG_NAME) goto out_free_p; diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index c670d7d0c11e..6496b075a48d 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -196,7 +196,7 @@ static int qcom_rng_probe(struct platform_device *pdev) if (IS_ERR(rng->clk)) return PTR_ERR(rng->clk);
- rng->of_data = (struct qcom_rng_of_data *)of_device_get_match_data(&pdev->dev); + rng->of_data = (struct qcom_rng_of_data *)device_get_match_data(&pdev->dev);
qcom_rng_dev = rng; ret = crypto_register_rng(&qcom_rng_alg); @@ -247,7 +247,7 @@ static struct qcom_rng_of_data qcom_trng_of_data = { };
static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = { - { .id = "QCOM8160", .driver_data = 1 }, + { .id = "QCOM8160", .driver_data = (kernel_ulong_t)&qcom_prng_ee_of_data }, {} }; MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match); diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 51132a575b27..73b6498d5e5c 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -390,10 +390,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK; if (!size) { - info->dvsec_range[i] = (struct range) { - .start = 0, - .end = CXL_RESOURCE_NONE, - }; continue; }
@@ -411,12 +407,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
- info->dvsec_range[i] = (struct range) { + info->dvsec_range[ranges++] = (struct range) { .start = base, .end = base + size - 1 }; - - ranges++; }
info->ranges = ranges; diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index 0fe75eed8973..189a2fc29e74 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -316,7 +316,7 @@ static u64 ehl_err_addr_to_imc_addr(u64 eaddr, int mc) if (igen6_tom <= _4GB) return eaddr + igen6_tolud - _4GB;
-	if (eaddr < _4GB)
+	if (eaddr >= igen6_tom)
 		return eaddr + igen6_tolud - igen6_tom;
return eaddr; diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index ea7a9a342dd3..d7416166fd8a 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spinlock.h> +#include <linux/sizes.h> #include <linux/interrupt.h> #include <linux/of.h>
@@ -337,6 +338,7 @@ struct synps_edac_priv { * @get_mtype: Get mtype. * @get_dtype: Get dtype. * @get_ecc_state: Get ECC state. + * @get_mem_info: Get EDAC memory info * @quirks: To differentiate IPs. */ struct synps_platform_data { @@ -344,6 +346,9 @@ struct synps_platform_data { enum mem_type (*get_mtype)(const void __iomem *base); enum dev_type (*get_dtype)(const void __iomem *base); bool (*get_ecc_state)(void __iomem *base); +#ifdef CONFIG_EDAC_DEBUG + u64 (*get_mem_info)(struct synps_edac_priv *priv); +#endif int quirks; };
@@ -402,6 +407,25 @@ static int zynq_get_error_info(struct synps_edac_priv *priv) return 0; }
+#ifdef CONFIG_EDAC_DEBUG +/** + * zynqmp_get_mem_info - Get the current memory info. + * @priv: DDR memory controller private instance data. + * + * Return: host interface address. + */ +static u64 zynqmp_get_mem_info(struct synps_edac_priv *priv) +{ + u64 hif_addr = 0, linear_addr; + + linear_addr = priv->poison_addr; + if (linear_addr >= SZ_32G) + linear_addr = linear_addr - SZ_32G + SZ_2G; + hif_addr = linear_addr >> 3; + return hif_addr; +} +#endif + /** * zynqmp_get_error_info - Get the current ECC error info. * @priv: DDR memory controller private instance data. @@ -922,6 +946,9 @@ static const struct synps_platform_data zynqmp_edac_def = { .get_mtype = zynqmp_get_mtype, .get_dtype = zynqmp_get_dtype, .get_ecc_state = zynqmp_get_ecc_state, +#ifdef CONFIG_EDAC_DEBUG + .get_mem_info = zynqmp_get_mem_info, +#endif .quirks = (DDR_ECC_INTR_SUPPORT #ifdef CONFIG_EDAC_DEBUG | DDR_ECC_DATA_POISON_SUPPORT @@ -975,10 +1002,16 @@ MODULE_DEVICE_TABLE(of, synps_edac_match); static void ddr_poison_setup(struct synps_edac_priv *priv) { int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval; + const struct synps_platform_data *p_data; int index; ulong hif_addr = 0;
- hif_addr = priv->poison_addr >> 3; + p_data = priv->p_data; + + if (p_data->get_mem_info) + hif_addr = p_data->get_mem_info(priv); + else + hif_addr = priv->poison_addr >> 3;
for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) { if (priv->row_shift[index]) diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 9a7dc90330a3..a888a001bedb 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -599,11 +599,11 @@ static void complete_transaction(struct fw_card *card, int rcode, u32 request_ts queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
 		break;
+	}
 	default:
 		WARN_ON(1);
 		break;
 	}
-	}
/* Drop the idr's reference */ client_put(client); diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index 4e7944b91e38..0c8908d3b1d6 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -473,6 +473,13 @@ static int scmi_optee_chan_free(int id, void *p, void *data) struct scmi_chan_info *cinfo = p; struct scmi_optee_channel *channel = cinfo->transport_info;
+	/*
+	 * Different protocols might share the same chan info, so a previous
+	 * call might have already freed the structure.
+	 */
+	if (!channel)
+		return 0;
+
 	mutex_lock(&scmi_optee_private->mu);
 	list_del(&channel->link);
 	mutex_unlock(&scmi_optee_private->mu);
/* Allocate space for the logs and copy them. */ - status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, sizeof(*log_tbl) + log_size, (void **)&log_tbl);
if (status != EFI_SUCCESS) { diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index 00c379a3cceb..0f5ac346bda4 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -1954,14 +1954,12 @@ static int qcom_scm_probe(struct platform_device *pdev) * will cause the boot stages to enter download mode, unless * disabled below by a clean shutdown/reboot. */ - if (download_mode) - qcom_scm_set_download_mode(true); - + qcom_scm_set_download_mode(download_mode);
/* * Disable SDI if indicated by DT that it is enabled by default. */ - if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled")) + if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode) qcom_scm_disable_sdi();
ret = of_reserved_mem_device_init(__scm->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 094498a0964b..e2382566af44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -117,9 +117,10 @@ * - 3.56.0 - Update IB start address and size alignment for decode and encode * - 3.57.0 - Compute tunneling on GFX10+ * - 3.58.0 - Add GFX12 DCC support + * - 3.59.0 - Cleared VRAM */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 58 +#define KMS_DRIVER_MINOR 59 #define KMS_DRIVER_PATCHLEVEL 0
/* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a060c28f0877..119eebeb16b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -902,10 +902,12 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params, { struct amdgpu_vm *vm = params->vm;
-	if (!fence || !*fence)
+	tlb_cb->vm = vm;
+	if (!fence || !*fence) {
+		amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
 		return;
+	}
- tlb_cb->vm = vm; if (!dma_fence_add_callback(*fence, &tlb_cb->cb, amdgpu_vm_tlb_seq_cb)) { dma_fence_put(vm->last_tlb_flush); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index fb2b394bb9c5..6e9eeaeb3de1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -213,7 +213,7 @@ struct amd_sriov_msg_pf2vf_info { uint32_t gpu_capacity; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE]; -}; +} __packed;
struct amd_sriov_msg_vf2pf_info_header { /* the total structure size in byte */ @@ -273,7 +273,7 @@ struct amd_sriov_msg_vf2pf_info { uint32_t mes_info_size; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE]; -}; +} __packed;
/* mailbox message send from guest to host */ enum amd_sriov_mailbox_request_message { diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 25feab188dfe..ebf83fee43bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -2065,26 +2065,29 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder) fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { struct edid *edid; - int edid_size = - max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); - edid = kmalloc(edid_size, GFP_KERNEL); + int edid_size; + + if (fake_edid_record->ucFakeEDIDLength == 128) + edid_size = fake_edid_record->ucFakeEDIDLength; + else + edid_size = fake_edid_record->ucFakeEDIDLength * 128; + edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0], + edid_size, GFP_KERNEL); if (edid) { - memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], - fake_edid_record->ucFakeEDIDLength); - if (drm_edid_is_valid(edid)) { adev->mode_info.bios_hardcoded_edid = edid; adev->mode_info.bios_hardcoded_edid_size = edid_size; - } else + } else { kfree(edid); + } } + record += struct_size(fake_edid_record, + ucFakeEDIDString, + edid_size); + } else { + /* empty fake edid record must be 3 bytes long */ + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; } - record += fake_edid_record->ucFakeEDIDLength ? - struct_size(fake_edid_record, - ucFakeEDIDString, - fake_edid_record->ucFakeEDIDLength) : - /* empty fake edid record must be 3 bytes long */ - sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index e45d23e82878..34b95ca700b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -202,12 +202,16 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ) };
-static const struct soc15_reg_golden golden_settings_gc_12_0[] = { +static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f), SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000), SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020) };
+static const struct soc15_reg_golden golden_settings_gc_12_0[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x00008000, 0x00008000), +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -3446,10 +3450,14 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): + soc15_program_register_sequence(adev, + golden_settings_gc_12_0, + (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); + if (adev->rev_id == 0) soc15_program_register_sequence(adev, - golden_settings_gc_12_0, - (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); + golden_settings_gc_12_0_rev0, + (const u32)ARRAY_SIZE(golden_settings_gc_12_0_rev0)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 8aded0a67037..18a0b0441457 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -160,7 +160,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, int api_status_off) { union MESAPI__QUERY_MES_STATUS mes_status_pkt; - signed long timeout = 3000000; /* 3000 ms */ + signed long timeout = 2100000; /* 2100 ms */ struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring[0]; struct MES_API_STATUS *api_status; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index a79a8adc3aa5..48b3c4e4b1ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -146,7 +146,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, int api_status_off) { union MESAPI__QUERY_MES_STATUS mes_status_pkt; - signed long timeout = 3000000; /* 3000 ms */ + signed long timeout = 2100000; /* 2100 ms */ struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring[pipe]; spinlock_t *ring_lock = &mes->ring_lock[pipe]; @@ -453,6 +453,11 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, union MESAPI__MISC misc_pkt; int pipe;
+ if (mes->adev->enable_uni_mes) + pipe = AMDGPU_MES_KIQ_PIPE; + else + pipe = AMDGPU_MES_SCHED_PIPE; + memset(&misc_pkt, 0, sizeof(misc_pkt));
misc_pkt.header.type = MES_API_TYPE_SCHEDULER; @@ -487,6 +492,7 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1; break; case MES_MISC_OP_SET_SHADER_DEBUGGER: + pipe = AMDGPU_MES_SCHED_PIPE; misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER; misc_pkt.set_shader_debugger.process_context_addr = input->set_shader_debugger.process_context_addr; @@ -504,11 +510,6 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, return -EINVAL; }
- if (mes->adev->enable_uni_mes) - pipe = AMDGPU_MES_KIQ_PIPE; - else - pipe = AMDGPU_MES_SCHED_PIPE; - return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, &misc_pkt, sizeof(misc_pkt), offsetof(union MESAPI__MISC, api_status)); @@ -582,6 +583,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) mes_set_hw_res_pkt.disable_mes_log = 1; mes_set_hw_res_pkt.use_different_vmid_compute = 1; mes_set_hw_res_pkt.enable_reg_active_poll = 1; + mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
/* * Keep oversubscribe timer for sdma . When we have unmapped doorbell diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index ecee9e7d7e4c..403c177f2434 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -1022,13 +1022,16 @@ static void sdma_v7_0_vm_copy_pte(struct amdgpu_ib *ib, unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | + SDMA_PKT_COPY_LINEAR_HEADER_CPV(1); + ib->ptr[ib->length_dw++] = bytes - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src); ib->ptr[ib->length_dw++] = upper_32_bits(src); ib->ptr[ib->length_dw++] = lower_32_bits(pe); ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = 0;
}
@@ -1631,7 +1634,7 @@ static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev) }
static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = { - .copy_pte_num_dw = 7, + .copy_pte_num_dw = 8, .copy_pte = sdma_v7_0_vm_copy_pte, .write_pte = sdma_v7_0_vm_write_pte, .set_pte_pde = sdma_v7_0_vm_set_pte_pde, diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c index b0c3678cfb31..fd4c3d4f8387 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc24.c +++ b/drivers/gpu/drm/amd/amdgpu/soc24.c @@ -250,13 +250,6 @@ static void soc24_program_aspm(struct amdgpu_device *adev) adev->nbio.funcs->program_aspm(adev); }
-static void soc24_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version soc24_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, @@ -454,6 +447,11 @@ static int soc24_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_get_irq(adev);
+ /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init. + */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; }
@@ -491,7 +489,7 @@ static int soc24_common_hw_init(void *handle) adev->df.funcs->hw_init(adev);
/* enable the doorbell aperture */ - soc24_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true);
return 0; } @@ -500,8 +498,13 @@ static int soc24_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* disable the doorbell aperture */ - soc24_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because soc21_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. + */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_put_irq(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 8d75061f9f38..4fc307ab0ff8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -1347,170 +1347,6 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring) } }
-static int vcn_v4_0_5_limit_sched(struct amdgpu_cs_parser *p, - struct amdgpu_job *job) -{ - struct drm_gpu_scheduler **scheds; - - /* The create msg must be in the first IB submitted */ - if (atomic_read(&job->base.entity->fence_seq)) - return -EINVAL; - - /* if VCN0 is harvested, we can't support AV1 */ - if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) - return -EINVAL; - - scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC] - [AMDGPU_RING_PRIO_0].sched; - drm_sched_entity_modify_sched(job->base.entity, scheds, 1); - return 0; -} - -static int vcn_v4_0_5_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, - uint64_t addr) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_va_mapping *map; - uint32_t *msg, num_buffers; - struct amdgpu_bo *bo; - uint64_t start, end; - unsigned int i; - void *ptr; - int r; - - addr &= AMDGPU_GMC_HOLE_MASK; - r = amdgpu_cs_find_mapping(p, addr, &bo, &map); - if (r) { - DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr); - return r; - } - - start = map->start * AMDGPU_GPU_PAGE_SIZE; - end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE; - if (addr & 0x7) { - DRM_ERROR("VCN messages must be 8 byte aligned!\n"); - return -EINVAL; - } - - bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (r) { - DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r); - return r; - } - - r = amdgpu_bo_kmap(bo, &ptr); - if (r) { - DRM_ERROR("Failed mapping the VCN message (%d)!\n", r); - return r; - } - - msg = ptr + addr - start; - - /* Check length */ - if (msg[1] > end - addr) { - r = -EINVAL; - goto out; - } - - if (msg[3] != RDECODE_MSG_CREATE) - goto out; - - num_buffers = msg[2]; - for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { - uint32_t offset, size, *create; - - if (msg[0] != RDECODE_MESSAGE_CREATE) - continue; - - offset = msg[1]; - size = msg[2]; - - if (offset + size > end) { - r = -EINVAL; - goto out; - } - - create = ptr + addr + offset - start; - - /* H264, HEVC and VP9 can run on any instance */ - if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) - continue; - - r = vcn_v4_0_5_limit_sched(p, job); - if (r) - goto out; - } - -out: - amdgpu_bo_kunmap(bo); - return r; -} - -#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002) -#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003) - -#define RADEON_VCN_ENGINE_INFO (0x30000001) -#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16 - -#define RENCODE_ENCODE_STANDARD_AV1 2 -#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003 -#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64 - -/* return the offset in ib if id is found, -1 otherwise - * to speed up the searching we only search upto max_offset - */ -static int vcn_v4_0_5_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset) -{ - int i; - - for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) { - if (ib->ptr[i + 1] == id) - return i; - } - return -1; -} - -static int vcn_v4_0_5_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, - struct amdgpu_job *job, - struct amdgpu_ib *ib) -{ - struct amdgpu_ring *ring = amdgpu_job_ring(job); - struct amdgpu_vcn_decode_buffer *decode_buffer; - uint64_t addr; - uint32_t val; - int idx; - - /* The first instance can decode anything */ - if (!ring->me) - return 0; - - /* RADEON_VCN_ENGINE_INFO is at the top of ib block */ - idx = vcn_v4_0_5_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, - 
RADEON_VCN_ENGINE_INFO_MAX_OFFSET); - if (idx < 0) /* engine info is missing */ - return 0; - - val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */ - if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { - decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; - - if (!(decode_buffer->valid_buf_flag & 0x1)) - return 0; - - addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | - decode_buffer->msg_buffer_address_lo; - return vcn_v4_0_5_dec_msg(p, job, addr); - } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { - idx = vcn_v4_0_5_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, - RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET); - if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1) - return vcn_v4_0_5_limit_sched(p, job); - } - return 0; -} - static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, @@ -1518,7 +1354,6 @@ static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .get_rptr = vcn_v4_0_5_unified_ring_get_rptr, .get_wptr = vcn_v4_0_5_unified_ring_get_wptr, .set_wptr = vcn_v4_0_5_unified_ring_set_wptr, - .patch_cs_in_place = vcn_v4_0_5_ring_patch_cs_in_place, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c index d163d92a692f..2b72d5b4949b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c @@ -341,6 +341,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, m->sdmax_rlcx_doorbell_offset = q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+ m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum + << SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT) + & SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK; + m->sdma_engine_id = q->sdma_engine_id; m->sdma_queue_id = q->sdma_queue_id;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1e069fa5211e..74bb1e0e9134 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -176,6 +176,7 @@ MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); +static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) { @@ -1740,7 +1741,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * /* Send the chunk */ ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000); if (ret != DMUB_STATUS_OK) - /* No need to free bb here since it shall be done unconditionally <elsewhere> */ + /* No need to free bb here since it shall be done in dm_sw_fini() */ return NULL; }
@@ -1755,25 +1756,41 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * static enum dmub_ips_disable_type dm_get_default_ips_mode( struct amdgpu_device *adev) { - /* - * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to - * cause a hard hang. A fix exists for newer PMFW. - * - * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest - * IPS state in all cases, except for s0ix and all displays off (DPMS), - * where IPS2 is allowed. - * - * When checking pmfw version, use the major and minor only. - */ - if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) && - (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) - return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE;
- if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0)) - return DMUB_IPS_ENABLE; + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + /* + * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to + * cause a hard hang. A fix exists for newer PMFW. + * + * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest + * IPS state in all cases, except for s0ix and all displays off (DPMS), + * where IPS2 is allowed. + * + * When checking pmfw version, use the major and minor only. + */ + if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0)) + /* + * Other ASICs with DCN35 that have residency issues with + * IPS2 in idle. + * We want them to use IPS2 only in display off cases. + */ + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + break; + case IP_VERSION(3, 5, 1): + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + break; + default: + /* ASICs older than DCN35 do not have IPSs */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0)) + ret = DMUB_IPS_DISABLE_ALL; + break; + }
- /* ASICs older than DCN35 do not have IPSs */ - return DMUB_IPS_DISABLE_ALL; + return ret; }
static int amdgpu_dm_init(struct amdgpu_device *adev) @@ -2489,8 +2506,17 @@ static int dm_sw_init(void *handle) static int dm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct dal_allocation *da; + + list_for_each_entry(da, &adev->dm.da_list, list) { + if (adev->dm.bb_from_dmub == (void *) da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + }
- kfree(adev->dm.bb_from_dmub); adev->dm.bb_from_dmub = NULL;
kfree(adev->dm.dmub_fb_info); @@ -3230,8 +3256,11 @@ static int dm_resume(void *handle) drm_connector_list_iter_end(&iter);
/* Force mode set in atomic commit */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { new_crtc_state->active_changed = true; + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + reset_freesync_config_for_crtc(dm_new_crtc_state); + }
/* * atomic_check is expected to create the dc states. We need to release @@ -4428,6 +4457,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 +#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, @@ -4442,6 +4472,21 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, return;
amdgpu_acpi_get_backlight_caps(&caps); + + /* validate the firmware value is sane */ + if (caps.caps_valid) { + int spread = caps.max_input_signal - caps.min_input_signal; + + if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + caps.min_input_signal < AMDGPU_DM_DEFAULT_MIN_BACKLIGHT || + spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + spread < AMDGPU_DM_MIN_SPREAD) { + DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", + caps.min_input_signal, caps.max_input_signal); + caps.caps_valid = false; + } + } + if (caps.caps_valid) { dm->backlight_caps[bl_idx].caps_valid = true; if (caps.aux_support) @@ -6471,7 +6516,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); + DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n", + __func__, drm_connector->name); } } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, @@ -6490,7 +6536,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_link_get_highest_encoding_format(aconnector->dc_link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", + DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n", __func__, drm_connector->name); } } @@ -11651,7 +11697,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (dc_resource_is_dsc_encoding_supported(dc)) { ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); if (ret) { - drm_dbg_atomic(dev, "compute_mst_dsc_configs_for_state() failed\n"); + drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n"); ret = -EINVAL; goto fail; } @@ -11672,7 +11718,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ ret = drm_dp_mst_atomic_check(state); if (ret) { - drm_dbg_atomic(dev, "drm_dp_mst_atomic_check() failed\n"); + drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); goto fail; } status = dc_validate_global_state(dc, dm_state->context, true); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index b490ae67b6be..8befc79478f8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -759,7 +759,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( uint8_t ret = 0;
drm_dbg_dp(aux->drm_dev, - "Configure DSC to non-virtual dpcd synaptics\n"); + "MST_DSC Configure DSC to non-virtual dpcd synaptics\n");
if (enable) { /* When DSC is enabled on previous boot and reboot with the hub, @@ -772,7 +772,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( apply_synaptics_fifo_reset_wa(aux);
ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); - DRM_INFO("Send DSC enable to synaptics\n"); + DRM_INFO("MST_DSC Send DSC enable to synaptics\n");
} else { /* Synaptics hub not support virtual dpcd, @@ -781,7 +781,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( */ if (!stream->link->link_status.link_active) { ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); - DRM_INFO("Send DSC disable to synaptics\n"); + DRM_INFO("MST_DSC Send DSC disable to synaptics\n"); } }
@@ -823,14 +823,14 @@ bool dm_helpers_dp_write_dsc_enable( DP_DSC_ENABLE, &enable_passthrough, 1); drm_dbg_dp(dev, - "Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", + "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", ret); }
ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); drm_dbg_dp(dev, - "Sent DSC decoding enable to %s port, ret = %u\n", + "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n", (port->passthrough_aux) ? "remote RX" : "virtual dpcd", ret); @@ -838,7 +838,7 @@ bool dm_helpers_dp_write_dsc_enable( ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); drm_dbg_dp(dev, - "Sent DSC decoding disable to %s port, ret = %u\n", + "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n", (port->passthrough_aux) ? "remote RX" : "virtual dpcd", ret); @@ -848,7 +848,7 @@ bool dm_helpers_dp_write_dsc_enable( DP_DSC_ENABLE, &enable_passthrough, 1); drm_dbg_dp(dev, - "Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", + "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", ret); } } @@ -858,12 +858,12 @@ bool dm_helpers_dp_write_dsc_enable( if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); drm_dbg_dp(dev, - "Send DSC %s to SST RX\n", + "SST_DSC Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); drm_dbg_dp(dev, - "Send DSC %s to DP-HDMI PCON\n", + "SST_DSC Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 2e9f6da1acdc..6c72709aa258 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -253,7 +253,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */ - if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port)) + if (is_synaptics_cascaded_panamera(aconnector->dc_link, port)) aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux) @@ -578,6 +578,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, if (!aconnector) return NULL;
+ DRM_DEBUG_DRIVER("%s: Create aconnector 0x%p for port 0x%p\n", __func__, aconnector, port); + connector = &aconnector->base; aconnector->mst_output_port = port; aconnector->mst_root = master; @@ -872,11 +874,11 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p if (params[i].sink) { if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && params[i].sink->sink_signal != SIGNAL_TYPE_NONE) - DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i, + DRM_DEBUG_DRIVER("MST_DSC %s i=%d dispname=%s\n", __func__, i, params[i].sink->edid_caps.display_name); }
- DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n", + DRM_DEBUG_DRIVER("MST_DSC dsc=%d bits_per_pixel=%d pbn=%d\n", params[i].timing->flags.DSC, params[i].timing->dsc_cfg.bits_per_pixel, vars[i + k].pbn); @@ -1054,6 +1056,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, if (next_index == -1) break;
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, @@ -1064,10 +1067,12 @@ static int try_disable_dsc(struct drm_atomic_state *state,
ret = drm_dp_mst_atomic_check(state); if (ret == 0) { + DRM_DEBUG_DRIVER("MST_DSC index #%d, greedily disable dsc\n", next_index); vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index); + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1082,6 +1087,15 @@ static int try_disable_dsc(struct drm_atomic_state *state, return 0; }
+static void log_dsc_params(int count, struct dsc_mst_fairness_vars *vars, int k) +{ + int i; + + for (i = 0; i < count; i++) + DRM_DEBUG_DRIVER("MST_DSC DSC params: stream #%d --- dsc_enabled = %d, bpp_x16 = %d, pbn = %d\n", + i, vars[i + k].dsc_enabled, vars[i + k].bpp_x16, vars[i + k].pbn); +} + static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link, @@ -1104,6 +1118,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return PTR_ERR(mst_state);
/* Set up params */ + DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count); for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0};
@@ -1132,7 +1147,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; - dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, @@ -1145,6 +1160,9 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(dc_link));
+ DRM_DEBUG_DRIVER("MST_DSC #%d stream 0x%p - max_kbps = %u, min_kbps = %u, uncompressed_kbps = %u\n", + count, stream, params[count].bw_range.max_kbps, params[count].bw_range.min_kbps, + params[count].bw_range.stream_kbps); count++; }
@@ -1159,6 +1177,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, *link_vars_start_index += count;
/* Try no compression */ + DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); @@ -1177,7 +1196,10 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return ret; }
+ log_dsc_params(count, vars, k); + /* Try max compression */ + DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); @@ -1201,14 +1223,26 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (ret != 0) return ret;
+ log_dsc_params(count, vars, k); + /* Optimize degree of compression */ + DRM_DEBUG_DRIVER("MST_DSC Try optimize compression\n"); ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); - if (ret < 0) + if (ret < 0) { + DRM_DEBUG_DRIVER("MST_DSC Failed to optimize compression\n"); return ret; + }
+ log_dsc_params(count, vars, k); + + DRM_DEBUG_DRIVER("MST_DSC Try disable compression\n"); ret = try_disable_dsc(state, dc_link, params, vars, count, k); - if (ret < 0) + if (ret < 0) { + DRM_DEBUG_DRIVER("MST_DSC Failed to disable compression\n"); return ret; + } + + log_dsc_params(count, vars, k);
set_dsc_configs_from_fairness_vars(params, vars, count, k);
@@ -1230,17 +1264,19 @@ static bool is_dsc_need_re_compute(
/* only check phy used by dsc mst branch */ if (dc_link->type != dc_connection_mst_branch) - return false; + goto out;
/* add a check for older MST DSC with no virtual DPCDs */ if (needs_dsc_aux_workaround(dc_link) && (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))) - return false; + goto out;
for (i = 0; i < MAX_PIPES; i++) stream_on_link[i] = NULL;
+ DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in new dc_state\n", __func__, dc_state->stream_count); + /* check if there is mode change in new request */ for (i = 0; i < dc_state->stream_count; i++) { struct drm_crtc_state *new_crtc_state; @@ -1250,6 +1286,8 @@ static bool is_dsc_need_re_compute( if (!stream) continue;
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC checking #%d stream 0x%p\n", __func__, __LINE__, i, stream); + /* check if stream using the same link for mst */ if (stream->link != dc_link) continue; @@ -1262,8 +1300,11 @@ static bool is_dsc_need_re_compute( new_stream_on_link_num++;
new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); - if (!new_conn_state) + if (!new_conn_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_conn_state for stream 0x%p, aconnector 0x%p\n", + __func__, __LINE__, stream, aconnector); continue; + }
if (IS_ERR(new_conn_state)) continue; @@ -1272,19 +1313,37 @@ static bool is_dsc_need_re_compute( continue;
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); - if (!new_crtc_state) + if (!new_crtc_state) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_crtc_state for crtc of stream 0x%p, aconnector 0x%p\n", + __func__, __LINE__, stream, aconnector); continue; + }
if (IS_ERR(new_crtc_state)) continue;
if (new_crtc_state->enable && new_crtc_state->active) { if (new_crtc_state->mode_changed || new_crtc_state->active_changed || - new_crtc_state->connectors_changed) - return true; + new_crtc_state->connectors_changed) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompte required." + "stream 0x%p in new dc_state\n", + __func__, __LINE__, stream); + is_dsc_need_re_compute = true; + goto out; + } } }
+ if (new_stream_on_link_num == 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC no mode change request for streams in new dc_state\n", + __func__, __LINE__); + is_dsc_need_re_compute = false; + goto out; + } + + DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in current dc_state\n", + __func__, dc->current_state->stream_count); + if (new_stream_on_link_num == 0) return false;
@@ -1310,11 +1369,18 @@ static bool is_dsc_need_re_compute(
if (j == new_stream_on_link_num) { /* not in new state */ + DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required." + "stream 0x%p in current dc_state but not in new dc_state\n", + __func__, __LINE__, stream); is_dsc_need_re_compute = true; break; } }
+out: + DRM_DEBUG_DRIVER("%s: MST_DSC dsc recompute %s\n", + __func__, is_dsc_need_re_compute ? "required" : "not required"); + return is_dsc_need_re_compute; }
@@ -1343,6 +1409,9 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ DRM_DEBUG_DRIVER("%s: MST_DSC compute mst dsc configs for stream 0x%p, aconnector 0x%p\n", + __func__, stream, aconnector); + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue;
@@ -1375,8 +1444,11 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1) - if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) + if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC Failed to request dsc hw resource for stream 0x%p\n", + __func__, __LINE__, stream); return -EINVAL; + } }
return ret; @@ -1405,6 +1477,9 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ DRM_DEBUG_DRIVER("MST_DSC pre compute mst dsc configs for #%d stream 0x%p, aconnector 0x%p\n", + i, stream, aconnector); + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) continue;
@@ -1494,12 +1569,12 @@ int pre_validate_dsc(struct drm_atomic_state *state, int ret = 0;
if (!is_dsc_precompute_needed(state)) { - DRM_INFO_ONCE("DSC precompute is not needed.\n"); + DRM_INFO_ONCE("%s:%d MST_DSC dsc precompute is not needed\n", __func__, __LINE__); return 0; } ret = dm_atomic_get_state(state, dm_state_ptr); if (ret != 0) { - DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); + DRM_INFO_ONCE("%s:%d MST_DSC dm_atomic_get_state() failed\n", __func__, __LINE__); return ret; } dm_state = *dm_state_ptr; @@ -1553,7 +1628,8 @@ int pre_validate_dsc(struct drm_atomic_state *state,
ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars); if (ret != 0) { - DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); + DRM_INFO_ONCE("%s:%d MST_DSC dsc pre_compute_mst_dsc_configs_for_state() failed\n", + __func__, __LINE__); ret = -EINVAL; goto clean_exit; } @@ -1567,12 +1643,15 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (local_dc_state->streams[i] && dc_is_timing_changed(stream, local_dc_state->streams[i])) { - DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i); + DRM_INFO_ONCE("%s:%d MST_DSC crtc[%d] needs mode_change\n", __func__, __LINE__, i); } else { int ind = find_crtc_index_in_state_by_stream(state, stream);
- if (ind >= 0) + if (ind >= 0) { + DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n", + __func__, __LINE__, stream); state->crtcs[ind].new_state->mode_changed = 0; + } } } clean_exit: @@ -1605,7 +1684,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream, { struct dc_dsc_policy dsc_policy = {0};
- dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp * 16, @@ -1697,7 +1776,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
if (stream_kbps <= end_to_end_bw_in_kbps) { - DRM_DEBUG_DRIVER("No DSC needed. End-to-end bw sufficient."); + DRM_DEBUG_DRIVER("MST_DSC no dsc required. End-to-end bw sufficient\n"); return DC_OK; }
@@ -1710,7 +1789,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( /*capable of dsc passthough. dsc bitstream along the entire path*/ if (aconnector->mst_output_port->passthrough_aux) { if (bw_range.min_kbps > end_to_end_bw_in_kbps) { - DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n"); + DRM_DEBUG_DRIVER("MST_DSC dsc passthrough and decode at endpoint" + "Max dsc compression bw can't fit into end-to-end bw\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } else { @@ -1721,7 +1801,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( /*Get last DP link BW capability*/ if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) { if (stream_kbps > end_link_bw) { - DRM_DEBUG_DRIVER("DSC decode at last link. Mode required bw can't fit into available bw\n"); + DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." + "Mode required bw can't fit into last link\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } @@ -1734,7 +1815,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); if (bw_range.min_kbps > virtual_channel_bw_in_kbps) { - DRM_DEBUG_DRIVER("DSC decode at last link. Max dsc compression can't fit into MST available bw\n"); + DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." + "Max dsc compression can't fit into MST available bw\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } @@ -1751,9 +1833,9 @@ enum dc_status dm_dp_mst_is_port_support_mode( dc_link_get_highest_encoding_format(stream->link), &stream->timing.dsc_cfg)) { stream->timing.flags.DSC = 1; - DRM_DEBUG_DRIVER("Require dsc and dsc config found\n"); + DRM_DEBUG_DRIVER("MST_DSC require dsc and dsc config found\n"); } else { - DRM_DEBUG_DRIVER("Require dsc but can't find appropriate dsc config\n"); + DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find appropriate dsc config\n"); return DC_FAIL_BANDWIDTH_VALIDATE; }
@@ -1775,11 +1857,11 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (branch_max_throughput_mps != 0 && ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) { - DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails"); + DRM_DEBUG_DRIVER("MST_DSC require dsc but max throughput mps fails\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } } else { - DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config."); + DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find common dsc config\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 70ee0089a20d..a54d7358d9e9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -1204,6 +1204,12 @@ void dcn35_clk_mgr_construct( ctx->dc->debug.disable_dpp_power_gate = false; ctx->dc->debug.disable_hubp_power_gate = false; ctx->dc->debug.disable_dsc_power_gate = false; + + /* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. */ + if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE && + ctx->dce_version == DCN_VERSION_3_5 && + ((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00)) + ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; } else { /*let's reset the config control flag*/ ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index fe3078b8789e..01c07545ef6b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -100,7 +100,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( */ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, - struct dc_dsc_policy *policy); + struct dc_dsc_policy *policy, + const enum dc_link_encoding_format link_encoding);
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index 06387b8b0aee..2baaf602815e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -859,7 +859,9 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->immediate_flip = plane_state->flip_immediate;
- plane->composition.rect_out_height_spans_vactive = plane_state->dst_rect.height >= stream->timing.v_addressable; + plane->composition.rect_out_height_spans_vactive = + plane_state->dst_rect.height >= stream->timing.v_addressable && + stream->dst.height >= stream->timing.v_addressable; }
//TODO : Could be possibly moved to a common helper layer. diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index 6547cc2c2a77..a8bebc60f059 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -810,9 +810,11 @@ static void build_synchronized_timing_groups( /* find synchronizable timing groups */ for (j = i + 1; j < display_config->display_config.num_streams; j++) { if (memcmp(master_timing, - &display_config->display_config.stream_descriptors[j].timing, - sizeof(struct dml2_timing_cfg)) == 0 && - display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) { + &display_config->display_config.stream_descriptors[j].timing, + sizeof(struct dml2_timing_cfg)) == 0 && + display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder && + (display_config->display_config.stream_descriptors[i].output.output_encoder != dml2_hdmi || //hdmi requires formats match + display_config->display_config.stream_descriptors[i].output.output_format == display_config->display_config.stream_descriptors[j].output.output_format)) { set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j); set_bit_in_bitfield(&stream_mapped_mask, j); } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index a1727e5bf024..ff5c4236ae70 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -882,7 +882,7 @@ static bool setup_dsc_config(
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
- dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy); + dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy, link_encoding); pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right; pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
@@ -1171,7 +1171,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, - struct dc_dsc_policy *policy) + struct dc_dsc_policy *policy, + const enum dc_link_encoding_format link_encoding) { uint32_t bpc = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 1f2eb2f727dc..4d6e90c49ad5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -57,6 +57,7 @@ #include "panel_cntl.h" #include "dc_state_priv.h" #include "dpcd_defs.h" +#include "dsc.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" @@ -1815,6 +1816,48 @@ static void get_edp_links_with_sink( } }
+static void clean_up_dsc_blocks(struct dc *dc) +{ + struct display_stream_compressor *dsc = NULL; + struct timing_generator *tg = NULL; + struct stream_encoder *se = NULL; + struct dccg *dccg = dc->res_pool->dccg; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + int i; + + if (dc->ctx->dce_version != DCN_VERSION_3_5 && + dc->ctx->dce_version != DCN_VERSION_3_51) + return; + + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { + struct dcn_dsc_state s = {0}; + + dsc = dc->res_pool->dscs[i]; + dsc->funcs->dsc_read_state(dsc, &s); + if (s.dsc_fw_en) { + /* disable DSC in OPTC */ + if (i < dc->res_pool->timing_generator_count) { + tg = dc->res_pool->timing_generators[i]; + tg->funcs->set_dsc_config(tg, OPTC_DSC_DISABLED, 0, 0); + } + /* disable DSC in stream encoder */ + if (i < dc->res_pool->stream_enc_count) { + se = dc->res_pool->stream_enc[i]; + se->funcs->dp_set_dsc_config(se, OPTC_DSC_DISABLED, 0, 0); + se->funcs->dp_set_dsc_pps_info_packet(se, false, NULL, true); + } + /* disable DSC block */ + if (dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, dsc->inst); + dsc->funcs->dsc_disable(dsc); + + /* power down DSC */ + if (pg_cntl != NULL) + pg_cntl->funcs->dsc_pg_control(pg_cntl, dsc->inst, false); + } + } +} + /* * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need: * 1. Power down all DC HW blocks @@ -1917,6 +1960,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
power_down_all_hw_blocks(dc); + + /* DSC could be enabled on eDP during VBIOS post. + * To clean up dsc blocks if eDP is in link but not active. + */ + if (edp_link_with_sink && (edp_stream_num == 0)) + clean_up_dsc_blocks(dc); + disable_vga_and_power_gate_all_controllers(dc); if (edp_link_with_sink && !keep_edp_vdd_on) dc->hwss.edp_power_control(edp_link_with_sink, false); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index eaeeade31ed7..2b4dae2d4b0c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -398,7 +398,11 @@ bool dcn30_set_output_transfer_func(struct dc *dc, } }
- mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + if (mpc->funcs->set_output_gamma) + mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + else + DC_LOG_ERROR("%s: set_output_gamma function pointer is NULL.\n", __func__); + return ret; }
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 05d8f81daa06..df80072174b7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -982,8 +982,19 @@ void dcn32_init_hw(struct dc *dc) dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
- if (dc->ctx->dmub_srv->dmub->fw_version < + /* for DCN401 testing only */ + dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; + if (dc->caps.dmub_caps.fams_ver == 2) { + /* FAMS2 is enabled */ + dc->debug.fams2_config.bits.enable &= true; + } else if (dc->ctx->dmub_srv->dmub->fw_version < DMUB_FW_VERSION(7, 0, 35)) { + /* FAMS2 is disabled */ + dc->debug.fams2_config.bits.enable = false; + if (dc->debug.using_dml2 && dc->res_pool->funcs->update_bw_bounding_box) { + /* update bounding box if FAMS2 disabled */ + dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); + } dc->debug.force_disable_subvp = true; dc->debug.disable_fpo_optimizations = true; } @@ -1018,6 +1029,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; + struct dcn_dsc_state dsc_state = {0}; + + if (!dsc) { + DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + + if (dsc->funcs->dsc_read_state) { + dsc->funcs->dsc_read_state(dsc, &dsc_state); + if (!dsc_state.dsc_fw_en) { + DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + }
/* Enable DSC hw block */ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index d5e9aec52a05..80db40787e01 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -375,7 +375,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; + struct dcn_dsc_state dsc_state = {0};
+ if (!dsc) { + DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + + if (dsc->funcs->dsc_read_state) { + dsc->funcs->dsc_read_state(dsc, &dsc_state); + if (!dsc_state.dsc_fw_en) { + DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + } /* Enable DSC hw block */ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c index e1257404357b..cec68c5dba13 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -28,6 +28,8 @@ #include "dccg.h" #include "clk_mgr.h"
+#define DC_LOGGER link->ctx->logger + void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size) { @@ -108,6 +110,11 @@ void enable_hpo_dp_link_output(struct dc_link *link, enum clock_source_id clock_source, const struct dc_link_settings *link_settings) { + if (!link_res->hpo_dp_link_enc) { + DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__); + return; + } + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( link->dc->res_pool->dccg, @@ -124,6 +131,11 @@ void disable_hpo_dp_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal) { + if (!link_res->hpo_dp_link_enc) { + DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__); + return; + } + link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); link_res->hpo_dp_link_enc->funcs->disable_link_phy( link_res->hpo_dp_link_enc, signal); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index ddf251901fb3..c7e67e947f16 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -2153,6 +2153,7 @@ static bool dcn35_resource_construct(
dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + dc->dml2_options.override_det_buffer_size_kbytes = true;
if (dc->config.sdpif_request_limit_words_per_umc == 0) dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 4c5e722baa3a..da9101b83e8c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -736,7 +736,7 @@ static const struct dc_debug_options debug_defaults_drv = { .hdmichar = true, .dpstream = true, .symclk32_se = true, - .symclk32_le = true, + .symclk32_le = false, .symclk_fe = true, .physymclk = false, .dpiasymclk = true, @@ -2133,6 +2133,7 @@ static bool dcn351_resource_construct(
dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + dc->dml2_options.override_det_buffer_size_kbytes = true;
if (dc->config.sdpif_request_limit_words_per_umc == 0) dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index a40e6590215a..bbd259cea4f4 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -134,7 +134,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
v_total = div64_u64(div64_u64(((unsigned long long)( frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total), 1000000); + stream->timing.h_total) + 500000, 1000000);
/* v_total cannot be less than nominal */ if (v_total < stream->timing.v_total) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index a887ab945dfa..1d024b122b0c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2569,10 +2569,14 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, } }
- return smu_cmn_send_smc_msg_with_param(smu, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, workload_mask, NULL); + if (!ret) + smu->workload_mask = workload_mask; + + return ret; }
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 7bc95c404377..b891a5e0a396 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2501,8 +2501,11 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp return -EINVAL; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type, NULL); + if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu->workload_mask = (1 << workload_type);
return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 2b45adecbed2..ba17d01e6439 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1570,10 +1570,14 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL;
- return smu_cmn_send_smc_msg_with_param(smu, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type, NULL); + if (!ret) + smu->workload_mask = 1 << workload_type; + + return ret; }
static int smu_v14_0_2_baco_enter(struct smu_context *smu) diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 1a9defa15663..e265ab3c8c92 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -422,22 +422,6 @@ static const struct drm_connector_funcs lt8912_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, };
-static enum drm_mode_status -lt8912_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - if (mode->clock > 150000) - return MODE_CLOCK_HIGH; - - if (mode->hdisplay > 1920) - return MODE_BAD_HVALUE; - - if (mode->vdisplay > 1080) - return MODE_BAD_VVALUE; - - return MODE_OK; -} - static int lt8912_connector_get_modes(struct drm_connector *connector) { const struct drm_edid *drm_edid; @@ -463,7 +447,6 @@ static int lt8912_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = { .get_modes = lt8912_connector_get_modes, - .mode_valid = lt8912_connector_mode_valid, };
static void lt8912_bridge_mode_set(struct drm_bridge *bridge, @@ -605,6 +588,23 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge) drm_bridge_hpd_disable(lt->hdmi_port); }
+static enum drm_mode_status +lt8912_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + if (mode->clock > 150000) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > 1920) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > 1080) + return MODE_BAD_VVALUE; + + return MODE_OK; +} + static enum drm_connector_status lt8912_bridge_detect(struct drm_bridge *bridge) { @@ -635,6 +635,7 @@ static const struct drm_edid *lt8912_bridge_edid_read(struct drm_bridge *bridge, static const struct drm_bridge_funcs lt8912_bridge_funcs = { .attach = lt8912_bridge_attach, .detach = lt8912_bridge_detach, + .mode_valid = lt8912_bridge_mode_valid, .mode_set = lt8912_bridge_mode_set, .enable = lt8912_bridge_enable, .detect = lt8912_bridge_detect, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 1b111e2c3347..752339d33f39 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -1174,7 +1174,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data) struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev; - ctx->drm_dev = drm_dev; + ipp->drm_dev = drm_dev; exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs, diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 6f34f573e127..a90504359e8d 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -69,6 +69,8 @@ struct mtk_crtc { /* lock for display hardware access */ struct mutex hw_lock; bool config_updating; + /* lock for config_updating to cmd buffer */ + spinlock_t config_lock; };
struct mtk_crtc_state { @@ -106,11 +108,16 @@ static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc) { + unsigned long flags; + drm_crtc_handle_vblank(&mtk_crtc->base); + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) { mtk_crtc_finish_page_flip(mtk_crtc); mtk_crtc->pending_needs_vblank = false; } + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); }
#if IS_REACHABLE(CONFIG_MTK_CMDQ) @@ -308,12 +315,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client); struct mtk_crtc_state *state; unsigned int i; + unsigned long flags;
if (data->sta < 0) return;
state = to_mtk_crtc_state(mtk_crtc->base.state);
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags); + if (mtk_crtc->config_updating) { + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + goto ddp_cmdq_cb_out; + } + state->pending_config = false;
if (mtk_crtc->pending_planes) { @@ -340,6 +354,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) mtk_crtc->pending_async_planes = false; }
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + +ddp_cmdq_cb_out: + mtk_crtc->cmdq_vblank_cnt = 0; wake_up(&mtk_crtc->cb_blocking_queue); } @@ -449,6 +467,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc) { struct drm_device *drm = mtk_crtc->base.dev; struct drm_crtc *crtc = &mtk_crtc->base; + unsigned long flags; int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { @@ -480,10 +499,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc) pm_runtime_put(drm->dev);
if (crtc->state->event && !crtc->state->active) { - spin_lock_irq(&crtc->dev->event_lock); + spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; - spin_unlock_irq(&crtc->dev->event_lock); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } }
@@ -569,9 +588,14 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) struct mtk_drm_private *priv = crtc->dev->dev_private; unsigned int pending_planes = 0, pending_async_planes = 0; int i; + unsigned long flags;
mutex_lock(&mtk_crtc->hw_lock); + + spin_lock_irqsave(&mtk_crtc->config_lock, flags); mtk_crtc->config_updating = true; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + if (needs_vblank) mtk_crtc->pending_needs_vblank = true;
@@ -625,7 +649,10 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank) mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); } #endif + spin_lock_irqsave(&mtk_crtc->config_lock, flags); mtk_crtc->config_updating = false; + spin_unlock_irqrestore(&mtk_crtc->config_lock, flags); + mutex_unlock(&mtk_crtc->hw_lock); }
@@ -1068,6 +1095,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size); drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size); mutex_init(&mtk_crtc->hw_lock); + spin_lock_init(&mtk_crtc->config_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ) i = priv->mbox_index++; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index c0b5373e90d7..7cfefb5e6221 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = submit->ring; struct drm_gem_object *obj; uint32_t *ptr, dwords; @@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit } }
+ a5xx_gpu->last_seqno[ring->id] = submit->seqno; a5xx_flush(gpu, ring, true); a5xx_preempt_trigger(gpu);
@@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1);
- /* Enable local preemption for finegrain preemption */ + /* + * Disable local preemption by default because it requires + * user-space to be aware of it and provide additional handling + * to restore rendering state or do various flushes on switch. + */ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); - OUT_RING(ring, 0x1); + OUT_RING(ring, 0x0);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */ OUT_PKT7(ring, CP_YIELD_ENABLE, 1); @@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* Write the fence to the scratch register */ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); OUT_RING(ring, submit->seqno); + a5xx_gpu->last_seqno[ring->id] = submit->seqno;
/* * Execute a CACHE_FLUSH_TS event. This will ensure that the diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index c7187bcc5e90..9c0d701fe4b8 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -34,8 +34,10 @@ struct a5xx_gpu { struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS]; struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS]; uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + uint32_t last_seqno[MSM_GPU_MAX_RINGS];
atomic_t preempt_state; + spinlock_t preempt_start_lock; struct timer_list preempt_timer;
struct drm_gem_object *shadow_bo; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index f58dd564d122..0469fea55010 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) /* Return the highest priority ringbuffer with something in it */ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); unsigned long flags; int i;
@@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
spin_lock_irqsave(&ring->preempt_lock, flags); empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + if (!empty && ring == a5xx_gpu->cur_ring) + empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i]; spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty) @@ -97,12 +101,19 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) if (gpu->nr_rings == 1) return;
+ /* + * Serialize preemption start to ensure that we always make + * decision on latest state. Otherwise we can get stuck in + * lower priority or empty ring. + */ + spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags); + /* * Try to start preemption by moving from NONE to START. If * unsuccessful, a preemption is already in flight */ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START)) - return; + goto out;
/* Get the next ring to preempt to */ ring = get_next_ring(gpu); @@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) set_preempt_state(a5xx_gpu, PREEMPT_ABORT); update_wptr(gpu, a5xx_gpu->cur_ring); set_preempt_state(a5xx_gpu, PREEMPT_NONE); - return; + goto out; }
+ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags); + /* Make sure the wptr doesn't update while we're in motion */ spin_lock_irqsave(&ring->preempt_lock, flags); a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring); @@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* And actually start the preemption */ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1); + return; + +out: + spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags); }
void a5xx_preempt_irq(struct msm_gpu *gpu) @@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu) update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE); + + /* + * Try to trigger preemption again in case there was a submit or + * retire during ring switch + */ + a5xx_preempt_trigger(gpu); }
void a5xx_preempt_hw_init(struct msm_gpu *gpu) @@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu) return;
for (i = 0; i < gpu->nr_rings; i++) { + a5xx_gpu->preempt[i]->data = 0; + a5xx_gpu->preempt[i]->info = 0; a5xx_gpu->preempt[i]->wptr = 0; a5xx_gpu->preempt[i]->rptr = 0; a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova; @@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu) } }
+ spin_lock_init(&a5xx_gpu->preempt_start_lock); timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 789a11416f7a..0fcae53c0b14 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -388,18 +388,18 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu, const u32 *debugbus_blocks, *gbif_debugbus_blocks; int i;
- if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { debugbus_blocks = gen7_0_0_debugbus_blocks; debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks); gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks; gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks); - } else if (adreno_is_a740_family(adreno_gpu)) { + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { debugbus_blocks = gen7_2_0_debugbus_blocks; debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks); gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks; gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks); } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); debugbus_blocks = gen7_9_0_debugbus_blocks; debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks); gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks; @@ -509,7 +509,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, const struct a6xx_debugbus_block *cx_debugbus_blocks;
if (adreno_is_a7xx(adreno_gpu)) { - BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu))); + BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3); cx_debugbus_blocks = a7xx_cx_debugbus_blocks; nr_cx_debugbus_blocks = ARRAY_SIZE(a7xx_cx_debugbus_blocks); } else { @@ -660,13 +660,16 @@ static void a7xx_get_dbgahb_clusters(struct msm_gpu *gpu, const struct gen7_sptp_cluster_registers *dbgahb_clusters; unsigned dbgahb_clusters_size;
- if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { dbgahb_clusters = gen7_0_0_sptp_clusters; dbgahb_clusters_size = ARRAY_SIZE(gen7_0_0_sptp_clusters); - } else { - BUG_ON(!adreno_is_a740_family(adreno_gpu)); + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { dbgahb_clusters = gen7_2_0_sptp_clusters; dbgahb_clusters_size = ARRAY_SIZE(gen7_2_0_sptp_clusters); + } else { + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); + dbgahb_clusters = gen7_9_0_sptp_clusters; + dbgahb_clusters_size = ARRAY_SIZE(gen7_9_0_sptp_clusters); }
a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state, @@ -818,14 +821,14 @@ static void a7xx_get_clusters(struct msm_gpu *gpu, const struct gen7_cluster_registers *clusters; unsigned clusters_size;
- if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { clusters = gen7_0_0_clusters; clusters_size = ARRAY_SIZE(gen7_0_0_clusters); - } else if (adreno_is_a740_family(adreno_gpu)) { + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { clusters = gen7_2_0_clusters; clusters_size = ARRAY_SIZE(gen7_2_0_clusters); } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); clusters = gen7_9_0_clusters; clusters_size = ARRAY_SIZE(gen7_9_0_clusters); } @@ -893,7 +896,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu, if (WARN_ON(datasize > A6XX_CD_DATA_SIZE)) return;
- if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 3); }
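The rewritten checks no longer enumerate individual chips; they compare the ordered family enum, so one range test covers a whole generation. A trivial sketch of the idea (enum names copied from the diff, values and ordering assumed):

enum adreno_family { ADRENO_7XX_GEN1, ADRENO_7XX_GEN2, ADRENO_7XX_GEN3 };

/* Relies purely on the enum being ordered: anything at or below GEN2 takes
 * the older register lists, everything newer must be GEN3. */
static int uses_gen2_or_older_regs(enum adreno_family family)
{
	return family <= ADRENO_7XX_GEN2;
}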
@@ -923,7 +926,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu, datasize);
out: - if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 0); } } @@ -956,14 +959,14 @@ static void a7xx_get_shaders(struct msm_gpu *gpu, unsigned num_shader_blocks; int i;
- if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { shader_blocks = gen7_0_0_shader_blocks; num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks); - } else if (adreno_is_a740_family(adreno_gpu)) { + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { shader_blocks = gen7_2_0_shader_blocks; num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks); } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); shader_blocks = gen7_9_0_shader_blocks; num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks); } @@ -1348,14 +1351,14 @@ static void a7xx_get_registers(struct msm_gpu *gpu, const u32 *pre_crashdumper_regs; const struct gen7_reg_list *reglist;
- if (adreno_is_a730(adreno_gpu)) { + if (adreno_gpu->info->family == ADRENO_7XX_GEN1) { reglist = gen7_0_0_reg_list; pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers; - } else if (adreno_is_a740_family(adreno_gpu)) { + } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) { reglist = gen7_2_0_reg_list; pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers; } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); reglist = gen7_9_0_reg_list; pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers; } @@ -1405,8 +1408,7 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu, struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); const u32 *regs;
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) || - adreno_is_a750(adreno_gpu))); + BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3); regs = gen7_0_0_post_crashdumper_registers;
a7xx_get_ahb_gpu_registers(gpu, @@ -1514,11 +1516,11 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu, const struct a6xx_indexed_registers *indexed_regs; int i, indexed_count, mempool_count;
- if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) { + if (adreno_gpu->info->family <= ADRENO_7XX_GEN2) { indexed_regs = a7xx_indexed_reglist; indexed_count = ARRAY_SIZE(a7xx_indexed_reglist); } else { - BUG_ON(!adreno_is_a750(adreno_gpu)); + BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3); indexed_regs = gen7_9_0_cp_indexed_reg_list; indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h index 260d66eccfec..9a327d543f27 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h @@ -1303,7 +1303,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = { REG_A6XX_CP_ROQ_DBG_DATA, 0x00800}, { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000}, - { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR, + { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR, REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200}, { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR, REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800}, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index ecc3fc5cec22..3896123ec51c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -478,7 +478,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname) ret = request_firmware_direct(&fw, fwname, drm->dev); if (!ret) { DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n", - newname); + fwname); adreno_gpu->fwloc = FW_LOCATION_LEGACY; goto out; } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index 3a7f7edda96b..500b7dc895d0 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -351,7 +351,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p,
drm_printf(p, "%s:%d\t%d\t%s\n", pipe2name(pipe), j, inuse, - plane ? plane->name : NULL); + plane ? plane->name : "(null)");
total += inuse; } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 9622e58dce3e..e1228fb093ee 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -119,7 +119,7 @@ struct msm_dp_desc { };
static const struct msm_dp_desc sc7180_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 }, + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} };
@@ -130,9 +130,9 @@ static const struct msm_dp_desc sc7280_dp_descs[] = { };
static const struct msm_dp_desc sc8180x_dp_descs[] = { - { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 }, - { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1 }, - { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2 }, + { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, + { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true }, + { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true }, {} };
@@ -149,7 +149,7 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = { };
static const struct msm_dp_desc sm8650_dp_descs[] = { - { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0 }, + { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true }, {} };
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 3b59137ca674..031446c87dae 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config config->pll_clock_inverters = 0x00; else config->pll_clock_inverters = 0x40; - } else { + } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) { if (pll_freq <= 1000000000ULL) config->pll_clock_inverters = 0xa0; else if (pll_freq <= 2500000000ULL) @@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config config->pll_clock_inverters = 0x00; else config->pll_clock_inverters = 0x40; + } else { + /* 4.2, 4.3 */ + if (pll_freq <= 1000000000ULL) + config->pll_clock_inverters = 0xa0; + else if (pll_freq <= 2500000000ULL) + config->pll_clock_inverters = 0x20; + else if (pll_freq <= 3500000000ULL) + config->pll_clock_inverters = 0x00; + else + config->pll_clock_inverters = 0x40; }
config->decimal_div_start = dec; diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index e5577d2a19ef..a46613283393 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -397,7 +397,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r;
mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1; @@ -435,14 +435,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i return r; }
- offset = track->cb_color_bo_offset[id] << 8; + offset = (u64)track->cb_color_bo_offset[id] << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d cb[%d] bo base %llu not aligned with %ld\n", __func__, __LINE__, id, offset, surf.base_align); return -EINVAL; }
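The added (u64) casts are the whole point of this patch: cb_color_bo_offset and friends are 32-bit, and shifting them left by 8 happens in 32-bit arithmetic, so a large offset silently loses its high bits before the result is widened for the size check. A self-contained demonstration with a made-up offset:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bo_offset = 0x02000000;            /* illustrative offset in 256-byte units */

	uint64_t lost = bo_offset << 8;             /* shift done in 32 bits: result is 0 */
	uint64_t kept = (uint64_t)bo_offset << 8;   /* widen first: 0x200000000 */

	printf("lost=0x%llx kept=0x%llx\n",
	       (unsigned long long)lost, (unsigned long long)kept);
	return 0;
}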
- offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->cb_color_bo[id])) { /* old ddx are broken they allocate bo with w*h*bpp but * program slice with ALIGN(h, 8), catch this and patch @@ -450,14 +450,14 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i */ if (!surf.mode) { uint32_t *ib = p->ib.ptr; - unsigned long tmp, nby, bsize, size, min = 0; + u64 tmp, nby, bsize, size, min = 0;
/* find the height the ddx wants */ if (surf.nby > 8) { min = surf.nby - 8; } bsize = radeon_bo_size(track->cb_color_bo[id]); - tmp = track->cb_color_bo_offset[id] << 8; + tmp = (u64)track->cb_color_bo_offset[id] << 8; for (nby = surf.nby; nby > min; nby--) { size = nby * surf.nbx * surf.bpe * surf.nsamples; if ((tmp + size * mslice) <= bsize) { @@ -469,7 +469,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i slice = ((nby * surf.nbx) / 64) - 1; if (!evergreen_surface_check(p, &surf, "cb")) { /* check if this one works */ - tmp += surf.layer_size * mslice; + tmp += (u64)surf.layer_size * mslice; if (tmp <= bsize) { ib[track->cb_color_slice_idx[id]] = slice; goto old_ddx_ok; @@ -478,9 +478,9 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i } } dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " - "offset %d, max layer %d, bo size %ld, slice %d)\n", + "offset %llu, max layer %d, bo size %ld, slice %d)\n", __func__, __LINE__, id, surf.layer_size, - track->cb_color_bo_offset[id] << 8, mslice, + (u64)track->cb_color_bo_offset[id] << 8, mslice, radeon_bo_size(track->cb_color_bo[id]), slice); dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n", __func__, __LINE__, surf.nbx, surf.nby, @@ -564,7 +564,7 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; @@ -610,18 +610,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) return r; }
- offset = track->db_s_read_offset << 8; + offset = (u64)track->db_s_read_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_s_read_bo)) { dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_s_read_offset << 8, mslice, + (u64)track->db_s_read_offset << 8, mslice, radeon_bo_size(track->db_s_read_bo)); dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n", __func__, __LINE__, track->db_depth_size, @@ -629,18 +629,18 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p) return -EINVAL; }
- offset = track->db_s_write_offset << 8; + offset = (u64)track->db_s_write_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_s_write_bo)) { dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_s_write_offset << 8, mslice, + (u64)track->db_s_write_offset << 8, mslice, radeon_bo_size(track->db_s_write_bo)); return -EINVAL; } @@ -661,7 +661,7 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) struct evergreen_cs_track *track = p->track; struct eg_surface surf; unsigned pitch, slice, mslice; - unsigned long offset; + u64 offset; int r;
mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1; @@ -708,34 +708,34 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p) return r; }
- offset = track->db_z_read_offset << 8; + offset = (u64)track->db_z_read_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil read bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_z_read_bo)) { dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_z_read_offset << 8, mslice, + (u64)track->db_z_read_offset << 8, mslice, radeon_bo_size(track->db_z_read_bo)); return -EINVAL; }
- offset = track->db_z_write_offset << 8; + offset = (u64)track->db_z_write_offset << 8; if (offset & (surf.base_align - 1)) { - dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", + dev_warn(p->dev, "%s:%d stencil write bo base %llu not aligned with %ld\n", __func__, __LINE__, offset, surf.base_align); return -EINVAL; } - offset += surf.layer_size * mslice; + offset += (u64)surf.layer_size * mslice; if (offset > radeon_bo_size(track->db_z_write_bo)) { dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, " - "offset %ld, max layer %d, bo size %ld)\n", + "offset %llu, max layer %d, bo size %ld)\n", __func__, __LINE__, surf.layer_size, - (unsigned long)track->db_z_write_offset << 8, mslice, + (u64)track->db_z_write_offset << 8, mslice, radeon_bo_size(track->db_z_write_bo)); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 10793a433bf5..d698a899eaf4 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1717,26 +1717,29 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { struct edid *edid; - int edid_size = - max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); - edid = kmalloc(edid_size, GFP_KERNEL); + int edid_size; + + if (fake_edid_record->ucFakeEDIDLength == 128) + edid_size = fake_edid_record->ucFakeEDIDLength; + else + edid_size = fake_edid_record->ucFakeEDIDLength * 128; + edid = kmemdup(&fake_edid_record->ucFakeEDIDString[0], + edid_size, GFP_KERNEL); if (edid) { - memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], - fake_edid_record->ucFakeEDIDLength); - if (drm_edid_is_valid(edid)) { rdev->mode_info.bios_hardcoded_edid = edid; rdev->mode_info.bios_hardcoded_edid_size = edid_size; - } else + } else { kfree(edid); + } } + record += struct_size(fake_edid_record, + ucFakeEDIDString, + edid_size); + } else { + /* empty fake edid record must be 3 bytes long */ + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; } - record += fake_edid_record->ucFakeEDIDLength ? - struct_size(fake_edid_record, - ucFakeEDIDString, - fake_edid_record->ucFakeEDIDLength) : - /* empty fake edid record must be 3 bytes long */ - sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index fe33092abbe7..aae48e906af1 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -434,6 +434,8 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data) HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK, RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK | RK3328_HDMI_HPD_IOE)); + + dw_hdmi_rk3328_read_hpd(dw_hdmi, data); }
static const struct dw_hdmi_phy_ops rk3228_hdmi_phy_ops = { diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index a13473b2d54c..4a9c6ea7f15d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -396,8 +396,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, if (info->is_yuv) is_yuv = true;
- if (dst_w > 3840) { - DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n"); + if (dst_w > 4096) { + DRM_DEV_ERROR(vop->dev, "Maximum dst width (4096) exceeded\n"); return; }
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index e8523abef27a..4d2db079ad4f 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -203,12 +203,14 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
ret = drm_dev_register(ddev, 0); if (ret) - goto err_put; + goto err_unload;
drm_fbdev_dma_setup(ddev, 16);
return 0;
+err_unload: + drv_unload(ddev); err_put: drm_dev_put(ddev);
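The new err_unload label restores the usual unwind order: everything set up after loading is torn down before the device reference is dropped. A generic sketch of that goto ladder (function names invented for illustration, not the stm driver's):

#include <stdio.h>

static int alloc_device(void)    { return 0; }
static int load(void)            { return 0; }
static int register_device(void) { return -1; }   /* simulate a registration failure */
static void unload(void)         { puts("unload"); }
static void put_device_ref(void) { puts("put ref"); }

static int probe(void)
{
	int ret = alloc_device();
	if (ret)
		return ret;

	ret = load();
	if (ret)
		goto err_put;

	ret = register_device();
	if (ret)
		goto err_unload;   /* previously jumped straight to err_put, leaking the loaded state */

	return 0;

err_unload:
	unload();
err_put:
	put_device_ref();
	return ret;
}

int main(void) { return probe() ? 1 : 0; }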
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 5576fdae4962..5aec1e58c968 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -1580,6 +1580,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev, ARRAY_SIZE(ltdc_drm_fmt_ycbcr_sp) + ARRAY_SIZE(ltdc_drm_fmt_ycbcr_fp)) * sizeof(*formats), GFP_KERNEL); + if (!formats) + return NULL;
for (i = 0; i < ldev->caps.pix_fmt_nb; i++) { drm_fmt = ldev->caps.pix_fmt_drm[i]; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index d57c4a5948c8..cb424604484f 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -429,6 +429,7 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector, { struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); enum drm_connector_status status = connector_status_disconnected; + int ret;
/* * NOTE: This function should really take vc4_hdmi->mutex, but @@ -441,7 +442,12 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector, * the lock for now. */
- WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); + ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); + if (ret) { + drm_err_once(connector->dev, "Failed to retain HDMI power domain: %d\n", + ret); + return connector_status_unknown; + }
if (vc4_hdmi->hpd_gpio) { if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index 9731dcd0b1bd..d0bbb1d9b1ac 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -166,7 +166,8 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt, struct xe_vm *vm, - enum xe_engine_class class, u32 flags) + enum xe_engine_class class, + u32 flags, u64 extensions) { struct xe_hw_engine *hwe, *hwe0 = NULL; enum xe_hw_engine_id id; @@ -186,7 +187,56 @@ struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe if (!logical_mask) return ERR_PTR(-ENODEV);
- return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0); + return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions); +} + +/** + * xe_exec_queue_create_bind() - Create bind exec queue. + * @xe: Xe device. + * @tile: tile which bind exec queue belongs to. + * @flags: exec queue creation flags + * @extensions: exec queue creation extensions + * + * Normalize bind exec queue creation. Bind exec queue is tied to migration VM + * for access to physical memory required for page table programming. On a + * faulting devices the reserved copy engine instance must be used to avoid + * deadlocking (user binds cannot get stuck behind faults as kernel binds which + * resolve faults depend on user binds). On non-faulting devices any copy engine + * can be used. + * + * Returns exec queue on success, ERR_PTR on failure + */ +struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, + struct xe_tile *tile, + u32 flags, u64 extensions) +{ + struct xe_gt *gt = tile->primary_gt; + struct xe_exec_queue *q; + struct xe_vm *migrate_vm; + + migrate_vm = xe_migrate_get_vm(tile->migrate); + if (xe->info.has_usm) { + struct xe_hw_engine *hwe = xe_gt_hw_engine(gt, + XE_ENGINE_CLASS_COPY, + gt->usm.reserved_bcs_instance, + false); + + if (!hwe) { + xe_vm_put(migrate_vm); + return ERR_PTR(-EINVAL); + } + + q = xe_exec_queue_create(xe, migrate_vm, + BIT(hwe->logical_instance), 1, hwe, + flags, extensions); + } else { + q = xe_exec_queue_create_class(xe, gt, migrate_vm, + XE_ENGINE_CLASS_COPY, flags, + extensions); + } + xe_vm_put(migrate_vm); + + return q; }
void xe_exec_queue_destroy(struct kref *ref) @@ -418,63 +468,6 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue return 0; }
-static const enum xe_engine_class user_to_xe_engine_class[] = { - [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER, - [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY, - [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE, - [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE, - [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE, -}; - -static struct xe_hw_engine * -find_hw_engine(struct xe_device *xe, - struct drm_xe_engine_class_instance eci) -{ - u32 idx; - - if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class)) - return NULL; - - if (eci.gt_id >= xe->info.gt_count) - return NULL; - - idx = array_index_nospec(eci.engine_class, - ARRAY_SIZE(user_to_xe_engine_class)); - - return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id), - user_to_xe_engine_class[idx], - eci.engine_instance, true); -} - -static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt, - struct drm_xe_engine_class_instance *eci, - u16 width, u16 num_placements) -{ - struct xe_hw_engine *hwe; - enum xe_hw_engine_id id; - u32 logical_mask = 0; - - if (XE_IOCTL_DBG(xe, width != 1)) - return 0; - if (XE_IOCTL_DBG(xe, num_placements != 1)) - return 0; - if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) - return 0; - - eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY; - - for_each_hw_engine(hwe, gt, id) { - if (xe_hw_engine_is_reserved(hwe)) - continue; - - if (hwe->class == - user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY]) - logical_mask |= BIT(hwe->logical_instance); - } - - return logical_mask; -} - static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt, struct drm_xe_engine_class_instance *eci, u16 width, u16 num_placements) @@ -497,7 +490,7 @@ static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
n = j * width + i;
- hwe = find_hw_engine(xe, eci[n]); + hwe = xe_hw_engine_lookup(xe, eci[n]); if (XE_IOCTL_DBG(xe, !hwe)) return 0;
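find_hw_engine() moves to xe_hw_engine.c as xe_hw_engine_lookup() (see the hunk further down) and keeps the array_index_nospec() clamp on the user-supplied engine class. A userspace analogue of that clamp, purely illustrative and not the kernel's implementation:

#include <stddef.h>

/* After the bounds check, clamp the index branchlessly so a mispredicted
 * branch cannot be used to speculatively read out of bounds. */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(idx < size);   /* all ones if in bounds */
	return idx & mask;
}

static const int class_map[5] = { 10, 11, 12, 13, 14 };

int lookup_class(size_t user_class)
{
	if (user_class >= 5)
		return -1;
	return class_map[index_nospec(user_class, 5)];
}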
@@ -536,8 +529,9 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, struct drm_xe_engine_class_instance __user *user_eci = u64_to_user_ptr(args->instances); struct xe_hw_engine *hwe; - struct xe_vm *vm, *migrate_vm; + struct xe_vm *vm; struct xe_gt *gt; + struct xe_tile *tile; struct xe_exec_queue *q = NULL; u32 logical_mask; u32 id; @@ -562,37 +556,20 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, return -EINVAL;
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) { - for_each_gt(gt, xe, id) { - struct xe_exec_queue *new; - u32 flags; - - if (xe_gt_is_media_type(gt)) - continue; - - eci[0].gt_id = gt->info.id; - logical_mask = bind_exec_queue_logical_mask(xe, gt, eci, - args->width, - args->num_placements); - if (XE_IOCTL_DBG(xe, !logical_mask)) - return -EINVAL; - - hwe = find_hw_engine(xe, eci[0]); - if (XE_IOCTL_DBG(xe, !hwe)) - return -EINVAL; - - /* The migration vm doesn't hold rpm ref */ - xe_pm_runtime_get_noresume(xe); - - flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0); + if (XE_IOCTL_DBG(xe, args->width != 1) || + XE_IOCTL_DBG(xe, args->num_placements != 1) || + XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) + return -EINVAL;
- migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate); - new = xe_exec_queue_create(xe, migrate_vm, logical_mask, - args->width, hwe, flags, - args->extensions); + for_each_tile(tile, xe, id) { + struct xe_exec_queue *new; + u32 flags = EXEC_QUEUE_FLAG_VM;
- xe_pm_runtime_put(xe); /* now held by engine */ + if (id) + flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
- xe_vm_put(migrate_vm); + new = xe_exec_queue_create_bind(xe, tile, flags, + args->extensions); if (IS_ERR(new)) { err = PTR_ERR(new); if (q) @@ -613,7 +590,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, if (XE_IOCTL_DBG(xe, !logical_mask)) return -EINVAL;
- hwe = find_hw_engine(xe, eci[0]); + hwe = xe_hw_engine_lookup(xe, eci[0]); if (XE_IOCTL_DBG(xe, !hwe)) return -EINVAL;
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h index 289a3a51d2a2..f4ba8897763f 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.h +++ b/drivers/gpu/drm/xe/xe_exec_queue.h @@ -20,7 +20,11 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v u64 extensions); struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt, struct xe_vm *vm, - enum xe_engine_class class, u32 flags); + enum xe_engine_class class, + u32 flags, u64 extensions); +struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, + struct xe_tile *tile, + u32 flags, u64 extensions);
void xe_exec_queue_fini(struct xe_exec_queue *q); void xe_exec_queue_destroy(struct kref *ref); diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 07ed9fd28f19..5c0ca61ccda7 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -5,7 +5,10 @@
#include "xe_hw_engine.h"
+#include <linux/nospec.h> + #include <drm/drm_managed.h> +#include <drm/xe_drm.h>
#include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" @@ -1135,3 +1138,31 @@ enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe) { return engine_infos[hwe->engine_id].domain; } + +static const enum xe_engine_class user_to_xe_engine_class[] = { + [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER, + [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY, + [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE, + [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE, + [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE, +}; + +struct xe_hw_engine * +xe_hw_engine_lookup(struct xe_device *xe, + struct drm_xe_engine_class_instance eci) +{ + unsigned int idx; + + if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class)) + return NULL; + + if (eci.gt_id >= xe->info.gt_count) + return NULL; + + idx = array_index_nospec(eci.engine_class, + ARRAY_SIZE(user_to_xe_engine_class)); + + return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id), + user_to_xe_engine_class[idx], + eci.engine_instance, true); +} diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h index 900c8c991430..d227ffe557eb 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.h +++ b/drivers/gpu/drm/xe/xe_hw_engine.h @@ -9,6 +9,8 @@ #include "xe_hw_engine_types.h"
struct drm_printer; +struct drm_xe_engine_class_instance; +struct xe_device;
#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN #define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN @@ -62,6 +64,11 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p); void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe);
bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe); + +struct xe_hw_engine * +xe_hw_engine_lookup(struct xe_device *xe, + struct drm_xe_engine_class_instance eci); + static inline bool xe_hw_engine_is_valid(struct xe_hw_engine *hwe) { return hwe->name; diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index c9f5673353ee..a849c48d8ac9 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -404,7 +404,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) m->q = xe_exec_queue_create_class(xe, primary_gt, vm, XE_ENGINE_CLASS_COPY, EXEC_QUEUE_FLAG_KERNEL | - EXEC_QUEUE_FLAG_PERMANENT); + EXEC_QUEUE_FLAG_PERMANENT, 0); } if (IS_ERR(m->q)) { xe_vm_close_and_put(vm); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 50e8fc49ba6c..b49bee0dfac5 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -1412,19 +1412,13 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) /* Kernel migration VM shouldn't have a circular loop.. */ if (!(flags & XE_VM_FLAG_MIGRATION)) { for_each_tile(tile, xe, id) { - struct xe_gt *gt = tile->primary_gt; - struct xe_vm *migrate_vm; struct xe_exec_queue *q; u32 create_flags = EXEC_QUEUE_FLAG_VM;
if (!vm->pt_root[id]) continue;
- migrate_vm = xe_migrate_get_vm(tile->migrate); - q = xe_exec_queue_create_class(xe, gt, migrate_vm, - XE_ENGINE_CLASS_COPY, - create_flags); - xe_vm_put(migrate_vm); + q = xe_exec_queue_create_bind(xe, tile, create_flags, 0); if (IS_ERR(q)) { err = PTR_ERR(q); goto err_close; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 2541fa2e0fa3..e86a37c3cf9c 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2322,6 +2322,9 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS3, 0); features->quirks &= ~WACOM_QUIRK_PEN_BUTTON3; break; + case WACOM_HID_WD_SEQUENCENUMBER: + wacom_wac->hid_data.sequence_number = -1; + break; } }
@@ -2446,9 +2449,15 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field wacom_wac->hid_data.barrelswitch3 = value; return; case WACOM_HID_WD_SEQUENCENUMBER: - if (wacom_wac->hid_data.sequence_number != value) - hid_warn(hdev, "Dropped %hu packets", (unsigned short)(value - wacom_wac->hid_data.sequence_number)); + if (wacom_wac->hid_data.sequence_number != value && + wacom_wac->hid_data.sequence_number >= 0) { + int sequence_size = field->logical_maximum - field->logical_minimum + 1; + int drop_count = (value - wacom_wac->hid_data.sequence_number) % sequence_size; + hid_warn(hdev, "Dropped %d packets", drop_count); + } wacom_wac->hid_data.sequence_number = value + 1; + if (wacom_wac->hid_data.sequence_number > field->logical_maximum) + wacom_wac->hid_data.sequence_number = field->logical_minimum; return; }
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 6ec499841f70..e6443740b462 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -324,7 +324,7 @@ struct hid_data { int bat_connected; int ps_connected; bool pad_input_event_flag; - unsigned short sequence_number; + int sequence_number; ktime_t time_delayed; };
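With sequence_number now signed, -1 can mean "no packet seen yet", the drop count is computed modulo the report's logical range, and the expected value wraps back to logical_minimum. A standalone sketch of the same arithmetic (not the driver code; the double modulo keeps the count non-negative across a wrap):

#include <stdio.h>

static int expected = -1;   /* -1: nothing received yet */

static void on_sequence(int value, int logical_min, int logical_max)
{
	int range = logical_max - logical_min + 1;

	if (expected >= 0 && value != expected) {
		int dropped = ((value - expected) % range + range) % range;
		printf("dropped %d packets\n", dropped);
	}

	expected = value + 1;
	if (expected > logical_max)
		expected = logical_min;   /* wrap like the hardware counter does */
}

int main(void)
{
	on_sequence(0, 0, 255);
	on_sequence(3, 0, 255);   /* packets 1 and 2 were lost -> "dropped 2" */
	return 0;
}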
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c index 7ce9a89f93a0..0ccb5eb596fc 100644 --- a/drivers/hwmon/max16065.c +++ b/drivers/hwmon/max16065.c @@ -79,7 +79,7 @@ static const bool max16065_have_current[] = { };
struct max16065_data { - enum chips type; + enum chips chip; struct i2c_client *client; const struct attribute_group *groups[4]; struct mutex update_lock; @@ -114,9 +114,10 @@ static inline int LIMIT_TO_MV(int limit, int range) return limit * range / 256; }
-static inline int MV_TO_LIMIT(int mv, int range) +static inline int MV_TO_LIMIT(unsigned long mv, int range) { - return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255); + mv = clamp_val(mv, 0, ULONG_MAX / 256); + return DIV_ROUND_CLOSEST(clamp_val(mv * 256, 0, range * 255), range); }
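The rewritten MV_TO_LIMIT clamps the millivolt value before multiplying by 256 and bounds the product to range * 255, so neither the multiplication nor the register value can overflow. The same clamp-before-multiply idea with generic names and an arbitrary example range:

#include <limits.h>
#include <stdio.h>

static int mv_to_limit(unsigned long mv, int range)
{
	/* clamp first so mv * 256 cannot wrap around */
	if (mv > ULONG_MAX / 256)
		mv = ULONG_MAX / 256;

	unsigned long scaled = mv * 256;
	unsigned long max = (unsigned long)range * 255;
	if (scaled > max)
		scaled = max;

	return (int)((scaled + range / 2) / range);   /* round to nearest */
}

int main(void)
{
	printf("%d\n", mv_to_limit(3000, 5560));   /* prints 138 for this example range */
	return 0;
}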
static inline int ADC_TO_CURR(int adc, int gain) @@ -161,10 +162,17 @@ static struct max16065_data *max16065_update_device(struct device *dev) MAX16065_CURR_SENSE); }
- for (i = 0; i < DIV_ROUND_UP(data->num_adc, 8); i++) + for (i = 0; i < 2; i++) data->fault[i] = i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
+ /* + * MAX16067 and MAX16068 have separate undervoltage and + * overvoltage alarm bits. Squash them together. + */ + if (data->chip == max16067 || data->chip == max16068) + data->fault[0] |= data->fault[1]; + data->last_updated = jiffies; data->valid = true; } @@ -513,6 +521,7 @@ static int max16065_probe(struct i2c_client *client) if (unlikely(!data)) return -ENOMEM;
+ data->chip = chip; data->client = client; mutex_init(&data->update_lock);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index ef75b63f5894..b5352900463f 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -62,6 +62,7 @@ static const struct platform_device_id ntc_thermistor_id[] = { [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 }, [NTC_LAST] = { }, }; +MODULE_DEVICE_TABLE(platform, ntc_thermistor_id);
/* * A compensation table should be sorted by the values of .ohm diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c index ac70c0b491be..dab389a5507c 100644 --- a/drivers/hwtracing/coresight/coresight-dummy.c +++ b/drivers/hwtracing/coresight/coresight-dummy.c @@ -23,6 +23,9 @@ DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink"); static int dummy_source_enable(struct coresight_device *csdev, struct perf_event *event, enum cs_mode mode) { + if (!coresight_take_mode(csdev, mode)) + return -EBUSY; + dev_dbg(csdev->dev.parent, "Dummy source enabled\n");
return 0; @@ -31,6 +34,7 @@ static int dummy_source_enable(struct coresight_device *csdev, static void dummy_source_disable(struct coresight_device *csdev, struct perf_event *event) { + coresight_set_mode(csdev, CS_MODE_DISABLED); dev_dbg(csdev->dev.parent, "Dummy source disabled\n"); }
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index e75428fa1592..610ad51cda65 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -261,6 +261,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table) { tmc_free_table_pages(sg_table); tmc_free_data_pages(sg_table); + kfree(sg_table); } EXPORT_SYMBOL_GPL(tmc_free_sg_table);
@@ -342,7 +343,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev, rc = tmc_alloc_table_pages(sg_table); if (rc) { tmc_free_sg_table(sg_table); - kfree(sg_table); return ERR_PTR(rc); }
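Moving the kfree() into tmc_free_sg_table() gives the free routine ownership of the whole object, so callers such as the allocation error path above no longer need a separate kfree and can no longer double-free. The ownership rule in miniature, with invented names:

#include <stdlib.h>

struct table { void *pages; };

/* The destructor owns the whole object: it releases the members and then
 * the container itself, so every caller follows the same single rule. */
static void table_free(struct table *t)
{
	if (!t)
		return;
	free(t->pages);
	free(t);
}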
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c index 0726f8842552..5c5a4b3fe687 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.c +++ b/drivers/hwtracing/coresight/coresight-tpdm.c @@ -449,6 +449,11 @@ static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event, return -EBUSY; }
+ if (!coresight_take_mode(csdev, mode)) { + spin_unlock(&drvdata->spinlock); + return -EBUSY; + } + __tpdm_enable(drvdata); drvdata->enable = true; spin_unlock(&drvdata->spinlock); @@ -506,6 +511,7 @@ static void tpdm_disable(struct coresight_device *csdev, }
__tpdm_disable(drvdata); + coresight_set_mode(csdev, CS_MODE_DISABLED); drvdata->enable = false; spin_unlock(&drvdata->spinlock);
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index ce8c4846b7fa..2a03a221e2dd 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -170,6 +170,13 @@ struct aspeed_i2c_bus {
static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
+/* precondition: bus.lock has been acquired. */ +static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) +{ + bus->master_state = ASPEED_I2C_MASTER_STOP; + writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); +} + static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) { unsigned long time_left, flags; @@ -187,7 +194,7 @@ static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) command);
reinit_completion(&bus->cmd_complete); - writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); + aspeed_i2c_do_stop(bus); spin_unlock_irqrestore(&bus->lock, flags);
time_left = wait_for_completion_timeout( @@ -390,13 +397,6 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus) writel(command, bus->base + ASPEED_I2C_CMD_REG); }
-/* precondition: bus.lock has been acquired. */ -static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) -{ - bus->master_state = ASPEED_I2C_MASTER_STOP; - writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); -} - /* precondition: bus.lock has been acquired. */ static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus) { diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index 33dbc19d3848..f59158489ad9 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c @@ -99,8 +99,7 @@ static int sch_transaction(void) if (retries > MAX_RETRIES) { dev_err(&sch_adapter.dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; - } - if (temp & 0x04) { + } else if (temp & 0x04) { result = -EIO; dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be " "locked until next hard reset. (sorry!)\n"); diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 19468565120e..d3ca7d2f81a6 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -844,23 +844,11 @@ static int xiic_bus_busy(struct xiic_i2c *i2c) return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0; }
-static int xiic_busy(struct xiic_i2c *i2c) +static int xiic_wait_not_busy(struct xiic_i2c *i2c) { int tries = 3; int err;
- if (i2c->tx_msg || i2c->rx_msg) - return -EBUSY; - - /* In single master mode bus can only be busy, when in use by this - * driver. If the register indicates bus being busy for some reason we - * should ignore it, since bus will never be released and i2c will be - * stuck forever. - */ - if (i2c->singlemaster) { - return 0; - } - /* for instance if previous transfer was terminated due to TX error * it might be that the bus is on it's way to become available * give it at most 3 ms to wake @@ -1104,13 +1092,36 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
mutex_lock(&i2c->lock);
- ret = xiic_busy(i2c); - if (ret) { + if (i2c->tx_msg || i2c->rx_msg) { dev_err(i2c->adap.dev.parent, "cannot start a transfer while busy\n"); + ret = -EBUSY; goto out; }
+ /* In single master mode bus can only be busy, when in use by this + * driver. If the register indicates bus being busy for some reason we + * should ignore it, since bus will never be released and i2c will be + * stuck forever. + */ + if (!i2c->singlemaster) { + ret = xiic_wait_not_busy(i2c); + if (ret) { + /* If the bus is stuck in a busy state, such as due to spurious low + * pulses on the bus causing a false start condition to be detected, + * then try to recover by re-initializing the controller and check + * again if the bus is still busy. + */ + dev_warn(i2c->adap.dev.parent, "I2C bus busy timeout, reinitializing\n"); + ret = xiic_reinit(i2c); + if (ret) + goto out; + ret = xiic_wait_not_busy(i2c); + if (ret) + goto out; + } + } + i2c->tx_msg = msgs; i2c->rx_msg = NULL; i2c->nmsgs = num; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 9aab7abc2ae9..88470602b789 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -120,6 +120,12 @@ static unsigned int mwait_substates __initdata; */ #define CPUIDLE_FLAG_INIT_XSTATE BIT(17)
+/* + * Ignore the sub-state when matching mwait hints between the ACPI _CST and + * custom tables. + */ +#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH BIT(18) + /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) @@ -1022,6 +1028,47 @@ static struct cpuidle_state spr_cstates[] __initdata = { .enter = NULL } };
+static struct cpuidle_state gnr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 4, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, + .exit_latency = 170, + .target_residency = 650, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6P", + .desc = "MWAIT 0x21", + .flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, + .exit_latency = 210, + .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state atom_cstates[] __initdata = { { .name = "C1E", @@ -1315,7 +1362,8 @@ static struct cpuidle_state srf_cstates[] __initdata = { { .name = "C6S", .desc = "MWAIT 0x22", - .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 270, .target_residency = 700, .enter = &intel_idle, @@ -1323,7 +1371,8 @@ static struct cpuidle_state srf_cstates[] __initdata = { { .name = "C6SP", .desc = "MWAIT 0x23", - .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 310, .target_residency = 900, .enter = &intel_idle, @@ -1453,6 +1502,12 @@ static const struct idle_cpu idle_cpu_spr __initconst = { .use_acpi = true, };
+static const struct idle_cpu idle_cpu_gnr __initconst = { + .state_table = gnr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_avn __initconst = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, @@ -1533,6 +1588,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &idle_cpu_gmt), X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &idle_cpu_spr), + X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &idle_cpu_gnr), X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &idle_cpu_knl), X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &idle_cpu_bxt), @@ -1692,7 +1748,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) } }
-static bool __init intel_idle_off_by_default(u32 mwait_hint) +static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) { int cstate, limit;
@@ -1709,7 +1765,15 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint) * the interesting states are ACPI_CSTATE_FFH. */ for (cstate = 1; cstate < limit; cstate++) { - if (acpi_state_table.states[cstate].address == mwait_hint) + u32 acpi_hint = acpi_state_table.states[cstate].address; + u32 table_hint = mwait_hint; + + if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) { + acpi_hint &= ~MWAIT_SUBSTATE_MASK; + table_hint &= ~MWAIT_SUBSTATE_MASK; + } + + if (acpi_hint == table_hint) return false; } return true; @@ -1719,7 +1783,10 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint)
static inline bool intel_idle_acpi_cst_extract(void) { return false; } static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } -static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) +{ + return false; +} #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
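CPUIDLE_FLAG_PARTIAL_HINT_MATCH only changes how a custom-table entry is matched against the ACPI _CST entries: both MWAIT hints have their sub-state nibble masked off before the comparison. A compact illustration, assuming the sub-state mask is the low nibble (0xf):

#include <stdbool.h>
#include <stdint.h>

#define SUBSTATE_MASK 0x0fu   /* low nibble of the MWAIT hint (assumed 0xf) */

static bool hint_matches(uint32_t acpi_hint, uint32_t table_hint, bool partial)
{
	if (partial) {
		acpi_hint &= ~SUBSTATE_MASK;
		table_hint &= ~SUBSTATE_MASK;
	}
	return acpi_hint == table_hint;
}

/* e.g. hint_matches(0x20, 0x21, true) is true: C6 vs C6P differ only in the
 * sub-state, which a partial match deliberately ignores. */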
/** @@ -2046,7 +2113,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && - intel_idle_off_by_default(mwait_hint) && + intel_idle_off_by_default(state->flags, mwait_hint) && !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))) state->flags |= CPUIDLE_FLAG_OFF;
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index c321c6ef48df..8f9ee56c8bed 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -212,9 +212,9 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val) struct ad7606_state *st = iio_priv(indio_dev); DECLARE_BITMAP(values, 3);
- values[0] = val; + values[0] = val & GENMASK(2, 0);
- gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc, + gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc, st->gpio_os->info, values);
/* AD7616 requires a reset to update value */ @@ -419,7 +419,7 @@ static int ad7606_request_gpios(struct ad7606_state *st) return PTR_ERR(st->gpio_range);
st->gpio_standby = devm_gpiod_get_optional(dev, "standby", - GPIOD_OUT_HIGH); + GPIOD_OUT_LOW); if (IS_ERR(st->gpio_standby)) return PTR_ERR(st->gpio_standby);
@@ -662,7 +662,7 @@ static int ad7606_suspend(struct device *dev)
if (st->gpio_standby) { gpiod_set_value(st->gpio_range, 1); - gpiod_set_value(st->gpio_standby, 0); + gpiod_set_value(st->gpio_standby, 1); }
return 0; diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c index 263a778bcf25..287a0591533b 100644 --- a/drivers/iio/adc/ad7606_spi.c +++ b/drivers/iio/adc/ad7606_spi.c @@ -249,8 +249,9 @@ static int ad7616_sw_mode_config(struct iio_dev *indio_dev) static int ad7606B_sw_mode_config(struct iio_dev *indio_dev) { struct ad7606_state *st = iio_priv(indio_dev); - unsigned long os[3] = {1}; + DECLARE_BITMAP(os, 3);
+ bitmap_fill(os, 3); /* * Software mode is enabled when all three oversampling * pins are set to high. If oversampling gpios are defined @@ -258,7 +259,7 @@ static int ad7606B_sw_mode_config(struct iio_dev *indio_dev) * otherwise, they must be hardwired to VDD */ if (st->gpio_os) { - gpiod_set_array_value(ARRAY_SIZE(os), + gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc, st->gpio_os->info, os); } /* OS of 128 and 256 are available only in software mode */ diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c index 500f56834b01..a6bf689833da 100644 --- a/drivers/iio/chemical/bme680_core.c +++ b/drivers/iio/chemical/bme680_core.c @@ -10,6 +10,7 @@ */ #include <linux/acpi.h> #include <linux/bitfield.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/module.h> @@ -52,6 +53,7 @@ struct bme680_calib { struct bme680_data { struct regmap *regmap; struct bme680_calib bme680; + struct mutex lock; /* Protect multiple serial R/W ops to device. */ u8 oversampling_temp; u8 oversampling_press; u8 oversampling_humid; @@ -827,6 +829,8 @@ static int bme680_read_raw(struct iio_dev *indio_dev, { struct bme680_data *data = iio_priv(indio_dev);
+ guard(mutex)(&data->lock); + switch (mask) { case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { @@ -871,6 +875,8 @@ static int bme680_write_raw(struct iio_dev *indio_dev, { struct bme680_data *data = iio_priv(indio_dev);
+ guard(mutex)(&data->lock); + if (val2 != 0) return -EINVAL;
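guard(mutex) comes from the newly included linux/cleanup.h and releases data->lock automatically on every return path of the read/write callbacks. A rough userspace analogue using the compiler cleanup attribute (a GCC/Clang extension, shown only to convey the scope-based unlock, not the kernel macro itself):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t *lock_guard(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	return m;
}

static void unlock_guard(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

static int read_raw(int chan)
{
	__attribute__((cleanup(unlock_guard)))
	pthread_mutex_t *guard = lock_guard(&lock);

	if (chan < 0)
		return -1;   /* unlock runs automatically here */
	printf("read channel %d\n", chan);
	return 0;            /* ...and here */
}

int main(void)
{
	return read_raw(0);
}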
@@ -967,6 +973,7 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, name = bme680_match_acpi_device(dev);
data = iio_priv(indio_dev); + mutex_init(&data->lock); dev_set_drvdata(dev, indio_dev); data->regmap = regmap; indio_dev->name = name; diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index dd466c5fa621..ccbebe5b66cd 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -1081,7 +1081,6 @@ static const struct of_device_id ak8975_of_match[] = { { .compatible = "asahi-kasei,ak09912", .data = &ak_def_array[AK09912] }, { .compatible = "ak09912", .data = &ak_def_array[AK09912] }, { .compatible = "asahi-kasei,ak09916", .data = &ak_def_array[AK09916] }, - { .compatible = "ak09916", .data = &ak_def_array[AK09916] }, {} }; MODULE_DEVICE_TABLE(of, ak8975_of_match); diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 6791df64a5fe..b7c078b7f7cf 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1640,8 +1640,10 @@ int ib_cache_setup_one(struct ib_device *device)
rdma_for_each_port (device, p) { err = ib_cache_update(device, p, true, true, true); - if (err) + if (err) { + gid_table_cleanup_one(device); return err; + } }
return 0; diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 1a6339f3a63f..7e3a55349e10 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -1182,7 +1182,7 @@ static int __init iw_cm_init(void) if (ret) return ret;
- iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0); + iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM); if (!iwcm_wq) goto err_alloc;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 7c757351a016..982e85ba211b 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1042,6 +1042,8 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) / sizeof(struct bnxt_qplib_sge)); + if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2)) + psn_nume = roundup_pow_of_two(psn_nume); bytes += (psn_nume * psn_sz); }
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 040ba2224f9f..b3757c6a0457 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1222,6 +1222,8 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) int ret;
ep = lookup_atid(t, atid); + if (!ep) + return -EINVAL;
pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid, be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); @@ -2279,6 +2281,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) int ret = 0;
ep = lookup_atid(t, atid); + if (!ep) + return -EINVAL; + la = (struct sockaddr_in *)&ep->com.local_addr; ra = (struct sockaddr_in *)&ep->com.remote_addr; la6 = (struct sockaddr_in6 *)&ep->com.local_addr; diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index d7e1cbf9f5c2..7f6e3eaca3bb 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -1544,11 +1544,31 @@ int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, return ret; }
+static enum ib_qp_state query_qp_state(struct erdma_qp *qp) +{ + switch (qp->attrs.state) { + case ERDMA_QP_STATE_IDLE: + return IB_QPS_INIT; + case ERDMA_QP_STATE_RTR: + return IB_QPS_RTR; + case ERDMA_QP_STATE_RTS: + return IB_QPS_RTS; + case ERDMA_QP_STATE_CLOSING: + return IB_QPS_ERR; + case ERDMA_QP_STATE_TERMINATE: + return IB_QPS_ERR; + case ERDMA_QP_STATE_ERROR: + return IB_QPS_ERR; + default: + return IB_QPS_ERR; + } +} + int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { - struct erdma_qp *qp; struct erdma_dev *dev; + struct erdma_qp *qp;
if (ibqp && qp_attr && qp_init_attr) { qp = to_eqp(ibqp); @@ -1575,6 +1595,9 @@ int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
+ qp_attr->qp_state = query_qp_state(qp); + qp_attr->cur_qp_state = query_qp_state(qp); + return 0; }
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 3e02c474f59f..4fc5b9d5fea8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -64,8 +64,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, u8 tc_mode = 0; int ret;
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) - return -EOPNOTSUPP; + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) { + ret = -EOPNOTSUPP; + goto err_out; + }
ah->av.port = rdma_ah_get_port_num(ah_attr); ah->av.gid_index = grh->sgid_index; @@ -83,7 +85,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, ret = 0;
if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) - return ret; + goto err_out;
if (tc_mode == HNAE3_TC_MAP_MODE_DSCP && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) @@ -91,8 +93,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, else ah->av.sl = rdma_ah_get_sl(ah_attr);
- if (!check_sl_valid(hr_dev, ah->av.sl)) - return -EINVAL; + if (!check_sl_valid(hr_dev, ah->av.sl)) { + ret = -EINVAL; + goto err_out; + }
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 02baa853a76c..c7c167e2a045 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -1041,9 +1041,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level) * @bt_level: base address table level * @unit: ba entries per bt page */ -static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) +static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit) { - u32 step; + u64 step; int max; int i;
@@ -1079,7 +1079,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, { struct hns_roce_buf_region *r; int total = 0; - int step; + u64 step; int i;
for (i = 0; i < region_cnt; i++) { @@ -1110,7 +1110,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, int ret = 0; int max_ofs; int level; - u32 step; + u64 step; int end;
if (hopnum <= 1) @@ -1134,10 +1134,12 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
/* config L1 bt to last bt and link them to corresponding parent */ for (level = 1; level < hopnum; level++) { - cur = hem_list_search_item(&mid_bt[level], offset); - if (cur) { - hem_ptrs[level] = cur; - continue; + if (!hem_list_is_bottom_bt(hopnum, level)) { + cur = hem_list_search_item(&mid_bt[level], offset); + if (cur) { + hem_ptrs[level] = cur; + continue; + } }
step = hem_list_calc_ba_range(hopnum, level, unit); @@ -1147,7 +1149,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, }
start_aligned = (distance / step) * step + r->offset; - end = min_t(int, start_aligned + step - 1, max_ofs); + end = min_t(u64, start_aligned + step - 1, max_ofs); cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit, true); if (!cur) { @@ -1235,7 +1237,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base, struct hns_roce_hem_item *hem, *temp_hem; int total = 0; int offset; - int step; + u64 step;
step = hem_list_calc_ba_range(r->hopnum, 1, unit); if (step < 1) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 621b057fb9da..24e906b9d3ae 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1681,8 +1681,8 @@ static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) { bd_idx = i / CNT_PER_DESC; - if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) && - bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC) + if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC && + !(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT))) break;
cnt_data = (__le64 *)&desc[bd_idx].data[0]; @@ -2972,6 +2972,9 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + free_mr_exit(hr_dev); + hns_roce_function_clear(hr_dev);
if (!hr_dev->is_vf) @@ -4423,12 +4426,14 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, upper_32_bits(to_hr_hw_page_addr(mtts[0]))); hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
- context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1])); - qpc_mask->rq_nxt_blk_addr = 0; - - hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, - upper_32_bits(to_hr_hw_page_addr(mtts[1]))); - hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { + context->rq_nxt_blk_addr = + cpu_to_le32(to_hr_hw_page_addr(mtts[1])); + qpc_mask->rq_nxt_blk_addr = 0; + hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H, + upper_32_bits(to_hr_hw_page_addr(mtts[1]))); + hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H); + }
return 0; } @@ -6193,6 +6198,7 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, struct pci_dev *pdev = hr_dev->pci_dev; struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); const struct hnae3_ae_ops *ops = ae_dev->ops; + enum hnae3_reset_type reset_type; irqreturn_t int_work = IRQ_NONE; u32 int_en;
@@ -6204,10 +6210,12 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);
+ reset_type = hr_dev->is_vf ? + HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET; + /* Set reset level for reset_event() */ if (ops->set_default_reset_request) - ops->set_default_reset_request(ae_dev, - HNAE3_FUNC_RESET); + ops->set_default_reset_request(ae_dev, reset_type); if (ops->reset_event) ops->reset_event(pdev, NULL);
@@ -6277,7 +6285,7 @@ static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data) res_type == ECC_RESOURCE_SCCC) return le64_to_cpu(*data);
- return le64_to_cpu(*data) << PAGE_SHIFT; + return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT; }
static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type, @@ -6949,9 +6957,6 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; hns_roce_handle_device_err(hr_dev);
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) - free_mr_exit(hr_dev); - hns_roce_exit(hr_dev); kfree(hr_dev->priv); ib_dealloc_device(&hr_dev->ib_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 1de384ce4d0e..6b03ba671ff8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1460,19 +1460,19 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq) __acquire(&send_cq->lock); __acquire(&recv_cq->lock); } else if (unlikely(send_cq != NULL && recv_cq == NULL)) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); __acquire(&recv_cq->lock); } else if (unlikely(send_cq == NULL && recv_cq != NULL)) { - spin_lock_irq(&recv_cq->lock); + spin_lock(&recv_cq->lock); __acquire(&send_cq->lock); } else if (send_cq == recv_cq) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); __acquire(&recv_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { - spin_lock_irq(&send_cq->lock); + spin_lock(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { - spin_lock_irq(&recv_cq->lock); + spin_lock(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } @@ -1492,13 +1492,13 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, spin_unlock(&recv_cq->lock); } else if (send_cq == recv_cq) { __release(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + spin_unlock(&send_cq->lock); } else if (send_cq->cqn < recv_cq->cqn) { spin_unlock(&recv_cq->lock); - spin_unlock_irq(&send_cq->lock); + spin_unlock(&send_cq->lock); } else { spin_unlock(&send_cq->lock); - spin_unlock_irq(&recv_cq->lock); + spin_unlock(&recv_cq->lock); } }
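[Editor's note] The lock_cqs/unlock_cqs hunks above drop the _irq lock variants for plain spin_lock()/spin_unlock(). That only makes sense if every caller already runs with interrupts disabled around these helpers, likely via spin_lock_irqsave() on an outer lock: spin_unlock_irq() re-enables interrupts unconditionally, which would be wrong inside such a region. A runnable toy simulation of that calling convention (all names are made up, kernel semantics only mimicked):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool irqs_disabled_sim;

	static void local_irq_save_sim(void)	{ irqs_disabled_sim = true; }
	static void local_irq_restore_sim(void)	{ irqs_disabled_sim = false; }

	/* Plain lock helper: documents that the caller must have disabled
	 * "interrupts" already, so it neither disables nor re-enables them. */
	static void lock_cqs_plain(void)
	{
		assert(irqs_disabled_sim);
		/* spin_lock(&send_cq->lock); spin_lock_nested(&recv_cq->lock, ...); */
	}

	int main(void)
	{
		local_irq_save_sim();	/* outer path, e.g. a destroy-QP routine */
		lock_cqs_plain();	/* helper no longer toggles the IRQ state */
		local_irq_restore_sim();
		printf("ok\n");
		return 0;
	}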
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index fc0ce35da14e..c7475c925d1b 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -1347,7 +1347,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) { ibdev_err(&iwdev->ibdev, "rd_atomic = %d, above max_hw_ird=%d\n", - attr->max_rd_atomic, + attr->max_dest_rd_atomic, dev->hw_attrs.max_hw_ird); return -EINVAL; } diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 6048b9ad13bb..5926fd07a602 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -550,7 +550,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u32 port_num, if (!ndev) goto out;
- if (dev->lag_active) { + if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) { rcu_read_lock(); upper = netdev_master_upper_dev_get_rcu(ndev); if (upper) { diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d5eb1b726675..85118b7cb63d 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -796,6 +796,7 @@ struct mlx5_cache_ent { u8 is_tmp:1; u8 disabled:1; u8 fill_to_high_water:1; + u8 tmp_cleanup_scheduled:1;
/* * - limit is the low water mark for stored mkeys, 2* limit is the @@ -827,7 +828,6 @@ struct mlx5_mkey_cache { struct mutex rb_lock; struct dentry *fs_root; unsigned long last_add; - struct delayed_work remove_ent_dwork; };
struct mlx5_ib_port_resources { diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 98bd8eaa393e..e4db3a9569c1 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -48,6 +48,7 @@ enum { MAX_PENDING_REG_MR = 8, };
+#define MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS 4 #define MLX5_UMR_ALIGN 2048
static void @@ -211,9 +212,9 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
spin_lock_irqsave(&ent->mkeys_queue.lock, flags); push_mkey_locked(ent, mkey_out->mkey); + ent->pending--; /* If we are doing fill_to_high_water then keep going. */ queue_adjust_cache_locked(ent); - ent->pending--; spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags); kfree(mkey_out); } @@ -527,6 +528,21 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) } }
+static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) +{ + u32 mkey; + + spin_lock_irq(&ent->mkeys_queue.lock); + while (ent->mkeys_queue.ci) { + mkey = pop_mkey_locked(ent); + spin_unlock_irq(&ent->mkeys_queue.lock); + mlx5_core_destroy_mkey(dev->mdev, mkey); + spin_lock_irq(&ent->mkeys_queue.lock); + } + ent->tmp_cleanup_scheduled = false; + spin_unlock_irq(&ent->mkeys_queue.lock); +} + static void __cache_work_func(struct mlx5_cache_ent *ent) { struct mlx5_ib_dev *dev = ent->dev; @@ -598,7 +614,11 @@ static void delayed_cache_work_func(struct work_struct *work) struct mlx5_cache_ent *ent;
ent = container_of(work, struct mlx5_cache_ent, dwork.work); - __cache_work_func(ent); + /* temp entries are never filled, only cleaned */ + if (ent->is_tmp) + clean_keys(ent->dev, ent); + else + __cache_work_func(ent); }
static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1, @@ -659,6 +679,7 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, { struct rb_node *node = dev->cache.rb_root.rb_node; struct mlx5_cache_ent *cur, *smallest = NULL; + u64 ndescs_limit; int cmp;
/* @@ -677,10 +698,18 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev, return cur; }
+ /* + * Limit the usage of mkeys larger than twice the required size while + * also allowing the usage of smallest cache entry for small MRs. + */ + ndescs_limit = max_t(u64, rb_key.ndescs * 2, + MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS); + return (smallest && smallest->rb_key.access_mode == rb_key.access_mode && smallest->rb_key.access_flags == rb_key.access_flags && - smallest->rb_key.ats == rb_key.ats) ? + smallest->rb_key.ats == rb_key.ats && + smallest->rb_key.ndescs <= ndescs_limit) ? smallest : NULL; } @@ -765,21 +794,6 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, return _mlx5_mr_cache_alloc(dev, ent, access_flags); }
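[Editor's note] Per the new comment, ndescs_limit bounds how oversized a reused cache entry may be: at most twice the requested descriptor count, but never below the 4-descriptor persistent minimum. A tiny worked example of the comparison, with illustrative request sizes:

	#include <stdint.h>
	#include <stdio.h>

	#define PERSISTENT_MIN_DESCS 4	/* mirrors MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS */

	static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

	int main(void)
	{
		uint64_t requested[] = { 1, 3, 100 };
		int i;

		for (i = 0; i < 3; i++) {
			uint64_t limit = max_u64(requested[i] * 2, PERSISTENT_MIN_DESCS);

			/* 1 -> 4, 3 -> 6, 100 -> 200: an entry is reused only if
			 * its ndescs does not exceed this limit. */
			printf("ndescs=%llu limit=%llu\n",
			       (unsigned long long)requested[i],
			       (unsigned long long)limit);
		}
		return 0;
	}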
-static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) -{ - u32 mkey; - - cancel_delayed_work(&ent->dwork); - spin_lock_irq(&ent->mkeys_queue.lock); - while (ent->mkeys_queue.ci) { - mkey = pop_mkey_locked(ent); - spin_unlock_irq(&ent->mkeys_queue.lock); - mlx5_core_destroy_mkey(dev->mdev, mkey); - spin_lock_irq(&ent->mkeys_queue.lock); - } - spin_unlock_irq(&ent->mkeys_queue.lock); -} - static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) { if (!mlx5_debugfs_root || dev->is_rep) @@ -892,10 +906,6 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, ent->limit = 0;
mlx5_mkey_cache_debugfs_add_ent(dev, ent); - } else { - mod_delayed_work(ent->dev->cache.wq, - &ent->dev->cache.remove_ent_dwork, - msecs_to_jiffies(30 * 1000)); }
return ent; @@ -906,35 +916,6 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, return ERR_PTR(ret); }
-static void remove_ent_work_func(struct work_struct *work) -{ - struct mlx5_mkey_cache *cache; - struct mlx5_cache_ent *ent; - struct rb_node *cur; - - cache = container_of(work, struct mlx5_mkey_cache, - remove_ent_dwork.work); - mutex_lock(&cache->rb_lock); - cur = rb_last(&cache->rb_root); - while (cur) { - ent = rb_entry(cur, struct mlx5_cache_ent, node); - cur = rb_prev(cur); - mutex_unlock(&cache->rb_lock); - - spin_lock_irq(&ent->mkeys_queue.lock); - if (!ent->is_tmp) { - spin_unlock_irq(&ent->mkeys_queue.lock); - mutex_lock(&cache->rb_lock); - continue; - } - spin_unlock_irq(&ent->mkeys_queue.lock); - - clean_keys(ent->dev, ent); - mutex_lock(&cache->rb_lock); - } - mutex_unlock(&cache->rb_lock); -} - int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) { struct mlx5_mkey_cache *cache = &dev->cache; @@ -950,7 +931,6 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) mutex_init(&dev->slow_path_mutex); mutex_init(&dev->cache.rb_lock); dev->cache.rb_root = RB_ROOT; - INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func); cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); if (!cache->wq) { mlx5_ib_warn(dev, "failed to create work queue\n"); @@ -962,7 +942,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) mlx5_mkey_cache_debugfs_init(dev); mutex_lock(&cache->rb_lock); for (i = 0; i <= mkey_cache_max_order(dev); i++) { - rb_key.ndescs = 1 << (i + 2); + rb_key.ndescs = MLX5_MR_CACHE_PERSISTENT_ENTRY_MIN_DESCS << i; ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); if (IS_ERR(ent)) { ret = PTR_ERR(ent); @@ -1001,7 +981,6 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) return;
mutex_lock(&dev->cache.rb_lock); - cancel_delayed_work(&dev->cache.remove_ent_dwork); for (node = rb_first(root); node; node = rb_next(node)) { ent = rb_entry(node, struct mlx5_cache_ent, node); spin_lock_irq(&ent->mkeys_queue.lock); @@ -1852,8 +1831,18 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
- if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) + if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) { + ent = mr->mmkey.cache_ent; + /* upon storing to a clean temp entry - schedule its cleanup */ + spin_lock_irq(&ent->mkeys_queue.lock); + if (ent->is_tmp && !ent->tmp_cleanup_scheduled) { + mod_delayed_work(ent->dev->cache.wq, &ent->dwork, + msecs_to_jiffies(30 * 1000)); + ent->tmp_cleanup_scheduled = true; + } + spin_unlock_irq(&ent->mkeys_queue.lock); return 0; + }
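[Editor's note] With remove_ent_dwork removed, cleanup of temporary cache entries is now armed lazily from mlx5_revoke_mr(): the first mkey stored into a clean temporary entry schedules that entry's own delayed work (30 s), and the tmp_cleanup_scheduled bit prevents re-arming until clean_keys() clears it under the same lock. A compact sketch of this schedule-once-under-lock pattern, using userspace stand-ins rather than the mlx5 API:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct cache_ent {
		pthread_mutex_t lock;
		bool cleanup_scheduled;
		int armed;			/* how many times work was actually armed */
	};

	static void schedule_cleanup(struct cache_ent *ent)
	{
		pthread_mutex_lock(&ent->lock);
		if (!ent->cleanup_scheduled) {	/* arm the delayed work only once */
			ent->armed++;		/* stand-in for mod_delayed_work() */
			ent->cleanup_scheduled = true;
		}
		pthread_mutex_unlock(&ent->lock);
	}

	static void cleanup_work(struct cache_ent *ent)
	{
		pthread_mutex_lock(&ent->lock);
		/* ... destroy cached keys ... */
		ent->cleanup_scheduled = false;	/* allow the next store to re-arm */
		pthread_mutex_unlock(&ent->lock);
	}

	int main(void)
	{
		struct cache_ent ent = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

		schedule_cleanup(&ent);		/* arms */
		schedule_cleanup(&ent);		/* no-op: already scheduled */
		cleanup_work(&ent);
		schedule_cleanup(&ent);		/* arms again */
		printf("armed %d times\n", ent.armed);	/* 2 */
		return 0;
	}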
if (ent) { spin_lock_irq(&ent->mkeys_queue.lock); diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c index ffc31b01f690..8823ecc84e60 100644 --- a/drivers/infiniband/hw/mlx5/umr.c +++ b/drivers/infiniband/hw/mlx5/umr.c @@ -224,6 +224,9 @@ int mlx5r_umr_init(struct mlx5_ib_dev *dev)
void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev) { + if (!dev->umrc.pd) + return; + mutex_destroy(&dev->umrc.init_lock); ib_dealloc_pd(dev->umrc.pd); } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 88106cf5ce55..84d2dfcd20af 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -626,6 +626,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) */ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done)) return; + clt_path->s.hb_missed_cnt = 0; rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); if (imm_type == RTRS_IO_RSP_IMM || @@ -643,7 +644,6 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) return rtrs_clt_recv_done(con, wc); } else if (imm_type == RTRS_HB_ACK_IMM) { WARN_ON(con->c.cid); - clt_path->s.hb_missed_cnt = 0; clt_path->s.hb_cur_latency = ktime_sub(ktime_get(), clt_path->s.hb_last_sent); if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) @@ -670,6 +670,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) /* * Key invalidations from server side */ + clt_path->s.hb_missed_cnt = 0; WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || wc->wc_flags & IB_WC_WITH_IMM)); WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); @@ -2346,6 +2347,12 @@ static int init_conns(struct rtrs_clt_path *clt_path) if (err) goto destroy; } + + /* + * Set the cid to con_num - 1, since if we fail later, we want to stay in bounds. + */ + cid = clt_path->s.con_num - 1; + err = alloc_path_reqs(clt_path); if (err) goto destroy; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index 1d33efb8fb03..94ac99a4f696 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -1229,6 +1229,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) */ if (WARN_ON(wc->wr_cqe != &io_comp_cqe)) return; + srv_path->s.hb_missed_cnt = 0; err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index 1b0279393df4..5acaffb7f6e1 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -627,7 +627,7 @@ static int adp5588_setup(struct adp5588_kpad *kpad)
for (i = 0; i < KEYP_MAX_EVENT; i++) { ret = adp5588_read(client, KEY_EVENTA); - if (ret) + if (ret < 0) return ret; }
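[Editor's note] The adp5588 change above reads as a classic data-or-errno fix: the register read presumably returns the value read (>= 0, here a queued key event being drained) or a negative errno, so "if (ret)" wrongly treated valid data as a failure. A minimal illustration of the two checks, with a made-up read helper:

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in for a read that returns the value read (>= 0) or -errno. */
	static int read_reg(int fail)
	{
		return fail ? -EIO : 0x42;
	}

	int main(void)
	{
		int ret = read_reg(0);

		if (ret)	/* old check: 0x42 is valid data, yet treated as an error */
			printf("old check would bail with %d\n", ret);

		if (ret < 0)	/* new check: only negative values are errors */
			printf("error %d\n", ret);
		else
			printf("data 0x%02x\n", ret);
		return 0;
	}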
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index c086dadb45e3..058f3470b7ae 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1067,7 +1067,7 @@ static ssize_t ims_pcu_attribute_store(struct device *dev, if (data_len > attr->field_length) return -EINVAL;
- scoped_cond_guard(mutex, return -EINTR, &pcu->cmd_mutex) { + scoped_cond_guard(mutex_intr, return -EINTR, &pcu->cmd_mutex) { memset(field, 0, attr->field_length); memcpy(field, buf, data_len);
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index bad238f69a7a..34d1f07ea4c3 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -1120,6 +1120,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, .driver_data = (void *)(SERIO_QUIRK_NOLOOP) }, + /* + * Some TongFang barebones have touchpad and/or keyboard issues after + * suspend fixable with nomux + reset + noloop + nopnp. Luckily, none of + * them have an external PS/2 port so this can safely be set for all of + * them. + * TongFang barebones come with board_vendor and/or system_vendor set to + * a different value for each individual reseller. The only somewhat + * universal way to identify them is by board_name. + */ + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxXGxX"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, /* * A lot of modern Clevo barebones have touchpad and/or keyboard issues * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c index 3eb762896345..5a807ad72319 100644 --- a/drivers/input/touchscreen/ilitek_ts_i2c.c +++ b/drivers/input/touchscreen/ilitek_ts_i2c.c @@ -37,6 +37,8 @@ #define ILITEK_TP_CMD_GET_MCU_VER 0x61 #define ILITEK_TP_CMD_GET_IC_MODE 0xC0
+#define ILITEK_TP_I2C_REPORT_ID 0x48 + #define REPORT_COUNT_ADDRESS 61 #define ILITEK_SUPPORT_MAX_POINT 40
@@ -160,15 +162,19 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) error = ilitek_i2c_write_and_read(ts, NULL, 0, 0, buf, 64); if (error) { dev_err(dev, "get touch info failed, err:%d\n", error); - goto err_sync_frame; + return error; + } + + if (buf[0] != ILITEK_TP_I2C_REPORT_ID) { + dev_err(dev, "get touch info failed. Wrong id: 0x%02X\n", buf[0]); + return -EINVAL; }
report_max_point = buf[REPORT_COUNT_ADDRESS]; if (report_max_point > ts->max_tp) { dev_err(dev, "FW report max point:%d > panel info. max:%d\n", report_max_point, ts->max_tp); - error = -EINVAL; - goto err_sync_frame; + return -EINVAL; }
count = DIV_ROUND_UP(report_max_point, packet_max_point); @@ -178,7 +184,7 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) if (error) { dev_err(dev, "get touch info. failed, cnt:%d, err:%d\n", count, error); - goto err_sync_frame; + return error; } }
@@ -203,10 +209,10 @@ static int ilitek_process_and_report_v6(struct ilitek_ts_data *ts) ilitek_touch_down(ts, id, x, y); }
-err_sync_frame: input_mt_sync_frame(input); input_sync(input); - return error; + + return 0; }
/* APIs of cmds for ILITEK Touch IC */ diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c index f788db15cd76..b956e4050f38 100644 --- a/drivers/interconnect/icc-clk.c +++ b/drivers/interconnect/icc-clk.c @@ -87,6 +87,7 @@ struct icc_provider *icc_clk_register(struct device *dev, onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL); if (!onecell) return ERR_PTR(-ENOMEM); + onecell->num_nodes = 2 * num_clocks;
qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL); if (!qp) @@ -133,8 +134,6 @@ struct icc_provider *icc_clk_register(struct device *dev, onecell->nodes[j++] = node; }
- onecell->num_nodes = j; - ret = icc_provider_register(provider); if (ret) goto err; diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c index b321c3009acb..885a9d3f92e4 100644 --- a/drivers/interconnect/qcom/sm8350.c +++ b/drivers/interconnect/qcom/sm8350.c @@ -1965,6 +1965,7 @@ static struct platform_driver qnoc_driver = { .driver = { .name = "qnoc-sm8350", .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, }, }; module_platform_driver(qnoc_driver); diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 1074ee25064d..05aed3cb46f1 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -574,20 +574,24 @@ static void v1_free_pgtable(struct io_pgtable *iop) pgtable->mode > PAGE_MODE_6_LEVEL);
free_sub_pt(pgtable->root, pgtable->mode, &freelist); + iommu_put_pages_list(&freelist);
/* Update data structure */ amd_iommu_domain_clr_pt_root(dom);
/* Make changes visible to IOMMUs */ amd_iommu_domain_update(dom); - - iommu_put_pages_list(&freelist); }
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) { struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+ pgtable->root = iommu_alloc_page(GFP_KERNEL); + if (!pgtable->root) + return NULL; + pgtable->mode = PAGE_MODE_3_LEVEL; + cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES; cfg->ias = IOMMU_IN_ADDR_BIT_SIZE; cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE; diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c index 664e91c88748..f9227cbf75df 100644 --- a/drivers/iommu/amd/io_pgtable_v2.c +++ b/drivers/iommu/amd/io_pgtable_v2.c @@ -51,7 +51,7 @@ static inline u64 set_pgtable_attr(u64 *page) u64 prot;
prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER; - prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY; + prot |= IOMMU_PAGE_ACCESS;
return (iommu_virt_to_phys(page) | prot); } @@ -362,7 +362,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo struct protection_domain *pdom = (struct protection_domain *)cookie; int ias = IOMMU_IN_ADDR_BIT_SIZE;
- pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_ATOMIC); + pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_KERNEL); if (!pgtable->pgd) return NULL;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index b19e8c0f48fa..1a61f14459e4 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -52,8 +52,6 @@ #define HT_RANGE_START (0xfd00000000ULL) #define HT_RANGE_END (0xffffffffffULL)
-#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL - static DEFINE_SPINLOCK(pd_bitmap_lock);
LIST_HEAD(ioapic_map); @@ -1552,8 +1550,8 @@ void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data, void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data, ioasid_t pasid) { - amd_iommu_dev_flush_pasid_pages(dev_data, 0, - CMD_INV_IOMMU_ALL_PAGES_ADDRESS, pasid); + amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0, + CMD_INV_IOMMU_ALL_PAGES_ADDRESS); }
void amd_iommu_domain_flush_complete(struct protection_domain *domain) @@ -2185,11 +2183,12 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev) dev_err(dev, "Failed to initialize - trying to proceed anyway\n"); iommu_dev = ERR_PTR(ret); iommu_ignore_device(iommu, dev); - } else { - amd_iommu_set_pci_msi_domain(dev, iommu); - iommu_dev = &iommu->iommu; + goto out_err; }
+ amd_iommu_set_pci_msi_domain(dev, iommu); + iommu_dev = &iommu->iommu; + /* * If IOMMU and device supports PASID then it will contain max * supported PASIDs, else it will be zero. @@ -2201,6 +2200,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev) pci_max_pasids(to_pci_dev(dev))); }
+out_err: iommu_completion_wait(iommu);
return iommu_dev; @@ -2265,47 +2265,17 @@ void protection_domain_free(struct protection_domain *domain) if (domain->iop.pgtbl_cfg.tlb) free_io_pgtable_ops(&domain->iop.iop.ops);
- if (domain->iop.root) - iommu_free_page(domain->iop.root); - if (domain->id) domain_id_free(domain->id);
kfree(domain); }
-static int protection_domain_init_v1(struct protection_domain *domain, int mode) -{ - u64 *pt_root = NULL; - - BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL); - - if (mode != PAGE_MODE_NONE) { - pt_root = iommu_alloc_page(GFP_KERNEL); - if (!pt_root) - return -ENOMEM; - } - - domain->pd_mode = PD_MODE_V1; - amd_iommu_domain_set_pgtable(domain, pt_root, mode); - - return 0; -} - -static int protection_domain_init_v2(struct protection_domain *pdom) -{ - pdom->pd_mode = PD_MODE_V2; - pdom->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2; - - return 0; -} - struct protection_domain *protection_domain_alloc(unsigned int type) { struct io_pgtable_ops *pgtbl_ops; struct protection_domain *domain; int pgtable; - int ret;
domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) @@ -2341,18 +2311,14 @@ struct protection_domain *protection_domain_alloc(unsigned int type)
switch (pgtable) { case AMD_IOMMU_V1: - ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL); + domain->pd_mode = PD_MODE_V1; break; case AMD_IOMMU_V2: - ret = protection_domain_init_v2(domain); + domain->pd_mode = PD_MODE_V2; break; default: - ret = -EINVAL; - break; - } - - if (ret) goto out_err; + }
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain); if (!pgtbl_ops) @@ -2405,10 +2371,10 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, domain->domain.geometry.aperture_start = 0; domain->domain.geometry.aperture_end = dma_max_address(); domain->domain.geometry.force_aperture = true; + domain->domain.pgsize_bitmap = domain->iop.iop.cfg.pgsize_bitmap;
if (iommu) { domain->domain.type = type; - domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap; domain->domain.ops = iommu->iommu.ops->default_domain_ops;
if (dirty_tracking) @@ -2867,7 +2833,6 @@ const struct iommu_ops amd_iommu_ops = { .device_group = amd_iommu_device_group, .get_resv_regions = amd_iommu_get_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, - .pgsize_bitmap = AMD_IOMMU_PGSIZES, .def_domain_type = amd_iommu_def_domain_type, .dev_enable_feat = amd_iommu_dev_enable_feature, .dev_disable_feat = amd_iommu_dev_disable_feature, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index ed2b106e02dd..f490385c1360 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3062,8 +3062,8 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags, return ERR_PTR(-EOPNOTSUPP);
smmu_domain = arm_smmu_domain_alloc(); - if (!smmu_domain) - return ERR_PTR(-ENOMEM); + if (IS_ERR(smmu_domain)) + return ERR_CAST(smmu_domain);
smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED; smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index 36c6b36ad4ff..6372f3e25c4b 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -282,6 +282,20 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) u32 smr; int i;
+ /* + * MSM8998 LPASS SMMU reports 13 context banks, but accessing + * the last context bank crashes the system. + */ + if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") && + smmu->num_context_banks == 13) { + smmu->num_context_banks = 12; + } else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) { + if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */ + smmu->num_context_banks = 7; + else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */ + smmu->num_context_banks = 13; + } + /* * Some platforms support more than the Arm SMMU architected maximum of * 128 stream matching groups. For unknown reasons, the additional @@ -338,6 +352,19 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) return 0; }
+static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu) +{ + /* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */ + smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K; + + /* TZ protects several last context banks, hide them from Linux */ + if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") && + smmu->num_context_banks == 5) + smmu->num_context_banks = 2; + + return 0; +} + static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) { struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; @@ -436,6 +463,7 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = { .init_context = qcom_adreno_smmu_init_context, + .cfg_probe = qcom_adreno_smmuv2_cfg_probe, .def_domain_type = qcom_smmu_def_domain_type, .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank, .write_sctlr = qcom_adreno_smmu_write_sctlr, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 723273440c21..8321962b3714 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -417,7 +417,7 @@ void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx, void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx, const struct arm_smmu_context_fault_info *cfi) { - dev_dbg(smmu->dev, + dev_err(smmu->dev, "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index aefde4443671..d06bf6e6c19f 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -225,7 +225,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) || !user_data->len || !ops->domain_alloc_user) return ERR_PTR(-EOPNOTSUPP); - if (parent->auto_domain || !parent->nest_parent) + if (parent->auto_domain || !parent->nest_parent || + parent->common.domain->owner != ops) return ERR_PTR(-EINVAL);
hwpt_nested = __iommufd_object_alloc( diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index 05fd9d3abf1b..9f193c933de6 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -112,6 +112,7 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova, unsigned long page_offset = uptr % PAGE_SIZE; struct interval_tree_double_span_iter used_span; struct interval_tree_span_iter allowed_span; + unsigned long max_alignment = PAGE_SIZE; unsigned long iova_alignment;
lockdep_assert_held(&iopt->iova_rwsem); @@ -131,6 +132,13 @@ static int iopt_alloc_iova(struct io_pagetable *iopt, unsigned long *iova, roundup_pow_of_two(length), 1UL << __ffs64(uptr));
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE + max_alignment = HPAGE_SIZE; +#endif + /* Protect against ALIGN() overflow */ + if (iova_alignment >= max_alignment) + iova_alignment = max_alignment; + if (iova_alignment < iopt->iova_alignment) return -EINVAL;
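[Editor's note] Capping iova_alignment (at HPAGE_SIZE with transparent hugepages, PAGE_SIZE otherwise) protects the later ALIGN() arithmetic, as the new comment says: with a huge power-of-two alignment derived from roundup_pow_of_two(length), (x + a - 1) & ~(a - 1) can wrap in an unsigned long and yield an "aligned" address below x. A small 64-bit demonstration with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* same shape as kernel ALIGN() */

	int main(void)
	{
		uint64_t span_start = 0xffffffffff000000ULL;	/* example IOVA candidate */
		uint64_t huge_align = 1ULL << 40;		/* oversized alignment */
		uint64_t sane_align = 2ULL << 20;		/* 2 MiB, a typical HPAGE_SIZE */

		/* x + a - 1 exceeds 2^64 and wraps, so the result drops below x. */
		printf("huge: %#llx\n", (unsigned long long)ALIGN_UP(span_start, huge_align));
		printf("sane: %#llx\n", (unsigned long long)ALIGN_UP(span_start, sane_align));
		return 0;
	}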
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 222cfc11ebfd..4a279b8f02cb 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -1342,7 +1342,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, unsigned long page_size, void __user *uptr, u32 flags) { - unsigned long bitmap_size, i, max; + unsigned long i, max; struct iommu_test_cmd *cmd = ucmd->cmd; struct iommufd_hw_pagetable *hwpt; struct mock_iommu_domain *mock; @@ -1363,15 +1363,14 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, }
max = length / page_size; - bitmap_size = DIV_ROUND_UP(max, BITS_PER_BYTE); - - tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); + tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long), + GFP_KERNEL_ACCOUNT); if (!tmp) { rc = -ENOMEM; goto out_put; }
- if (copy_from_user(tmp, uptr, bitmap_size)) { + if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) { rc = -EFAULT; goto out_free; } diff --git a/drivers/leds/leds-bd2606mvv.c b/drivers/leds/leds-bd2606mvv.c index 3fda712d2f80..c1181a35d0f7 100644 --- a/drivers/leds/leds-bd2606mvv.c +++ b/drivers/leds/leds-bd2606mvv.c @@ -69,16 +69,14 @@ static const struct regmap_config bd2606mvv_regmap = {
static int bd2606mvv_probe(struct i2c_client *client) { - struct fwnode_handle *np, *child; struct device *dev = &client->dev; struct bd2606mvv_priv *priv; struct fwnode_handle *led_fwnodes[BD2606_MAX_LEDS] = { 0 }; int active_pairs[BD2606_MAX_LEDS / 2] = { 0 }; int err, reg; - int i; + int i, j;
- np = dev_fwnode(dev); - if (!np) + if (!dev_fwnode(dev)) return -ENODEV;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -94,20 +92,18 @@ static int bd2606mvv_probe(struct i2c_client *client)
i2c_set_clientdata(client, priv);
- fwnode_for_each_available_child_node(np, child) { + device_for_each_child_node_scoped(dev, child) { struct bd2606mvv_led *led;
err = fwnode_property_read_u32(child, "reg", &reg); - if (err) { - fwnode_handle_put(child); + if (err) return err; - } - if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) { - fwnode_handle_put(child); + + if (reg < 0 || reg >= BD2606_MAX_LEDS || led_fwnodes[reg]) return -EINVAL; - } + led = &priv->leds[reg]; - led_fwnodes[reg] = child; + led_fwnodes[reg] = fwnode_handle_get(child); active_pairs[reg / 2]++; led->priv = priv; led->led_no = reg; @@ -130,7 +126,8 @@ static int bd2606mvv_probe(struct i2c_client *client) &priv->leds[i].ldev, &init_data); if (err < 0) { - fwnode_handle_put(child); + for (j = i; j < BD2606_MAX_LEDS; j++) + fwnode_handle_put(led_fwnodes[j]); return dev_err_probe(dev, err, "couldn't register LED %s\n", priv->leds[i].ldev.name); diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 83fcd7b6afff..4d1612d557c8 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -150,7 +150,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) { struct fwnode_handle *child; struct gpio_leds_priv *priv; - int count, ret; + int count, used, ret;
count = device_get_child_node_count(dev); if (!count) @@ -159,9 +159,11 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) priv = devm_kzalloc(dev, struct_size(priv, leds, count), GFP_KERNEL); if (!priv) return ERR_PTR(-ENOMEM); + priv->num_leds = count; + used = 0;
device_for_each_child_node(dev, child) { - struct gpio_led_data *led_dat = &priv->leds[priv->num_leds]; + struct gpio_led_data *led_dat = &priv->leds[used]; struct gpio_led led = {};
/* @@ -197,8 +199,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev) /* Set gpiod label to match the corresponding LED name. */ gpiod_set_consumer_name(led_dat->gpiod, led_dat->cdev.dev->kobj.name); - priv->num_leds++; + used++; } + priv->num_leds = used;
return priv; } diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c index 78215dff1499..3e56ce90b4b9 100644 --- a/drivers/leds/leds-pca995x.c +++ b/drivers/leds/leds-pca995x.c @@ -102,16 +102,14 @@ static const struct regmap_config pca995x_regmap = { static int pca995x_probe(struct i2c_client *client) { struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 }; - struct fwnode_handle *np, *child; struct device *dev = &client->dev; struct pca995x_chip *chip; struct pca995x_led *led; - int i, btype, reg, ret; + int i, j, btype, reg, ret;
btype = (unsigned long)device_get_match_data(&client->dev);
- np = dev_fwnode(dev); - if (!np) + if (!dev_fwnode(dev)) return -ENODEV;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); @@ -125,20 +123,16 @@ static int pca995x_probe(struct i2c_client *client)
i2c_set_clientdata(client, chip);
- fwnode_for_each_available_child_node(np, child) { + device_for_each_child_node_scoped(dev, child) { ret = fwnode_property_read_u32(child, "reg", &reg); - if (ret) { - fwnode_handle_put(child); + if (ret) return ret; - }
- if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) { - fwnode_handle_put(child); + if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) return -EINVAL; - }
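[Editor's note] Both the bd2606mvv hunks above and the pca995x hunks continuing just below switch to device_for_each_child_node_scoped(), which drops the iterator's child reference automatically at the end of the loop scope. Any fwnode stored past the loop (the led_fwnodes[] array consumed later during registration) must therefore pin its own reference with fwnode_handle_get() and release it on the error path, which is what the new for (j = i; ...) fwnode_handle_put() unwinding does. A generic refcount sketch of the same rule, in plain C rather than the fwnode API:

	#include <stdio.h>

	struct node { int refcount; };

	static struct node *node_get(struct node *n) { n->refcount++; return n; }
	static void node_put(struct node *n)	     { n->refcount--; }

	int main(void)
	{
		struct node children[3] = { {1}, {1}, {1} };
		struct node *saved[3] = { 0 };
		int i, j;

		/* "Scoped" iteration drops its own reference each pass, so
		 * anything kept for later use must take an extra reference. */
		for (i = 0; i < 3; i++)
			saved[i] = node_get(&children[i]);

		/* Error path: release every reference still held. */
		for (j = 0; j < 3; j++)
			if (saved[j])
				node_put(saved[j]);

		printf("refcounts: %d %d %d\n",
		       children[0].refcount, children[1].refcount, children[2].refcount);
		return 0;
	}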
led = &chip->leds[reg]; - led_fwnodes[reg] = child; + led_fwnodes[reg] = fwnode_handle_get(child); led->chip = chip; led->led_no = reg; led->ldev.brightness_set_blocking = pca995x_brightness_set; @@ -157,7 +151,8 @@ static int pca995x_probe(struct i2c_client *client) &chip->leds[i].ldev, &init_data); if (ret < 0) { - fwnode_handle_put(child); + for (j = i; j < PCA995X_MAX_OUTPUTS; j++) + fwnode_handle_put(led_fwnodes[j]); return dev_err_probe(dev, ret, "Could not register LED %s\n", chip->leds[i].ldev.name); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index acff2f64f251..4545d934f73d 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -4717,13 +4717,18 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv ti->error = "Block size doesn't match the information in superblock"; goto bad; } - if (!le32_to_cpu(ic->sb->journal_sections) != (ic->mode == 'I')) { - r = -EINVAL; - if (ic->mode != 'I') + if (ic->mode != 'I') { + if (!le32_to_cpu(ic->sb->journal_sections)) { + r = -EINVAL; ti->error = "Corrupted superblock, journal_sections is 0"; - else + goto bad; + } + } else { + if (le32_to_cpu(ic->sb->journal_sections)) { + r = -EINVAL; ti->error = "Corrupted superblock, journal_sections is not 0"; - goto bad; + goto bad; + } } /* make sure that ti->max_io_len doesn't overflow */ if (!ic->meta_dev) { diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index f7e9a3632eb3..499f8cc8a39f 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -496,8 +496,10 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
map = dm_get_live_table(md, &srcu_idx); if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); dm_put_live_table(md, srcu_idx); - return BLK_STS_RESOURCE; + return BLK_STS_IOERR; } ti = dm_table_find_target(map, 0); dm_put_live_table(md, srcu_idx); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 87bb90303435..ff4a6b570b76 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2030,10 +2030,15 @@ static void dm_submit_bio(struct bio *bio) struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx); + if (unlikely(!map)) { + DMERR_LIMIT("%s: mapping table unavailable, erroring io", + dm_device_name(md)); + bio_io_error(bio); + goto out; + }
- /* If suspended, or map not yet available, queue this IO for later */ - if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || - unlikely(!map)) { + /* If suspended, queue this IO for later */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); else if (bio->bi_opf & REQ_RAHEAD) diff --git a/drivers/md/md.c b/drivers/md/md.c index d3a837506a36..23cc77d51676 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8668,7 +8668,6 @@ void md_write_start(struct mddev *mddev, struct bio *bi) BUG_ON(mddev->ro == MD_RDONLY); if (mddev->ro == MD_AUTO_READ) { /* need to switch to read/write */ - flush_work(&mddev->sync_work); mddev->ro = MD_RDWR; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c index 30d10fe4b33e..320aa2bf99d4 100644 --- a/drivers/media/dvb-frontends/rtl2830.c +++ b/drivers/media/dvb-frontends/rtl2830.c @@ -609,7 +609,7 @@ static int rtl2830_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, int on index, pid, onoff);
/* skip invalid PIDs (0x2000) */ - if (pid > 0x1fff || index > 32) + if (pid > 0x1fff || index >= 32) return 0;
if (onoff) diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c index 5142820b1b3d..76c3f40443b2 100644 --- a/drivers/media/dvb-frontends/rtl2832.c +++ b/drivers/media/dvb-frontends/rtl2832.c @@ -983,7 +983,7 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid, index, pid, onoff, dev->slave_ts);
/* skip invalid PIDs (0x2000) */ - if (pid > 0x1fff || index > 32) + if (pid > 0x1fff || index >= 32) return 0;
if (onoff) diff --git a/drivers/media/platform/imagination/Kconfig b/drivers/media/platform/imagination/Kconfig index 7139ae22219b..a302c955483d 100644 --- a/drivers/media/platform/imagination/Kconfig +++ b/drivers/media/platform/imagination/Kconfig @@ -2,6 +2,7 @@ config VIDEO_E5010_JPEG_ENC tristate "Imagination E5010 JPEG Encoder Driver" depends on VIDEO_DEV + depends on ARCH_K3 || COMPILE_TEST select VIDEOBUF2_DMA_CONTIG select VIDEOBUF2_VMALLOC select V4L2_MEM2MEM_DEV diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c index 73d5cef33b2a..1e1b32faac77 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c @@ -347,11 +347,16 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs, return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + } + src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + y_fb_dma = fb->base_y.dma_addr; + c_fb_dma = fb->base_c.dma_addr;
mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p", inst->num_nalu, y_fb_dma, c_fb_dma, fb); diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c index 2d4611e7fa0b..1ed0ccec5665 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c @@ -724,11 +724,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs return vpu_dec_reset(vpu);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + } + src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + y_fb_dma = fb->base_y.dma_addr; + c_fb_dma = fb->base_c.dma_addr; mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx", inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c index e27e728f392e..232ef3bd246a 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c @@ -334,14 +334,18 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs, src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); - dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); + if (!fb) { + mtk_vdec_err(inst->ctx, "fb buffer is NULL"); + return -ENOMEM; + }
- y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; + dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); + y_fb_dma = fb->base_y.dma_addr; if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1) c_fb_dma = y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h; else - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + c_fb_dma = fb->base_c.dma_addr;
inst->vsi->dec.bs_dma = (u64)bs->dma_addr; inst->vsi->dec.bs_sz = bs->size; diff --git a/drivers/media/platform/raspberrypi/pisp_be/Kconfig b/drivers/media/platform/raspberrypi/pisp_be/Kconfig index 38c0f8305d62..46765a2e4c4d 100644 --- a/drivers/media/platform/raspberrypi/pisp_be/Kconfig +++ b/drivers/media/platform/raspberrypi/pisp_be/Kconfig @@ -2,6 +2,7 @@ config VIDEO_RASPBERRYPI_PISP_BE tristate "Raspberry Pi PiSP Backend (BE) ISP driver" depends on V4L_PLATFORM_DRIVERS depends on VIDEO_DEV + depends on ARCH_BCM2835 || COMPILE_TEST select VIDEO_V4L2_SUBDEV_API select MEDIA_CONTROLLER select VIDEOBUF2_DMA_CONTIG diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c index e68fcdaea207..c7fdee347ac8 100644 --- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c +++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c @@ -865,6 +865,7 @@ static const struct of_device_id rzg2l_csi2_of_table[] = { { .compatible = "renesas,rzg2l-csi2", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, rzg2l_csi2_of_table);
static struct platform_driver rzg2l_csi2_pdrv = { .remove_new = rzg2l_csi2_remove, diff --git a/drivers/media/tuners/tuner-i2c.h b/drivers/media/tuners/tuner-i2c.h index 07aeead0644a..724952e001cd 100644 --- a/drivers/media/tuners/tuner-i2c.h +++ b/drivers/media/tuners/tuner-i2c.h @@ -133,10 +133,8 @@ static inline int tuner_i2c_xfer_send_recv(struct tuner_i2c_props *props, } \ if (0 == __ret) { \ state = kzalloc(sizeof(type), GFP_KERNEL); \ - if (!state) { \ - __ret = -ENOMEM; \ + if (NULL == state) \ goto __fail; \ - } \ state->i2c_props.addr = i2caddr; \ state->i2c_props.adap = i2cadap; \ state->i2c_props.name = devname; \ diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 66044f4f5bad..10cd1d9b4885 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -207,6 +207,9 @@ static int powernv_flash_set_driver_info(struct device *dev, * get them */ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); + if (!mtd->name) + return -ENOMEM; + mtd->type = MTD_NORFLASH; mtd->flags = MTD_WRITEABLE; mtd->size = size; diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 28131a127d06..8297b366a066 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -296,10 +296,12 @@ static int __init init_slram(void) T("slram: devname = %s\n", devname); if ((!map) || (!(devstart = strsep(&map, ",")))) { E("slram: No devicestart specified.\n"); + break; } T("slram: devstart = %s\n", devstart); if ((!map) || (!(devlength = strsep(&map, ",")))) { E("slram: No devicelength / -end specified.\n"); + break; } T("slram: devlength = %s\n", devlength); if (parse_cmdline(devname, devstart, devlength) != 0) { diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 17477bb2d48f..586868b4139f 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1429,16 +1429,32 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, return 0; }
+static void mtk_nfc_nand_chips_cleanup(struct mtk_nfc *nfc) +{ + struct mtk_nfc_nand_chip *mtk_chip; + struct nand_chip *chip; + int ret; + + while (!list_empty(&nfc->chips)) { + mtk_chip = list_first_entry(&nfc->chips, + struct mtk_nfc_nand_chip, node); + chip = &mtk_chip->nand; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&mtk_chip->node); + } +} + static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc) { struct device_node *np = dev->of_node; - struct device_node *nand_np; int ret;
- for_each_child_of_node(np, nand_np) { + for_each_child_of_node_scoped(np, nand_np) { ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np); if (ret) { - of_node_put(nand_np); + mtk_nfc_nand_chips_cleanup(nfc); return ret; } } @@ -1570,20 +1586,8 @@ static int mtk_nfc_probe(struct platform_device *pdev) static void mtk_nfc_remove(struct platform_device *pdev) { struct mtk_nfc *nfc = platform_get_drvdata(pdev); - struct mtk_nfc_nand_chip *mtk_chip; - struct nand_chip *chip; - int ret; - - while (!list_empty(&nfc->chips)) { - mtk_chip = list_first_entry(&nfc->chips, - struct mtk_nfc_nand_chip, node); - chip = &mtk_chip->nand; - ret = mtd_device_unregister(nand_to_mtd(chip)); - WARN_ON(ret); - nand_cleanup(chip); - list_del(&mtk_chip->node); - }
+ mtk_nfc_nand_chips_cleanup(nfc); mtk_ecc_release(nfc->ecc); }
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 7aca0544fb29..e80992b4f9de 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -68,6 +68,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) __be16 proto; void *oiph; int err; + int nh;
bareudp = rcu_dereference_sk_user_data(sk); if (!bareudp) @@ -148,10 +149,25 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) } skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; - oiph = skb_network_header(skb); - skb_reset_network_header(skb); skb_reset_mac_header(skb);
+ /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + + skb_reset_network_header(skb); + + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(bareudp->dev, rx_length_errors); + DEV_STATS_INC(bareudp->dev, rx_errors); + goto drop; + } + + /* Get the outer header. */ + oiph = skb->head + nh; + if (!ipv6_mod_enabled() || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); else @@ -301,6 +317,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be32 saddr; int err;
+ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) + return -EINVAL; + if (!sock) return -ESHUTDOWN;
@@ -368,6 +387,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err;
+ if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB))) + return -EINVAL; + if (!sock) return -ESHUTDOWN;
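[Editor's note] The bareudp receive hunk records the outer IP header as an offset (nh) from skb->head before calling pskb_inet_may_pull(), exactly as the new comment explains: the pull may reallocate the header area and move skb->head, which would leave a saved pointer dangling, so the pointer is recomputed afterwards as skb->head + nh. The two transmit paths gain the analogous skb_vlan_inet_prepare() check. A generic save-offset-then-recompute sketch; the buffer and grow helper are made up:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct buf { unsigned char *head; size_t len; };

	/* May move the storage, like a pull that has to reallocate headroom. */
	static int maybe_grow(struct buf *b, size_t extra)
	{
		unsigned char *n = realloc(b->head, b->len + extra);

		if (!n)
			return -1;
		b->head = n;		/* raw pointers into the old buffer are now stale */
		b->len += extra;
		return 0;
	}

	int main(void)
	{
		struct buf b = { malloc(64), 64 };
		size_t nh = 20;			/* save an offset, not a pointer */
		unsigned char *outer;

		if (!b.head)
			return 1;
		memset(b.head, 0, 64);
		b.head[nh] = 0x45;		/* pretend the outer header starts here */

		if (maybe_grow(&b, 4096) < 0)
			return 1;

		outer = b.head + nh;		/* recompute after the possible move */
		printf("outer header byte: 0x%02x\n", *outer);
		free(b.head);
		return 0;
	}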
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bb9c3d6ef435..e20bee1bdffd 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5536,9 +5536,9 @@ bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp) break;
default: - /* Should never happen. Mode guarded by bond_xdp_check() */ - netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond)); - WARN_ON_ONCE(1); + if (net_ratelimit()) + netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", + BOND_MODE(bond)); return NULL; }
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 012c3d22b01d..7fec04b024d5 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1763,11 +1763,7 @@ static int m_can_close(struct net_device *dev)
netif_stop_queue(dev);
- if (!cdev->is_peripheral) - napi_disable(&cdev->napi); - m_can_stop(dev); - m_can_clk_stop(cdev); free_irq(dev->irq, dev);
m_can_clean(dev); @@ -1776,10 +1772,13 @@ static int m_can_close(struct net_device *dev) destroy_workqueue(cdev->tx_wq); cdev->tx_wq = NULL; can_rx_offload_disable(&cdev->offload); + } else { + napi_disable(&cdev->napi); }
close_candev(dev);
+ m_can_clk_stop(cdev); phy_power_off(cdev->transceiver);
return 0; @@ -2030,6 +2029,8 @@ static int m_can_open(struct net_device *dev)
if (cdev->is_peripheral) can_rx_offload_enable(&cdev->offload); + else + napi_enable(&cdev->napi);
/* register interrupt handler */ if (cdev->is_peripheral) { @@ -2063,9 +2064,6 @@ static int m_can_open(struct net_device *dev) if (err) goto exit_start_fail;
- if (!cdev->is_peripheral) - napi_enable(&cdev->napi); - netif_start_queue(dev);
return 0; @@ -2079,6 +2077,8 @@ static int m_can_open(struct net_device *dev) out_wq_fail: if (cdev->is_peripheral) can_rx_offload_disable(&cdev->offload); + else + napi_disable(&cdev->napi); close_candev(dev); exit_disable_clks: m_can_clk_stop(cdev); diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c index 41a0e4261d15..03ad10b01867 100644 --- a/drivers/net/can/usb/esd_usb.c +++ b/drivers/net/can/usb/esd_usb.c @@ -3,7 +3,7 @@ * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro * * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs socketcan@esd.eu - * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus frank.jungclaus@esd.eu + * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus frank.jungclaus@esd.eu */
#include <linux/can.h> @@ -1116,9 +1116,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev) if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - flags |= ESD_USB_3_BAUDRATE_FLAG_TRS; - baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1)); baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1)); baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1) @@ -1219,7 +1216,6 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index) switch (le16_to_cpu(dev->udev->descriptor.idProduct)) { case ESD_USB_CANUSB3_PRODUCT_ID: priv->can.clock.freq = ESD_USB_3_CAN_CLOCK; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const; priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 5c45f42232d3..f04f42ea60c0 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2305,12 +2305,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
snprintf(v->name, sizeof(v->name), "%s-rxtx%d", priv->ndev->name, i); - err = request_irq(irq, enetc_msix, 0, v->name, v); + err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v); if (err) { dev_err(priv->dev, "request_irq() failed!\n"); goto irq_err; } - disable_irq(irq);
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index fe64febf7436..991b477c873f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -371,6 +371,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, IDPF_TX_DESCS_FOR_CTX)) { idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+ u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.q_busy); + u64_stats_update_end(&tx_q->stats_sync); + return NETDEV_TX_BUSY; }
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 585c3dadd9bf..fd2d14ba7545 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -2157,29 +2157,6 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); }
-/** - * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions - * @tx_q: the queue to be checked - * @size: number of descriptors we want to assure is available - * - * Returns 0 if stop is not needed - */ -int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size) -{ - struct netdev_queue *nq; - - if (likely(IDPF_DESC_UNUSED(tx_q) >= size)) - return 0; - - u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.q_busy); - u64_stats_update_end(&tx_q->stats_sync); - - nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); - - return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); -} - /** * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions * @tx_q: the queue to be checked @@ -2191,7 +2168,7 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, unsigned int descs_needed) { if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) - goto splitq_stop; + goto out;
/* If there are too many outstanding completions expected on the * completion queue, stop the TX queue to give the device some time to @@ -2210,10 +2187,12 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, return 0;
splitq_stop: + netif_stop_subqueue(tx_q->netdev, tx_q->idx); + +out: u64_stats_update_begin(&tx_q->stats_sync); u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); - netif_stop_subqueue(tx_q->netdev, tx_q->idx);
return -EBUSY; } @@ -2236,7 +2215,11 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); tx_q->next_to_use = val;
- idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); + if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) { + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.q_busy); + u64_stats_update_end(&tx_q->stats_sync); + }
/* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 6215dbee5546..90415eb309cc 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -1064,7 +1064,6 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 ring_idx); unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb); -int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size); void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q); @@ -1073,4 +1072,12 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq, u16 cleaned_count); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, + u32 needed) +{ + return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, + IDPF_DESC_UNUSED(tx_q), + needed, needed); +} + #endif /* !_IDPF_TXRX_H_ */ diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig index c002ede36402..85519690b837 100644 --- a/drivers/net/ethernet/meta/Kconfig +++ b/drivers/net/ethernet/meta/Kconfig @@ -23,6 +23,8 @@ config FBNIC depends on !S390 depends on MAX_SKB_FRAGS < 22 depends on PCI_MSI + select NET_DEVLINK + select PAGE_POOL select PHYLINK help This driver supports Meta Platforms Host Network Interface. diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index 0ed4c9fff5d8..72f88ae7815f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -1012,14 +1012,14 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn, nv->fbd = fbd; nv->v_idx = v_idx;
- /* Record IRQ to NAPI struct */ - netif_napi_set_irq(&nv->napi, - pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); - /* Tie napi to netdev */ list_add(&nv->napis, &fbn->napis); netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+ /* Record IRQ to NAPI struct */ + netif_napi_set_irq(&nv->napi, + pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); + /* Tie nv back to PCIe dev */ nv->dev = fbd->dev;
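Stepping back to the idpf hunks a little above: idpf_tx_maybe_stop_common() moves out of idpf_txrx.c and becomes an inline wrapper around netif_subqueue_maybe_stop() in idpf_txrx.h, with the q_busy accounting pushed out to the callers that actually hit the busy condition. That helper family encapsulates the usual check / stop / re-check dance that keeps a Tx queue from being left stopped by a racing completion. A generic sketch of that dance, purely illustrative (my_desc_unused() is a made-up helper, and this is not the netdev_queues.h macro itself):

  static int my_tx_maybe_stop(struct net_device *dev, u16 idx,
                              unsigned int unused, unsigned int needed)
  {
          if (likely(unused >= needed))
                  return 0;

          netif_stop_subqueue(dev, idx);

          /* Pairs with a barrier in the completion path before it checks
           * the stopped state and wakes the queue.
           */
          smp_mb();

          if (my_desc_unused(dev, idx) < needed)
                  return -EBUSY;                  /* still full, stay stopped */

          netif_start_subqueue(dev, idx);         /* a completion freed space */
          return 0;
  }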
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 1f74317beb88..e1e5d9672ae4 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -1060,6 +1060,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); rtl8168g_enable_gphy_10m(phydev);
+ rtl8168g_disable_aldps(phydev); rtl8125a_config_eee_phy(phydev); }
@@ -1099,6 +1100,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
rtl8125_legacy_force_mode(phydev); + rtl8168g_disable_aldps(phydev); rtl8125b_config_eee_phy(phydev); }
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 9893c91af105..a7de5cf6b317 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1052,6 +1052,7 @@ struct ravb_hw_info { netdev_features_t net_features; int stats_len; u32 tccr_mask; + u32 tx_max_frame_size; u32 rx_max_frame_size; u32 rx_buffer_size; u32 rx_desc_size; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c02fb296bf7d..6b82df11fe8d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -555,8 +555,16 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
 static void ravb_emac_init_rcar(struct net_device *ndev)
 {
-	/* Receive frame limit set register */
-	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	/* Set receive frame length
+	 *
+	 * The length set here describes the frame from the destination address
+	 * up to and including the CRC data. However only the frame data,
+	 * excluding the CRC, are transferred to memory. To allow for the
+	 * largest frames add the CRC length to the maximum Rx descriptor size.
+	 */
+	ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ ravb_write(ndev, ECMR_ZPF | ECMR_DM | @@ -2674,6 +2682,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = { .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .tx_max_frame_size = SZ_2K, .rx_max_frame_size = SZ_2K, .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), @@ -2696,6 +2705,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = { .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .tx_max_frame_size = SZ_2K, .rx_max_frame_size = SZ_2K, .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), @@ -2721,6 +2731,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = { .net_features = NETIF_F_RXCSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .tx_max_frame_size = SZ_2K, .rx_max_frame_size = SZ_2K, .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), @@ -2770,6 +2781,7 @@ static const struct ravb_hw_info gbeth_hw_info = { .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), .tccr_mask = TCCR_TSRQ0, + .tx_max_frame_size = 1522, .rx_max_frame_size = SZ_8K, .rx_buffer_size = SZ_2K, .rx_desc_size = sizeof(struct ravb_rx_desc), @@ -2981,7 +2993,7 @@ static int ravb_probe(struct platform_device *pdev) priv->avb_link_active_low = of_property_read_bool(np, "renesas,ether-link-active-low");
- ndev->max_mtu = info->rx_max_frame_size - + ndev->max_mtu = info->tx_max_frame_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU;
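With the new tx_max_frame_size field, max_mtu is apparently bounded by what the MAC can actually transmit rather than by the (larger) receive limit. For the GbEth entry the arithmetic works out to the standard Ethernet MTU:

  max_mtu = tx_max_frame_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
          = 1522 - (14 + 4 + 4)
          = 1500

The R-Car entries keep tx_max_frame_size = SZ_2K, so their maximum MTU is unchanged.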
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index c672f92d65e9..9319a2675e7b 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -847,9 +847,11 @@ static void ether3_remove(struct expansion_card *ec) { struct net_device *dev = ecard_get_drvdata(ec);
+ ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); ecard_set_drvdata(ec, NULL);
unregister_netdev(dev); + del_timer_sync(&priv(dev)->timer); free_netdev(dev); ecard_release_resources(ec); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 9e40c28d453a..ee3604f58def 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -35,6 +35,9 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat) /* Disable RX queues routing by default */ plat->rx_queues_cfg[0].pkt_route = 0x0;
+ plat->clk_ref_rate = 125000000; + plat->clk_ptp_rate = 125000000; + /* Default to phy auto-detection */ plat->phy_addr = -1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f3a1b179aaea..95d3d1081727 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2022,7 +2022,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, rx_q->queue_index = queue; rx_q->priv_data = priv;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.flags = PP_FLAG_DMA_MAP | (xdp_prog ? PP_FLAG_DMA_SYNC_DEV : 0); pp_params.pool_size = dma_conf->dma_rx_size; num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); pp_params.order = ilog2(num_pages); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 9eb300fc3590..5dbfee4aee43 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -674,15 +674,15 @@ static int axienet_device_reset(struct net_device *ndev) * * Would either be called after a successful transmit operation, or after * there was an error when setting up the chain. - * Returns the number of descriptors handled. + * Returns the number of packets handled. */ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, int nr_bds, bool force, u32 *sizep, int budget) { struct axidma_bd *cur_p; unsigned int status; + int i, packets = 0; dma_addr_t phys; - int i;
for (i = 0; i < nr_bds; i++) { cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; @@ -701,8 +701,10 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE);
- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) + if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) { napi_consume_skb(cur_p->skb, budget); + packets++; + }
cur_p->app0 = 0; cur_p->app1 = 0; @@ -718,7 +720,13 @@ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; }
- return i; + if (!force) { + lp->tx_bd_ci += i; + if (lp->tx_bd_ci >= lp->tx_bd_num) + lp->tx_bd_ci %= lp->tx_bd_num; + } + + return packets; }
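axienet_free_tx_chain() now reports completed packets (freed skbs) rather than processed descriptors, and advances the consumer index itself; a frame can span several descriptors, so the old return value overcounted packets in the Tx statistics. The consumer-index wrap is the usual ring-buffer idiom, sketched here with invented names:

  struct my_ring {
          unsigned int ci;        /* consumer index */
          unsigned int num;       /* number of descriptors in the ring */
  };

  static void my_ring_advance(struct my_ring *r, unsigned int done)
  {
          r->ci += done;
          if (r->ci >= r->num)
                  r->ci %= r->num;        /* wrap back to the start */
  }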
/** @@ -891,13 +899,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget) u32 size = 0; int packets;
- packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget); + packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false, + &size, budget);
if (packets) { - lp->tx_bd_ci += packets; - if (lp->tx_bd_ci >= lp->tx_bd_num) - lp->tx_bd_ci %= lp->tx_bd_num; - u64_stats_update_begin(&lp->tx_stat_sync); u64_stats_add(&lp->tx_packets, packets); u64_stats_add(&lp->tx_bytes, size); @@ -1222,9 +1227,10 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) u32 cr = lp->tx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); - - napi_schedule(&lp->napi_tx); + if (napi_schedule_prep(&lp->napi_tx)) { + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + __napi_schedule(&lp->napi_tx); + } }
return IRQ_HANDLED; @@ -1266,9 +1272,10 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) u32 cr = lp->rx_dma_cr;
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); - axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - - napi_schedule(&lp->napi_rx); + if (napi_schedule_prep(&lp->napi_rx)) { + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + __napi_schedule(&lp->napi_rx); + } }
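In both IRQ handlers above, the queue interrupt is now masked only when napi_schedule_prep() confirms the NAPI instance will really be scheduled; the apparent intent is to avoid masking the interrupt and then not scheduling the poller (for example while NAPI is disabled), which would leave nothing to unmask it later. The resulting handler shape, sketched with hypothetical names:

  static irqreturn_t my_irq(int irq, void *data)
  {
          struct my_dev *md = data;

          if (napi_schedule_prep(&md->napi)) {
                  my_mask_queue_irq(md);          /* hypothetical register write */
                  __napi_schedule(&md->napi);     /* the poller unmasks later */
          }

          return IRQ_HANDLED;
  }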
return IRQ_HANDLED; diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index 16789cd446e9..3f4187102e77 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -65,6 +65,7 @@ static struct netkit *netkit_priv(const struct net_device *dev)
static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct netkit *nk = netkit_priv(dev); enum netkit_action ret = READ_ONCE(nk->policy); netdev_tx_t ret_dev = NET_XMIT_SUCCESS; @@ -72,6 +73,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device *peer; int len = skb->len;
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); rcu_read_lock(); peer = rcu_dereference(nk->peer); if (unlikely(!peer || !(peer->flags & IFF_UP) || @@ -110,6 +112,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev) break; } rcu_read_unlock(); + bpf_net_ctx_clear(bpf_net_ctx); return ret_dev; }
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c index 524627a36c6f..dac6464b5fe2 100644 --- a/drivers/net/phy/aquantia/aquantia_firmware.c +++ b/drivers/net/phy/aquantia/aquantia_firmware.c @@ -353,26 +353,32 @@ int aqr_firmware_load(struct phy_device *phydev) { int ret;
-	ret = aqr_wait_reset_complete(phydev);
-	if (ret)
-		return ret;
-
-	/* Check if the firmware is not already loaded by pooling
-	 * the current version returned by the PHY. If 0 is returned,
-	 * no firmware is loaded.
+	/* Check if the firmware is not already loaded by polling
+	 * the current version returned by the PHY.
 	 */
-	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
-	if (ret > 0)
-		goto exit;
-
-	ret = aqr_firmware_load_nvmem(phydev);
-	if (!ret)
-		goto exit;
-
-	ret = aqr_firmware_load_fs(phydev);
-	if (ret)
+	ret = aqr_wait_reset_complete(phydev);
+	switch (ret) {
+	case 0:
+		/* Some firmware is loaded => do nothing */
+		return 0;
+	case -ETIMEDOUT:
+		/* VEND1_GLOBAL_FW_ID still reads 0 after 2 seconds of polling.
+		 * We don't have full confidence that no firmware is loaded (in
+		 * theory it might just not have loaded yet), but we will
+		 * assume that, and load a new image.
+		 */
+		ret = aqr_firmware_load_nvmem(phydev);
+		if (!ret)
+			return ret;
+
+		ret = aqr_firmware_load_fs(phydev);
+		if (ret)
+			return ret;
+		break;
+	default:
+		/* PHY read error, propagate it to the caller */
 		return ret;
+	}
-exit: return 0; } diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c index 0516ac02c3f8..201c8df93fad 100644 --- a/drivers/net/phy/aquantia/aquantia_leds.c +++ b/drivers/net/phy/aquantia/aquantia_leds.c @@ -120,7 +120,8 @@ int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index, int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable) { return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index), - VEND1_GLOBAL_LED_DRIVE_VDD, enable); + VEND1_GLOBAL_LED_DRIVE_VDD, + enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0); }
int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes) diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c index e982e9ce44a5..4d156d406bab 100644 --- a/drivers/net/phy/aquantia/aquantia_main.c +++ b/drivers/net/phy/aquantia/aquantia_main.c @@ -435,6 +435,9 @@ static int aqr107_set_tunable(struct phy_device *phydev, } }
+#define AQR_FW_WAIT_SLEEP_US 20000 +#define AQR_FW_WAIT_TIMEOUT_US 2000000 + /* If we configure settings whilst firmware is still initializing the chip, * then these settings may be overwritten. Therefore make sure chip * initialization has completed. Use presence of the firmware ID as @@ -444,11 +447,19 @@ static int aqr107_set_tunable(struct phy_device *phydev, */ int aqr_wait_reset_complete(struct phy_device *phydev) { - int val; + int ret, val; + + ret = read_poll_timeout(phy_read_mmd, val, val != 0, + AQR_FW_WAIT_SLEEP_US, AQR_FW_WAIT_TIMEOUT_US, + false, phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_FW_ID); + if (val < 0) { + phydev_err(phydev, "Failed to read VEND1_GLOBAL_FW_ID: %pe\n", + ERR_PTR(val)); + return val; + }
- return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, - VEND1_GLOBAL_FW_ID, val, val != 0, - 20000, 2000000, false); + return ret; }
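aqr_wait_reset_complete() now polls VEND1_GLOBAL_FW_ID with read_poll_timeout() (20 ms steps, 2 s total) and lets the caller tell a timeout apart from an MDIO read failure: ret carries 0 or -ETIMEDOUT, while a negative val means the read itself failed. For reference, the general shape of read_poll_timeout(), shown with a hypothetical register accessor:

  int ret, val;

  ret = read_poll_timeout(my_read_reg, val, val != 0,
                          20000,          /* sleep 20 ms between reads */
                          2000000,        /* give up after 2 s */
                          false,          /* no sleep before the first read */
                          dev, MY_REG);   /* arguments passed to my_read_reg() */
  /*
   * ret == 0          -> val became non-zero within the timeout
   * ret == -ETIMEDOUT -> still zero after 2 s (val holds the last read)
   * val  <  0         -> the read itself returned an error code
   */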
static void aqr107_chip_info(struct phy_device *phydev) @@ -478,7 +489,7 @@ static int aqr107_config_init(struct phy_device *phydev) { struct aqr107_priv *priv = phydev->priv; u32 led_active_low; - int ret, index = 0; + int ret;
/* Check that the PHY interface type is compatible */ if (phydev->interface != PHY_INTERFACE_MODE_SGMII && @@ -505,10 +516,9 @@ static int aqr107_config_init(struct phy_device *phydev)
/* Restore LED polarity state after reset */ for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) { - ret = aqr_phy_led_active_low_set(phydev, index, led_active_low); + ret = aqr_phy_led_active_low_set(phydev, led_active_low, true); if (ret) return ret; - index++; }
return 0; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 18eb5ba436df..2506aa8c603e 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -464,10 +464,15 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, void usbnet_defer_kevent (struct usbnet *dev, int work) { set_bit (work, &dev->flags); - if (!schedule_work (&dev->kevent)) - netdev_dbg(dev->net, "kevent %s may have been dropped\n", usbnet_event_names[work]); - else - netdev_dbg(dev->net, "kevent %s scheduled\n", usbnet_event_names[work]); + if (!usbnet_going_away(dev)) { + if (!schedule_work(&dev->kevent)) + netdev_dbg(dev->net, + "kevent %s may have been dropped\n", + usbnet_event_names[work]); + else + netdev_dbg(dev->net, + "kevent %s scheduled\n", usbnet_event_names[work]); + } } EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -535,7 +540,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) tasklet_schedule (&dev->bh); break; case 0: - __usbnet_queue_skb(&dev->rxq, skb, rx_start); + if (!usbnet_going_away(dev)) + __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); @@ -843,9 +849,18 @@ int usbnet_stop (struct net_device *net)
 	/* deferred work (timer, softirq, task) must also stop */
 	dev->flags = 0;
-	del_timer_sync (&dev->delay);
-	tasklet_kill (&dev->bh);
+	del_timer_sync(&dev->delay);
+	tasklet_kill(&dev->bh);
 	cancel_work_sync(&dev->kevent);
+
+	/* We have cyclic dependencies. Those calls are needed
+	 * to break a cycle. We cannot fall into the gaps because
+	 * we have a flag
+	 */
+	tasklet_kill(&dev->bh);
+	del_timer_sync(&dev->delay);
+	cancel_work_sync(&dev->kevent);
+
 	if (!pm)
 		usb_autopm_put_interface(dev->intf);
@@ -1171,7 +1186,8 @@ usbnet_deferred_kevent (struct work_struct *work) status); } else { clear_bit (EVENT_RX_HALT, &dev->flags); - tasklet_schedule (&dev->bh); + if (!usbnet_going_away(dev)) + tasklet_schedule(&dev->bh); } }
@@ -1196,7 +1212,8 @@ usbnet_deferred_kevent (struct work_struct *work) usb_autopm_put_interface(dev->intf); fail_lowmem: if (resched) - tasklet_schedule (&dev->bh); + if (!usbnet_going_away(dev)) + tasklet_schedule(&dev->bh); } }
@@ -1559,6 +1576,7 @@ static void usbnet_bh (struct timer_list *t) } else if (netif_running (dev->net) && netif_device_present (dev->net) && netif_carrier_ok(dev->net) && + !usbnet_going_away(dev) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_PAUSED, &dev->flags) && !test_bit(EVENT_RX_HALT, &dev->flags)) { @@ -1606,6 +1624,7 @@ void usbnet_disconnect (struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (!dev) return; + usbnet_mark_going_away(dev);
xdev = interface_to_usbdev (intf);
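The usbnet changes above are all about one teardown race: disconnect marks the device as going away before anything else, and every place that could re-arm deferred work (usbnet_defer_kevent(), rx_submit(), the bh reschedules) now checks that mark, so usbnet_stop()/usbnet_disconnect() cannot be outrun by freshly scheduled work. The generic shape of that fix, with made-up names rather than usbnet's real fields:

  #define MY_GOING_AWAY   0

  struct my_dev {
          unsigned long flags;
          struct work_struct kevent;
  };

  static void my_defer_kevent(struct my_dev *d)
  {
          if (!test_bit(MY_GOING_AWAY, &d->flags))
                  schedule_work(&d->kevent);      /* only while still alive */
  }

  static void my_disconnect(struct my_dev *d)
  {
          set_bit(MY_GOING_AWAY, &d->flags);      /* no new work after this */
          cancel_work_sync(&d->kevent);           /* flush what is in flight */
  }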
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5a1c1ec5a64b..f8131f92a392 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1807,6 +1807,11 @@ static struct sk_buff *receive_small(struct net_device *dev, struct page *page = virt_to_head_page(buf); struct sk_buff *skb;
+ /* We passed the address of virtnet header to virtio-core, + * so truncate the padding. + */ + buf -= VIRTNET_RX_PAD + xdp_headroom; + len -= vi->hdr_len; u64_stats_add(&stats->bytes, len);
@@ -2422,8 +2427,9 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, if (unlikely(!buf)) return -ENOMEM;
- virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom, - vi->hdr_len + GOOD_PACKET_LEN); + buf += VIRTNET_RX_PAD + xdp_headroom; + + virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { @@ -2884,6 +2890,25 @@ static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim) net_dim_work_cancel(dim); }
+static void virtnet_update_settings(struct virtnet_info *vi) +{ + u32 speed; + u8 duplex; + + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) + return; + + virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); + + if (ethtool_validate_speed(speed)) + vi->speed = speed; + + virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); + + if (ethtool_validate_duplex(duplex)) + vi->duplex = duplex; +} + static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -2902,6 +2927,15 @@ static int virtnet_open(struct net_device *dev) goto err_enable_qp; }
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { + if (vi->status & VIRTIO_NET_S_LINK_UP) + netif_carrier_on(vi->dev); + virtio_config_driver_enable(vi->vdev); + } else { + vi->status = VIRTIO_NET_S_LINK_UP; + netif_carrier_on(dev); + } + return 0;
err_enable_qp: @@ -3380,12 +3414,22 @@ static int virtnet_close(struct net_device *dev) disable_delayed_refill(vi); /* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); + /* Prevent the config change callback from changing carrier + * after close + */ + virtio_config_driver_disable(vi->vdev); + /* Stop getting status/speed updates: we don't care until next + * open + */ + cancel_work_sync(&vi->config_work);
for (i = 0; i < vi->max_queue_pairs; i++) { virtnet_disable_queue_pair(vi, i); virtnet_cancel_dim(vi, &vi->rq[i].dim); }
+ netif_carrier_off(dev); + return 0; }
@@ -5094,25 +5138,6 @@ static void virtnet_init_settings(struct net_device *dev) vi->duplex = DUPLEX_UNKNOWN; }
-static void virtnet_update_settings(struct virtnet_info *vi) -{ - u32 speed; - u8 duplex; - - if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) - return; - - virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); - - if (ethtool_validate_speed(speed)) - vi->speed = speed; - - virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); - - if (ethtool_validate_duplex(duplex)) - vi->duplex = duplex; -} - static u32 virtnet_get_rxfh_key_size(struct net_device *dev) { return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size; @@ -6521,6 +6546,9 @@ static int virtnet_probe(struct virtio_device *vdev) goto free_failover; }
+ /* Disable config change notification until ndo_open. */ + virtio_config_driver_disable(vi->vdev); + virtio_device_ready(vdev);
virtnet_set_queues(vi, vi->curr_queue_pairs); @@ -6570,19 +6598,11 @@ static int virtnet_probe(struct virtio_device *vdev) vi->device_stats_cap = le64_to_cpu(v); }
- rtnl_unlock(); - - err = virtnet_cpu_notif_add(vi); - if (err) { - pr_debug("virtio_net: registering cpu notifier failed\n"); - goto free_unregister_netdev; - } - /* Assume link up if device can't report link status, otherwise get link status from config. */ netif_carrier_off(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { - schedule_work(&vi->config_work); + virtnet_config_changed_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; virtnet_update_settings(vi); @@ -6594,6 +6614,14 @@ static int virtnet_probe(struct virtio_device *vdev) set_bit(guest_offloads[i], &vi->guest_offloads); vi->guest_offloads_capable = vi->guest_offloads;
+ rtnl_unlock(); + + err = virtnet_cpu_notif_add(vi); + if (err) { + pr_debug("virtio_net: registering cpu notifier failed\n"); + goto free_unregister_netdev; + } + pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs);
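The virtio_net hunks above gate config-change handling on the device being open: probe disables the notification before virtio_device_ready(), ndo_open re-enables it (or simply forces the carrier up when VIRTIO_NET_F_STATUS is absent), and ndo_close disables it again and flushes config_work before dropping the carrier, so a late config interrupt can no longer flip the carrier of a closed or still-probing device. Roughly, with hypothetical callback names standing in for the driver's own:

  static int my_close(struct my_priv *p)
  {
          my_config_notify_disable(p);            /* hypothetical */
          cancel_work_sync(&p->config_work);      /* no more carrier updates */
          netif_carrier_off(p->dev);
          return 0;
  }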
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index b655967a465b..76faa55fd0f3 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -399,6 +399,7 @@ struct ath11k_vif { u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; struct delayed_work connection_loss_work; + struct work_struct bcn_tx_work; int num_legacy_stations; int rtscts_prot_mode; int txpower; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 7c0ef6916dd2..f8068d2e848c 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -6599,6 +6599,16 @@ static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif) return ret; }
+static void ath11k_mac_bcn_tx_work(struct work_struct *work) +{ + struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, + bcn_tx_work); + + mutex_lock(&arvif->ar->conf_mutex); + ath11k_mac_bcn_tx_event(arvif); + mutex_unlock(&arvif->ar->conf_mutex); +} + static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -6637,6 +6647,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list); + INIT_WORK(&arvif->bcn_tx_work, ath11k_mac_bcn_tx_work); INIT_DELAYED_WORK(&arvif->connection_loss_work, ath11k_mac_vif_sta_connection_loss_work);
@@ -6879,6 +6890,7 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, int i;
cancel_delayed_work_sync(&arvif->connection_loss_work); + cancel_work_sync(&arvif->bcn_tx_work);
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 38f175dd1557..2662092ee00a 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -7404,7 +7404,9 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s rcu_read_unlock(); return; } - ath11k_mac_bcn_tx_event(arvif); + + queue_work(ab->workqueue, &arvif->bcn_tx_work); + rcu_read_unlock(); }
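ath11k_mac_bcn_tx_event() takes conf_mutex, but the WMI beacon-tx-status event is handled under rcu_read_lock(), where sleeping is not allowed; the fix defers the call to a per-vif work item (queued above, cancelled in remove_interface). The deferral pattern, sketched with invented names:

  static void my_event_work(struct work_struct *work)
  {
          struct my_vif *v = container_of(work, struct my_vif, event_work);

          mutex_lock(&v->conf_mutex);     /* sleeping is fine here */
          my_handle_event(v);             /* hypothetical handler */
          mutex_unlock(&v->conf_mutex);
  }

  /* setup:    INIT_WORK(&v->event_work, my_event_work);
   * event:    queue_work(wq, &v->event_work);    safe in atomic context
   * teardown: cancel_work_sync(&v->event_work);  before freeing v
   */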
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index ce41c8153080..a3248d977532 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -2196,9 +2196,8 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu * length. */ - ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] & - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >> - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK; + ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
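The ath12k hunk above fixes a field extraction: the old expression shifted right by the mask value itself instead of by the field's bit offset, which gives the wrong result (typically zero) for a multi-bit field. u8_get_bits() from <linux/bitfield.h> masks and shifts in one step; for example:

  /* bits 4:3 of 0x5a are 0b11, so this evaluates to 3 */
  u8 v = u8_get_bits(0x5a, GENMASK(4, 3));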
if (ampdu_factor) { if (sta->deflink.vht_cap.vht_supported) diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 9f6be557365e..a76413320dbf 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -1538,6 +1538,7 @@ int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST, sizeof(*cmd)); cmd->req_type = cpu_to_le32(type); + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI bss chan info req type %d\n", type); diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index f1f52175a52b..6a913f9b8315 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -3121,6 +3121,7 @@ struct wmi_pdev_bss_chan_info_req_cmd { __le32 tlv_header; /* ref wmi_bss_chan_info_req_type */ __le32 req_type; + __le32 pdev_id; } __packed;
struct wmi_ap_ps_peer_cmd { @@ -4085,7 +4086,6 @@ struct wmi_vdev_stopped_event { } __packed;
struct wmi_pdev_bss_chan_info_event { - __le32 pdev_id; __le32 freq; /* Units in MHz */ __le32 noise_floor; /* units are dBm */ /* rx clear - how often the channel was unused */ @@ -4103,6 +4103,7 @@ struct wmi_pdev_bss_chan_info_event { /*rx_cycle cnt for my bss in 64bits format */ __le32 rx_bss_cycle_count_low; __le32 rx_bss_cycle_count_high; + __le32 pdev_id; } __packed;
#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index d84e3ee7b5d9..bf3da631c69f 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1380,8 +1380,6 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy = debugfs_create_dir("ath9k", sc->hw->wiphy->debugfsdir); - if (IS_ERR(sc->debug.debugfs_phy)) - return -ENOMEM;
#ifdef CONFIG_ATH_DEBUG debugfs_create_file("debug", 0600, sc->debug.debugfs_phy, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c index f7c6d9bc9311..9437d69877cc 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c @@ -486,8 +486,6 @@ int ath9k_htc_init_debug(struct ath_hw *ah)
priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME, priv->hw->wiphy->debugfsdir); - if (IS_ERR(priv->debug.debugfs_phy)) - return -ENOMEM;
ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c index 0c3d119d1219..1e8495f50c16 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c @@ -123,7 +123,7 @@ static s32 brcmf_btcoex_params_read(struct brcmf_if *ifp, u32 addr, u32 *data) { *data = addr;
- return brcmf_fil_iovar_int_get(ifp, "btc_params", data); + return brcmf_fil_iovar_int_query(ifp, "btc_params", data); }
/** diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d4cc5fa92341..815f6b3c79fc 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -663,8 +663,8 @@ static int brcmf_cfg80211_request_sta_if(struct brcmf_if *ifp, u8 *macaddr) /* interface_create version 3+ */ /* get supported version from firmware side */ iface_create_ver = 0; - err = brcmf_fil_bsscfg_int_get(ifp, "interface_create", - &iface_create_ver); + err = brcmf_fil_bsscfg_int_query(ifp, "interface_create", + &iface_create_ver); if (err) { brcmf_err("fail to get supported version, err=%d\n", err); return -EOPNOTSUPP; @@ -756,8 +756,8 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) /* interface_create version 3+ */ /* get supported version from firmware side */ iface_create_ver = 0; - err = brcmf_fil_bsscfg_int_get(ifp, "interface_create", - &iface_create_ver); + err = brcmf_fil_bsscfg_int_query(ifp, "interface_create", + &iface_create_ver); if (err) { brcmf_err("fail to get supported version, err=%d\n", err); return -EOPNOTSUPP; @@ -2101,7 +2101,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) if (!sme->crypto.n_akm_suites) return 0;
- err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val); + err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), + "wpa_auth", &val); if (err) { bphy_err(drvr, "could not get wpa_auth (%d)\n", err); return err; @@ -2680,7 +2681,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev); struct brcmf_pub *drvr = cfg->pub; - s32 qdbm = 0; + s32 qdbm; s32 err;
brcmf_dbg(TRACE, "Enter\n"); @@ -3067,7 +3068,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, struct brcmf_scb_val_le scbval; struct brcmf_pktcnt_le pktcnt; s32 err; - u32 rate = 0; + u32 rate; u32 rssi;
/* Get the current tx rate */ @@ -7046,8 +7047,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; - err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info", - &chaninfo); + err = brcmf_fil_bsscfg_int_query(ifp, "per_chan_info", + &chaninfo); if (!err) { if (chaninfo & WL_CHAN_RADAR) channel->flags |= @@ -7081,7 +7082,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
/* verify support for bw_cap command */ val = WLC_BAND_5G; - err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &val);
if (!err) { /* only set 2G bandwidth using bw_cap command */ @@ -7157,11 +7158,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) int err;
band = WLC_BAND_2G; - err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band); if (!err) { bw_cap[NL80211_BAND_2GHZ] = band; band = WLC_BAND_5G; - err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band); + err = brcmf_fil_iovar_int_query(ifp, "bw_cap", &band); if (!err) { bw_cap[NL80211_BAND_5GHZ] = band; return; @@ -7170,7 +7171,6 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) return; } brcmf_dbg(INFO, "fallback to mimo_bw_cap info\n"); - mimo_bwcap = 0; err = brcmf_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap); if (err) /* assume 20MHz if firmware does not give a clue */ @@ -7266,10 +7266,10 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) struct brcmf_pub *drvr = cfg->pub; struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); struct wiphy *wiphy = cfg_to_wiphy(cfg); - u32 nmode = 0; + u32 nmode; u32 vhtmode = 0; u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT }; - u32 rxchain = 0; + u32 rxchain; u32 nchain; int err; s32 i; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index bf91b1e1368f..df53dd1d7e74 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -691,7 +691,7 @@ static int brcmf_net_mon_open(struct net_device *ndev) { struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pub *drvr = ifp->drvr; - u32 monitor = 0; + u32 monitor; int err;
brcmf_dbg(TRACE, "Enter\n"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index f23310a77a5d..0d9ae197fa1e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -184,7 +184,7 @@ static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv) static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp, enum brcmf_feat_id id, char *name) { - u32 data = 0; + u32 data; int err;
/* we need to know firmware error */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h index a315a7fac6a0..31e080e4da66 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h @@ -96,15 +96,22 @@ static inline s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data) { s32 err; - __le32 data_le = cpu_to_le32(*data);
- err = brcmf_fil_cmd_data_get(ifp, cmd, &data_le, sizeof(data_le)); + err = brcmf_fil_cmd_data_get(ifp, cmd, data, sizeof(*data)); if (err == 0) - *data = le32_to_cpu(data_le); + *data = le32_to_cpu(*(__le32 *)data); brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
return err; } +static inline +s32 brcmf_fil_cmd_int_query(struct brcmf_if *ifp, u32 cmd, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_cmd_int_get(ifp, cmd, data); +}
s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data, u32 len); @@ -120,14 +127,21 @@ s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data) static inline s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data) { - __le32 data_le = cpu_to_le32(*data); s32 err;
- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le)); + err = brcmf_fil_iovar_data_get(ifp, name, data, sizeof(*data)); if (err == 0) - *data = le32_to_cpu(data_le); + *data = le32_to_cpu(*(__le32 *)data); return err; } +static inline +s32 brcmf_fil_iovar_int_query(struct brcmf_if *ifp, const char *name, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_iovar_int_get(ifp, name, data); +}
s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, @@ -145,15 +159,21 @@ s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data) static inline s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data) { - __le32 data_le = cpu_to_le32(*data); s32 err;
- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le, - sizeof(data_le)); + err = brcmf_fil_bsscfg_data_get(ifp, name, data, sizeof(*data)); if (err == 0) - *data = le32_to_cpu(data_le); + *data = le32_to_cpu(*(__le32 *)data); return err; } +static inline +s32 brcmf_fil_bsscfg_int_query(struct brcmf_if *ifp, const char *name, u32 *data) +{ + __le32 *data_le = (__le32 *)data; + + *data_le = cpu_to_le32(*data); + return brcmf_fil_bsscfg_int_get(ifp, name, data); +}
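The brcmfmac helpers are split into two flavours: the *_int_get() variants now treat the caller's buffer purely as output (the firmware fills it with a little-endian value that is converted in place), while the new *_int_query() variants first store the caller's input value as little-endian and then issue the same request. Call sites that pass an input selector ("bw_cap", "interface_create", "per_chan_info", "btc_params") are switched to _query; plain getters keep _get and no longer need to pre-initialize the variable. The endianness handling, reduced to a sketch with a hypothetical transport function:

  static int my_int_get(struct my_if *ifp, const char *name, u32 *data)
  {
          int err = my_fw_get(ifp, name, data, sizeof(*data));   /* hypothetical */

          if (!err)
                  *data = le32_to_cpu(*(__le32 *)data);   /* firmware replies LE */
          return err;
  }

  static int my_int_query(struct my_if *ifp, const char *name, u32 *data)
  {
          *(__le32 *)data = cpu_to_le32(*data);   /* send the input value as LE */
          return my_int_get(ifp, name, data);
  }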
s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id, void *data, u32 len); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index 3b6b8b410be5..b230441abc16 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -148,6 +148,17 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, };
+const struct iwl_cfg_trans_params iwl_gl_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_BZ, + .base_params = &iwl_bz_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, +}; + const char iwl_bz_name[] = "Intel(R) TBD Bz device"; const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz"; const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index b2abd4fd1944..34c91deca57b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -504,6 +504,7 @@ extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg; extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index c4c1e67b9ac7..8f63cbe9e50d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -109,7 +109,7 @@ #define IWL_MVM_FTM_INITIATOR_SECURE_LTF false #define IWL_MVM_FTM_RESP_NDP_SUPPORT true #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true -#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5 +#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7 #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index afd90a52d4ec..55245f913286 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -772,6 +772,7 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_ftm_iter_data target;
target.bssid = bssid; + target.cipher = cipher; ieee80211_iter_keys(mvm->hw, vif, iter, &target); } else { memcpy(tk, entry->tk, sizeof(entry->tk)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index a8c42ce3b630..72fa7ac86516 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -114,16 +114,14 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id, mvm->aux_sta.tfd_queue_msk);
-	if (mvm->mld_api_is_used) {
-		iwl_mvm_mld_rm_aux_sta(mvm);
-		mutex_unlock(&mvm->mutex);
-		return;
-	}
-
 	/* In newer version of this command an aux station is added only
 	 * in cases of dedicated tx queue and need to be removed in end
-	 * of use */
-	if (iwl_mvm_has_new_station_api(mvm->fw))
+	 * of use. For the even newer mld api, use the appropriate
+	 * function.
+	 */
+	if (mvm->mld_api_is_used)
+		iwl_mvm_mld_rm_aux_sta(mvm);
+	else if (iwl_mvm_has_new_station_api(mvm->fw))
 		iwl_mvm_rm_aux_sta(mvm);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 84fd93278450..805fb249a0c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -500,9 +500,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */ - {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)}, diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index bb291fe314fb..d96ee759828e 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1529,7 +1529,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid) { - struct mt76_phy *phy = dev->phys[wcid->phy_idx]; + struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx); struct ieee80211_hw *hw; struct sk_buff_head list; struct sk_buff *skb; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index ea017f22fff2..863e5770df51 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -29,7 +29,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) struct ieee80211_sta *sta; struct mt7603_sta *msta; struct mt76_wcid *wcid; - u8 tid = 0, hwq = 0; + u8 qid, tid = 0, hwq = 0; void *priv; int idx; u32 val; @@ -57,7 +57,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) if (ieee80211_is_data_qos(hdr->frame_control)) { tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TAG1D_MASK; - u8 qid = tid_to_ac[tid]; + qid = tid_to_ac[tid]; hwq = wmm_queue_map[qid]; skb_set_queue_mapping(skb, qid); } else if (ieee80211_is_data(hdr->frame_control)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index f7722f67db57..0b9ebdcda221 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM; + hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev, mt7615_hwmon_groups); return PTR_ERR_OR_ZERO(hwmon); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h index 353e66069840..ef003d1620a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -28,8 +28,6 @@ enum { #define MT_RXD0_MESH BIT(18) #define MT_RXD0_MHCP BIT(19) #define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16) -#define MT_RXD0_NORMAL_IP_SUM BIT(23) -#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
#define MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16) #define MT_RXD0_SW_PKT_TYPE_MAP 0x380F @@ -80,6 +78,8 @@ enum { #define MT_RXD3_NORMAL_BEACON_UC BIT(21) #define MT_RXD3_NORMAL_CO_ANT BIT(22) #define MT_RXD3_NORMAL_FCS_ERR BIT(24) +#define MT_RXD3_NORMAL_IP_SUM BIT(26) +#define MT_RXD3_NORMAL_UDP_TCP_SUM BIT(27) #define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
/* RXD DW4 */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index a978f434dc5e..7bc3b4cd3592 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM;
cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops); if (!IS_ERR(cdev)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 049223df9beb..efbb8b23e471 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -564,8 +564,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS | MT_WF_RFCR_DROP_RTS | - MT_WF_RFCR_DROP_CTL_RSV | - MT_WF_RFCR_DROP_NDPA); + MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags; rxfilter = phy->rxfilter; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index ef0c721d26e3..d1d64fa7d35d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM;
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7921_hwmon_groups); @@ -83,7 +85,7 @@ mt7921_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev) }
/* UNII-4 */ - if (IS_UNII_INVALID(0, 5850, 5925)) + if (IS_UNII_INVALID(0, 5845, 5925)) ch->flags |= IEEE80211_CHAN_DISABLED; }
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c index cf36750cf709..634c42bbf23f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c @@ -352,7 +352,7 @@ mt7925_mac_fill_rx_rate(struct mt792x_dev *dev, static int mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) { - u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM; struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; bool hdr_trans, unicast, insert_ccmp_hdr = false; u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info; @@ -362,7 +362,6 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) struct mt792x_phy *phy = &dev->phy; struct ieee80211_supported_band *sband; u32 csum_status = *(u32 *)skb->cb; - u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); @@ -420,7 +419,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) if (!sband->channels) return -EINVAL;
- if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask && + if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask && !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 8c0768bf9343..7c05aebd3c20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -439,6 +439,19 @@ static void mt7925_roc_iter(void *priv, u8 *mac, mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id); }
+void mt7925_roc_abort_sync(struct mt792x_dev *dev) +{ + struct mt792x_phy *phy = &dev->phy; + + del_timer_sync(&phy->roc_timer); + cancel_work_sync(&phy->roc_work); + if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) + ieee80211_iterate_interfaces(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_roc_iter, (void *)phy); +} +EXPORT_SYMBOL_GPL(mt7925_roc_abort_sync); + void mt7925_roc_work(struct work_struct *work) { struct mt792x_phy *phy; @@ -1109,6 +1122,8 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev, msta = (struct mt792x_sta *)link_sta->sta->drv_priv; mlink = mt792x_sta_to_link(msta, link_id);
+ mt7925_roc_abort_sync(dev); + mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid); mt76_connac_pm_wake(&dev->mphy, &dev->pm);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 9dc22fbe25d3..c6c380571fd8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -638,6 +638,9 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name) for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) { clc = (const struct mt7925_clc *)(clc_base + offset);
+ if (clc->idx > ARRAY_SIZE(phy->clc)) + break; + /* do not init buf again if chip reset triggered */ if (phy->clc[clc->idx]) continue; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h index 669f3a079d04..c5ea431998e0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -307,6 +307,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, enum mt7925_roc_req type, u8 token_id); int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, u8 token_id); +void mt7925_roc_abort_sync(struct mt792x_dev *dev); int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *wait_seq); int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index 6e4f4e78c350..39305c39362f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -449,6 +449,8 @@ static int mt7925_pci_suspend(struct device *device) cancel_delayed_work_sync(&pm->ps_work); cancel_work_sync(&pm->wake_work);
+ mt7925_roc_abort_sync(dev); + err = mt792x_mcu_drv_pmctrl(dev); if (err < 0) goto restore_suspend; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c index 283df84f1b43..a98dcb40490b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c @@ -1011,8 +1011,6 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy, return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; - if (vif == NL80211_IFTYPE_AP) - elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, sts - 1) | @@ -1020,6 +1018,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy, sts - 1); elem->phy_cap_info[5] |= c;
+ if (vif != NL80211_IFTYPE_AP) + return; + + elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; + c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB; elem->phy_cap_info[6] |= c; @@ -1179,7 +1182,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
eht_cap_elem->phy_cap_info[0] = - IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE; @@ -1193,30 +1195,36 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)), IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) | u8_encode_bits(val, - IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) | - u8_encode_bits(val, - IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
eht_cap_elem->phy_cap_info[2] = u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) | - u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) | - u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK); + u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK); + + if (band == NL80211_BAND_6GHZ) { + eht_cap_elem->phy_cap_info[0] |= + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + + eht_cap_elem->phy_cap_info[1] |= + u8_encode_bits(val, + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK); + + eht_cap_elem->phy_cap_info[2] |= + u8_encode_bits(sts - 1, + IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK); + }
eht_cap_elem->phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | - IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | - IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
eht_cap_elem->phy_cap_info[4] = u8_encode_bits(min_t(int, sts - 1, 2), IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
eht_cap_elem->phy_cap_info[5] = - IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US, IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) | u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)), @@ -1230,14 +1238,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band, IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) | u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
- eht_cap_elem->phy_cap_info[7] = - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | - IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | - IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; - val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) | u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX); #define SET_EHT_MAX_NSS(_bw, _val) do { \ @@ -1248,8 +1248,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
SET_EHT_MAX_NSS(80, val); SET_EHT_MAX_NSS(160, val); - SET_EHT_MAX_NSS(320, val); + if (band == NL80211_BAND_6GHZ) + SET_EHT_MAX_NSS(320, val); #undef SET_EHT_MAX_NSS + + if (iftype != NL80211_IFTYPE_AP) + return; + + eht_cap_elem->phy_cap_info[3] |= + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK; + + eht_cap_elem->phy_cap_info[7] = + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ; + + if (band != NL80211_BAND_6GHZ) + return; + + eht_cap_elem->phy_cap_info[7] |= + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | + IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; }
static void diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index bc7111a71f98..fd5fe96c32e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -435,7 +435,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); u32 rxd4 = le32_to_cpu(rxd[4]); - u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM; u32 csum_status = *(u32 *)skb->cb; u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP; bool is_mesh = (rxd0 & mesh_mask) == mesh_mask; @@ -497,7 +497,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q, if (!sband->channels) return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask && + if ((rxd3 & csum_mask) == csum_mask && !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index bce082038219..1ab2fb292266 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -206,7 +206,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw, mvif->mt76.omac_idx = idx; mvif->phy = phy; mvif->mt76.band_idx = band_idx; - mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP; + mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
ret = mt7996_mcu_add_dev_info(phy, vif, true); if (ret) @@ -307,6 +307,10 @@ int mt7996_set_channel(struct mt7996_phy *phy) if (ret) goto out;
+ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH); + if (ret) + goto out; + ret = mt7996_dfs_init_radar_detector(phy); mt7996_mac_cca_stats_reset(phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 2e4fa9f48dfb..7c5ec135c685 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -735,7 +735,7 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb) static struct tlv * mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len) { - struct tlv *ptlv = skb_put(skb, len); + struct tlv *ptlv = skb_put_zero(skb, len);
ptlv->tag = cpu_to_le16(tag); ptlv->len = cpu_to_le16(len); @@ -822,7 +822,7 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct bss_info_uni_mbssid *mbssid; struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator) + if (!vif->bss_conf.bssid_indicator && enable) return;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid)); @@ -1429,10 +1429,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
if (bfee) return vif->bss_conf.eht_su_beamformee && - EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]); + EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]); else return vif->bss_conf.eht_su_beamformer && - EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]); + EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]); }
if (sta->deflink.he_cap.has_he) { @@ -1544,6 +1544,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map); u8 snd_dim, sts;
+ if (!vc) + return; + bf->tx_mode = MT_PHY_TYPE_HE_SU;
mt7996_mcu_sta_sounding_rate(bf); @@ -1653,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_phy *phy = mvif->phy; - int tx_ant = hweight8(phy->mt76->chainmask) - 1; + int tx_ant = hweight16(phy->mt76->chainmask) - 1; struct sta_rec_bf *bf; struct tlv *tlv; static const u8 matrix[4][4] = { diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index 3c48e1a57b24..bba53307b960 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -384,6 +384,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss, struct wilc_join_bss_param *param; u8 rates_len = 0; int ies_len; + u64 ies_tsf; int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL); @@ -399,6 +400,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss, return NULL; } ies_len = ies->len; + ies_tsf = ies->tsf; rcu_read_unlock();
param->beacon_period = cpu_to_le16(bss->beacon_interval); @@ -454,7 +456,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *)&noa_attr, sizeof(noa_attr)); if (ret > 0) { - param->tsf_lo = cpu_to_le32(ies->tsf); + param->tsf_lo = cpu_to_le32(ies_tsf); param->noa_enabled = 1; param->idx = noa_attr.index; if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) { diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index de3332eb7a22..a99776af56c2 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) struct rtw_coex_stat *coex_stat = &coex->stat; struct rtw_efuse *efuse = &rtwdev->efuse; u8 table_case, tdma_case; - bool wl_cpt_test = false, bt_cpt_test = false;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); if (efuse->share_ant) { /* Shared-Ant */ - if (wl_cpt_test) { - if (coex_stat->wl_gl_busy) { - table_case = 20; - tdma_case = 17; - } else { - table_case = 10; - tdma_case = 15; - } - } else if (bt_cpt_test) { - table_case = 26; - tdma_case = 26; - } else { - if (coex_stat->wl_gl_busy && - coex_stat->wl_noisy_level == 0) - table_case = 14; - else - table_case = 10; + if (coex_stat->wl_gl_busy && + coex_stat->wl_noisy_level == 0) + table_case = 14; + else + table_case = 10;
- if (coex_stat->wl_gl_busy) - tdma_case = 15; - else - tdma_case = 20; - } + if (coex_stat->wl_gl_busy) + tdma_case = 15; + else + tdma_case = 20; } else { /* Non-Shared-Ant */ table_case = 112; @@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) tdma_case = 120; }
- if (wl_cpt_test) - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]); - else - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index ab7d414d0ba6..b9b0114e253b 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -1468,10 +1468,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, val |= BIT_ENSWBCN >> 8; rtw_write8(rtwdev, REG_CR + 1, val);
- val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); - bckp[1] = val; - val &= ~(BIT_EN_BCNQ_DL >> 16); - rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) { + val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); + bckp[1] = val; + val &= ~(BIT_EN_BCNQ_DL >> 16); + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); + }
ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size); if (ret) { @@ -1496,7 +1498,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, rsvd_pg_head = rtwdev->fifo.rsvd_boundary; rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, rsvd_pg_head | BIT_BCN_VALID_V1); - rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
return ret; diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 7ab7a988b123..33a7577557a5 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1313,20 +1313,21 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw; + int ret = 0;
fw = &rtwdev->fw; wait_for_completion(&fw->completion); if (!fw->firmware) - return -EINVAL; + ret = -EINVAL;
if (chip->wow_fw_name) { fw = &rtwdev->wow_fw; wait_for_completion(&fw->completion); if (!fw->firmware) - return -EINVAL; + ret = -EINVAL; }
- return 0; + return ret; }
static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c index e2c7d9f87683..a019f4085e73 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c @@ -31,8 +31,6 @@ static const struct usb_device_id rtw_8821cu_id_table[] = { .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, - { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 62376d1cca22..732d78765220 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -2611,12 +2611,14 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, else rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
- if (rxsc >= 9 && rxsc <= 12) + if (rxsc == 0) + bw = rtwdev->hal.current_band_width; + else if (rxsc >= 1 && rxsc <= 8) + bw = RTW_CHANNEL_WIDTH_20; + else if (rxsc >= 9 && rxsc <= 12) bw = RTW_CHANNEL_WIDTH_40; - else if (rxsc >= 13) - bw = RTW_CHANNEL_WIDTH_80; else - bw = RTW_CHANNEL_WIDTH_20; + bw = RTW_CHANNEL_WIDTH_80;
channel = GET_PHY_STAT_P1_CHANNEL(phy_status); rtw_set_rx_freq_band(pkt_stat, channel); diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h index d3668c4efc24..8a072dd3d73c 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.h +++ b/drivers/net/wireless/realtek/rtw88/rx.h @@ -41,7 +41,7 @@ enum rtw_rx_desc_enc { #define GET_RX_DESC_TSFL(rxdesc) \ le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0)) #define GET_RX_DESC_BW(rxdesc) \ - (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(31, 24))) + (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(5, 4)))
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct sk_buff *skb); diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 7019f7d482a8..fc172964349b 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -4278,6 +4278,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_init_wait(&rtwdev->mcc.wait); rtw89_init_wait(&rtwdev->mac.fw_ofld_wait); + rtw89_init_wait(&rtwdev->wow.wait);
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work); INIT_WORK(&rtwdev->ips_work, rtw89_ips_work); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 11fa003a9788..9c282d84743b 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -5293,6 +5293,9 @@ struct rtw89_wow_param { u8 gtk_alg; u8 ptk_keyidx; u8 akm; + + /* see RTW89_WOW_WAIT_COND series for wait condition */ + struct rtw89_wait_info wait; };
struct rtw89_mcc_limit { diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index fbe08c162b93..c322148d6daf 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -6825,11 +6825,10 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) { - struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; + struct rtw89_wait_info *wait = &rtwdev->wow.wait; struct rtw89_h2c_wow_aoac *h2c; u32 len = sizeof(*h2c); struct sk_buff *skb; - unsigned int cond;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { @@ -6848,8 +6847,7 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) H2C_FUNC_AOAC_REPORT_REQ, 1, 0, len);
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ); - return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); + return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); }
/* Return < 0, if failures happen during waiting for the condition. diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index c3b4324c621c..df6f2424fa91 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -3942,8 +3942,11 @@ enum rtw89_wow_h2c_func { NUM_OF_RTW89_WOW_H2C_FUNC, };
-#define RTW89_WOW_WAIT_COND(func) \ - (NUM_OF_RTW89_WOW_H2C_FUNC + (func)) +#define RTW89_WOW_WAIT_COND(tag, func) \ + ((tag) * NUM_OF_RTW89_WOW_H2C_FUNC + (func)) + +#define RTW89_WOW_WAIT_COND_AOAC \ + RTW89_WOW_WAIT_COND(0 /* don't care */, H2C_FUNC_AOAC_REPORT_REQ)
/* CLASS 2 - PS */ #define H2C_CL_MAC_PS 0x2 diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index e2399796aeb1..9a4f23d83bf2 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2728,6 +2728,7 @@ bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) { + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 val, reg; int ret;
@@ -2766,6 +2767,12 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU); }
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_VHT_0, mac_idx); + rtw89_write32_mask(rtwdev, reg, + B_AX_AMPDU_MAX_LEN_VHT_MASK, 0x3FF80); + } + return 0; }
@@ -5144,11 +5151,10 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; - struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; + struct rtw89_wait_info *wait = &rtw_wow->wait; const struct rtw89_c2h_wow_aoac_report *c2h = (const struct rtw89_c2h_wow_aoac_report *)skb->data; struct rtw89_completion_data data = {}; - unsigned int cond;
aoac_rpt->rpt_ver = c2h->rpt_ver; aoac_rpt->sec_type = c2h->sec_type; @@ -5166,8 +5172,7 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn); memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk));
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ); - rtw89_complete_cond(wait, cond, &data); + rtw89_complete_cond(wait, RTW89_WOW_WAIT_COND_AOAC, &data); }
static void diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index d5895516b3ed..4c619cec602f 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -421,7 +421,6 @@ enum rtw89_mac_c2h_mrc_func {
enum rtw89_mac_c2h_wow_func { RTW89_MAC_C2H_FUNC_AOAC_REPORT, - RTW89_MAC_C2H_FUNC_READ_WOW_CAM,
NUM_OF_RTW89_MAC_C2H_FUNC_WOW, }; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 7df36f3bff0b..b1c24eedc7e0 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -2440,6 +2440,10 @@ #define B_AX_RTS_TXTIME_TH_MASK GENMASK(15, 8) #define B_AX_RTS_LEN_TH_MASK GENMASK(7, 0)
+#define R_AX_AGG_LEN_VHT_0 0xC618 +#define R_AX_AGG_LEN_VHT_0_C1 0xE618 +#define B_AX_AMPDU_MAX_LEN_VHT_MASK GENMASK(19, 0) + #define S_AX_CTS2S_TH_SEC_256B 1 #define R_AX_SIFS_SETTING 0xC624 #define R_AX_SIFS_SETTING_C1 0xE624 diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index d86e6ff4523d..5fe9e4e26142 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -71,7 +71,7 @@ MODULE_PARM_DESC(mlo, "Support MLO");
static bool multi_radio; module_param(multi_radio, bool, 0444); -MODULE_PARM_DESC(mlo, "Support Multiple Radios per wiphy"); +MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy");
/** * enum hwsim_regtest - the type of regulatory tests we offer diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 9ab836d0d4f1..079b8cd79785 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -778,7 +778,7 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev) ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->ntb.pdev), debugfs_dir); - if (!ndev->debugfs_dir) + if (IS_ERR(ndev->debugfs_dir)) ndev->debugfs_info = NULL; else ndev->debugfs_info = diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 77e55debeed6..ef2855946a99 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -807,16 +807,29 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) }
static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, - struct device *dma_dev, size_t align) + struct device *ntb_dev, size_t align) { dma_addr_t dma_addr; void *alloc_addr, *virt_addr; int rc;
- alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size, - &dma_addr, GFP_KERNEL); + /* + * The buffer here is allocated against the NTB device. The reason to + * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer + * backing the NTB BAR for the remote host to write to. During receive + * processing, the data is being copied out of the receive buffer to + * the kernel skbuff. When a DMA device is being used, dma_map_page() + * is called on the kvaddr of the receive buffer (from dma_alloc_*()) + * and remapped against the DMA device. It appears to be a double + * DMA mapping of buffers, but first is mapped to the NTB device and + * second is to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary + * in order for the later dma_map_page() to not fail. + */ + alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size, + &dma_addr, GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); if (!alloc_addr) { - dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n", + dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n", mw->alloc_size); return -ENOMEM; } @@ -845,7 +858,7 @@ static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, return 0;
err: - dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr); + dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr);
return rc; } diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 553f1f46bc66..72bc1d017a46 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -1227,7 +1227,7 @@ static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf, "\tOut buffer addr 0x%pK\n", peer->outbuf);
pos += scnprintf(buf + pos, buf_size - pos, - "\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr); + "\tOut buff phys addr %pap\n", &peer->out_phys_addr);
pos += scnprintf(buf + pos, buf_size - pos, "\tOut buffer size %pa\n", &peer->outbuf_size); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index d6d558f94d6b..35d9f3cc2efa 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1937,12 +1937,16 @@ static int cmp_dpa(const void *a, const void *b) static struct device **scan_labels(struct nd_region *nd_region) { int i, count = 0; - struct device *dev, **devs = NULL; + struct device *dev, **devs; struct nd_label_ent *label_ent, *e; struct nd_mapping *nd_mapping = &nd_region->mapping[0]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
+ devs = kcalloc(2, sizeof(dev), GFP_KERNEL); + if (!devs) + return NULL; + /* "safe" because create_namespace_pmem() might list_move() label_ent */ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; @@ -1961,12 +1965,14 @@ static struct device **scan_labels(struct nd_region *nd_region) goto err; if (i < count) continue; - __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); - if (!__devs) - goto err; - memcpy(__devs, devs, sizeof(dev) * count); - kfree(devs); - devs = __devs; + if (count) { + __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); + if (!__devs) + goto err; + memcpy(__devs, devs, sizeof(dev) * count); + kfree(devs); + devs = __devs; + }
dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); if (IS_ERR(dev)) { @@ -1993,11 +1999,6 @@ static struct device **scan_labels(struct nd_region *nd_region)
/* Publish a zero-sized namespace for userspace to configure. */ nd_mapping_free_labels(nd_mapping); - - devs = kcalloc(2, sizeof(dev), GFP_KERNEL); - if (!devs) - goto err; - nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); if (!nspm) goto err; @@ -2036,11 +2037,10 @@ static struct device **scan_labels(struct nd_region *nd_region) return devs;
err: - if (devs) { - for (i = 0; devs[i]; i++) - namespace_pmem_release(devs[i]); - kfree(devs); - } + for (i = 0; devs[i]; i++) + namespace_pmem_release(devs[i]); + kfree(devs); + return NULL; }
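The scan_labels() rework above allocates the NULL-terminated devs[] array up front, so the error path can walk and free it unconditionally instead of special-casing a NULL pointer. As a generic illustration of that idiom (release_one() is a hypothetical destructor for the example, not a libnvdimm helper):

/* Generic sketch of the idiom used above: keep a NULL-terminated array
 * of pointers so one error path can release whatever was created.
 */
#include <linux/slab.h>

static void **alloc_list(void)
{
	/* room for one entry plus the NULL terminator */
	return kcalloc(2, sizeof(void *), GFP_KERNEL);
}

static void free_list(void **list, void (*release_one)(void *))
{
	int i;

	for (i = 0; list[i]; i++)
		release_one(list[i]);
	kfree(list);
}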
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 518e22dd4f9b..6d97058cde7a 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -648,7 +648,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) rc = device_add_disk(&head->subsys->dev, head->disk, nvme_ns_attr_groups); if (rc) { - clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); + clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); return; } nvme_add_ns_head_cdev(head); diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 4fe3b0cb72ec..5c62e1a3ba52 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -850,14 +850,21 @@ static int dra7xx_pcie_probe(struct platform_device *pdev) dra7xx->mode = mode;
ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler, - IRQF_SHARED, "dra7xx-pcie-main", dra7xx); + IRQF_SHARED | IRQF_ONESHOT, + "dra7xx-pcie-main", dra7xx); if (ret) { dev_err(dev, "failed to request irq\n"); - goto err_gpio; + goto err_deinit; }
return 0;
+err_deinit: + if (dra7xx->mode == DW_PCIE_RC_TYPE) + dw_pcie_host_deinit(&dra7xx->pci->pp); + else + dw_pcie_ep_deinit(&dra7xx->pci->ep); + err_gpio: err_get_sync: pm_runtime_put(dev); diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 964d67756eb2..eaec471c4623 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -953,7 +953,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) ret = phy_power_on(imx6_pcie->phy); if (ret) { dev_err(dev, "waiting for PHY ready timeout!\n"); - goto err_phy_off; + goto err_phy_exit; } }
@@ -968,8 +968,9 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp) return 0;
err_phy_off: - if (imx6_pcie->phy) - phy_exit(imx6_pcie->phy); + phy_power_off(imx6_pcie->phy); +err_phy_exit: + phy_exit(imx6_pcie->phy); err_clk_disable: imx6_pcie_clk_disable(imx6_pcie); err_reg_disable: @@ -1113,6 +1114,8 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT)) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ep->page_size = imx6_pcie->drvdata->epc_features->align; + ret = dw_pcie_ep_init(ep); if (ret) { dev_err(dev, "failed to initialize endpoint\n"); @@ -1562,7 +1565,8 @@ static const struct imx6_pcie_drvdata drvdata[] = { }, [IMX8MM_EP] = { .variant = IMX8MM_EP, - .flags = IMX6_PCIE_FLAG_HAS_PHYDRV, + .flags = IMX6_PCIE_FLAG_HAS_APP_RESET | + IMX6_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mm-iomuxc-gpr", .clk_names = imx8mm_clks, @@ -1573,7 +1577,8 @@ static const struct imx6_pcie_drvdata drvdata[] = { }, [IMX8MP_EP] = { .variant = IMX8MP_EP, - .flags = IMX6_PCIE_FLAG_HAS_PHYDRV, + .flags = IMX6_PCIE_FLAG_HAS_APP_RESET | + IMX6_PCIE_FLAG_HAS_PHYDRV, .mode = DW_PCIE_EP_TYPE, .gpr = "fsl,imx8mp-iomuxc-gpr", .clk_names = imx8mm_clks, diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 52c6420ae200..95a471d6a586 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -577,7 +577,7 @@ static void ks_pcie_quirk(struct pci_dev *dev) */ if (pci_match_id(am6_pci_devids, bridge)) { bridge_dev = pci_get_host_bridge_device(dev); - if (!bridge_dev && !bridge_dev->parent) + if (!bridge_dev || !bridge_dev->parent) return;
ks_pcie = dev_get_drvdata(bridge_dev->parent); diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index 0a29136491b8..85a2c77b1835 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -420,11 +420,11 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie, "unable to get a valid reset gpio\n"); }
- pcie->num_slots++; - if (pcie->num_slots > MAX_PCI_SLOTS) { + if (pcie->num_slots + 1 >= MAX_PCI_SLOTS) { dev_err(dev, "Too many PCI slots!\n"); return -EINVAL; } + pcie->num_slots++;
ret = of_pci_get_devfn(child); if (ret < 0) { diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index a9b263f749b6..c8bb7cd89678 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -858,21 +858,15 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev) if (ret) return ret;
- ret = qcom_pcie_enable_resources(pcie_ep); - if (ret) { - dev_err(dev, "Failed to enable resources: %d\n", ret); - return ret; - } - ret = dw_pcie_ep_init(&pcie_ep->pci.ep); if (ret) { dev_err(dev, "Failed to initialize endpoint: %d\n", ret); - goto err_disable_resources; + return ret; }
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep); if (ret) - goto err_disable_resources; + goto err_ep_deinit;
name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); if (!name) { @@ -889,8 +883,8 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev) disable_irq(pcie_ep->global_irq); disable_irq(pcie_ep->perst_irq);
-err_disable_resources: - qcom_pcie_disable_resources(pcie_ep); +err_ep_deinit: + dw_pcie_ep_deinit(&pcie_ep->pci.ep);
return ret; } diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 0408f4d612b5..7417993f8cff 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -80,8 +80,8 @@ #define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) #define MSGF_MISC_SR_FATAL_DEV BIT(23) #define MSGF_MISC_SR_LINK_DOWN BIT(24) -#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) -#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) +#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25) +#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ MSGF_MISC_SR_RXMSG_OVER | \ @@ -96,8 +96,8 @@ MSGF_MISC_SR_NON_FATAL_DEV | \ MSGF_MISC_SR_FATAL_DEV | \ MSGF_MISC_SR_LINK_DOWN | \ - MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ - MSGF_MSIC_SR_LINK_BWIDTH) + MSGF_MISC_SR_LINK_AUTO_BWIDTH | \ + MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */ #define MSGF_LEG_SR_INTA BIT(0) @@ -299,10 +299,10 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) if (misc_stat & MSGF_MISC_SR_FATAL_DEV) dev_err(dev, "Fatal Error Detected\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) + if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH) dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) + if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH) dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */ @@ -371,7 +371,7 @@ static void nwl_mask_intx_irq(struct irq_data *data) u32 mask; u32 val;
- mask = 1 << (data->hwirq - 1); + mask = 1 << data->hwirq; raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); @@ -385,7 +385,7 @@ static void nwl_unmask_intx_irq(struct irq_data *data) u32 mask; u32 val;
- mask = 1 << (data->hwirq - 1); + mask = 1 << data->hwirq; raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); @@ -779,6 +779,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) return -ENODEV;
pcie = pci_host_bridge_priv(bridge); + platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
@@ -801,13 +802,13 @@ static int nwl_pcie_probe(struct platform_device *pdev) err = nwl_pcie_bridge_init(pcie); if (err) { dev_err(dev, "HW Initialization failed\n"); - return err; + goto err_clk; }
err = nwl_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); - return err; + goto err_clk; }
bridge->sysdata = pcie; @@ -817,11 +818,24 @@ static int nwl_pcie_probe(struct platform_device *pdev) err = nwl_pcie_enable_msi(pcie); if (err < 0) { dev_err(dev, "failed to enable MSI support: %d\n", err); - return err; + goto err_clk; } }
- return pci_host_probe(bridge); + err = pci_host_probe(bridge); + if (!err) + return 0; + +err_clk: + clk_disable_unprepare(pcie->clk); + return err; +} + +static void nwl_pcie_remove(struct platform_device *pdev) +{ + struct nwl_pcie *pcie = platform_get_drvdata(pdev); + + clk_disable_unprepare(pcie->clk); }
static struct platform_driver nwl_pcie_driver = { @@ -831,5 +845,6 @@ static struct platform_driver nwl_pcie_driver = { .of_match_table = nwl_pcie_of_match, }, .probe = nwl_pcie_probe, + .remove_new = nwl_pcie_remove, }; builtin_platform_driver(nwl_pcie_driver); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index ffaaca0978cb..ad2d571ccbc1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1324,7 +1324,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) if (delay > PCI_RESET_WAIT) { if (retrain) { retrain = false; - if (pcie_failed_link_retrain(bridge)) { + if (pcie_failed_link_retrain(bridge) == 0) { delay = 1; continue; } @@ -4718,7 +4718,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); }
- return pcie_wait_for_link_status(pdev, use_lt, !use_lt); + rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); + + /* + * Clear LBMS after a manual retrain so that the bit can be used + * to track link speed or width changes made by hardware itself + * in attempt to correct unreliable link operation. + */ + pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); + return rc; }
/** @@ -5672,8 +5680,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_restore(dev); - if (dev->subordinate) + if (dev->subordinate) { + pci_bridge_wait_for_secondary_bus(dev, "bus reset"); pci_bus_restore_locked(dev->subordinate); + } } }
@@ -5707,8 +5717,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot) if (!dev->slot || dev->slot != slot) continue; pci_dev_restore(dev); - if (dev->subordinate) + if (dev->subordinate) { + pci_bridge_wait_for_secondary_bus(dev, "slot reset"); pci_bus_restore_locked(dev->subordinate); + } } }
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 79c8398f3938..7c06e55c5072 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -606,7 +606,7 @@ void pci_acs_init(struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); int pci_dev_specific_enable_acs(struct pci_dev *dev); int pci_dev_specific_disable_acs_redir(struct pci_dev *dev); -bool pcie_failed_link_retrain(struct pci_dev *dev); +int pcie_failed_link_retrain(struct pci_dev *dev); #else static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) @@ -621,9 +621,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) { return -ENOTTY; } -static inline bool pcie_failed_link_retrain(struct pci_dev *dev) +static inline int pcie_failed_link_retrain(struct pci_dev *dev) { - return false; + return -ENOTTY; } #endif
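The pci.h and quirks.c hunks around here convert pcie_failed_link_retrain() from returning bool to the usual 0/-errno convention, which is why the pci_dev_wait() caller earlier now tests for "== 0". A tiny illustration of what that convention change means for callers (try_fixup()/try_fixup_errno() are made-up names for the example, not PCI core APIs):

/* Illustration of the bool -> errno conversion: a bool helper is tested
 * for true, an errno-style helper is tested for 0 on success.
 */
#include <linux/errno.h>

static bool try_fixup(void)		/* old style: true on success */
{
	return false;
}

static int try_fixup_errno(void)	/* new style: 0 or -errno */
{
	return -ENOTTY;
}

static void caller(void)
{
	if (try_fixup())		/* old check */
		return;
	if (try_fixup_errno() == 0)	/* new check, as in pci_dev_wait() */
		return;
}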
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a2ce4e08edf5..5d57ea27dbc4 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -66,7 +66,7 @@ * apply this erratum workaround to any downstream ports as long as they * support Link Active reporting and have the Link Control 2 register. * Restrict the speed to 2.5GT/s then with the Target Link Speed field, - * request a retrain and wait 200ms for the data link to go up. + * request a retrain and check the result. * * If this turns out successful and we know by the Vendor:Device ID it is * safe to do so, then lift the restriction, letting the devices negotiate @@ -74,33 +74,45 @@ * firmware may have already arranged and lift it with ports that already * report their data link being up. * - * Return TRUE if the link has been successfully retrained, otherwise FALSE. + * Otherwise revert the speed to the original setting and request a retrain + * again to remove any residual state, ignoring the result as it's supposed + * to fail anyway. + * + * Return 0 if the link has been successfully retrained. Return an error + * if retraining was not needed or we attempted a retrain and it failed. */ -bool pcie_failed_link_retrain(struct pci_dev *dev) +int pcie_failed_link_retrain(struct pci_dev *dev) { static const struct pci_device_id ids[] = { { PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */ {} }; u16 lnksta, lnkctl2; + int ret = -ENOTTY;
if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) || !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting) - return false; + return ret;
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) == PCI_EXP_LNKSTA_LBMS) { + u16 oldlnkctl2 = lnkctl2; + pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT; pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
- if (pcie_retrain_link(dev, false)) { + ret = pcie_retrain_link(dev, false); + if (ret) { pci_info(dev, "retraining failed\n"); - return false; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, + oldlnkctl2); + pcie_retrain_link(dev, true); + return ret; }
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); @@ -117,13 +129,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev) lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS; pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
- if (pcie_retrain_link(dev, false)) { + ret = pcie_retrain_link(dev, false); + if (ret) { pci_info(dev, "retraining failed\n"); - return false; + return ret; } }
- return true; + return ret; }
static ktime_t fixup_debug_start(struct pci_dev *dev, diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c index 38a2947ae813..c6ff1bc7d336 100644 --- a/drivers/perf/alibaba_uncore_drw_pmu.c +++ b/drivers/perf/alibaba_uncore_drw_pmu.c @@ -400,7 +400,7 @@ static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data) }
/* clear common counter intr status */ - clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1); + clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status); writel(clr_status, drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR); } diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index c932d9d355cf..48863b31ccfb 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -24,14 +24,6 @@ #define CMN_NI_NODE_ID GENMASK_ULL(31, 16) #define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
-#define CMN_NODEID_DEVID(reg) ((reg) & 3) -#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1) -#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1) -#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3) -#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7) -#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits))) -#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1)) - #define CMN_CHILD_INFO 0x0080 #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) @@ -43,6 +35,9 @@ #define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION) #define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
+/* Currently XPs are the node type we can have most of; others top out at 128 */ +#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS + /* The CFG node has various info besides the discovery tree */ #define CMN_CFGM_PERIPH_ID_01 0x0008 #define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0) @@ -78,7 +73,8 @@ /* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */ #define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
-/* HN-Ps are weird... */ +/* Some types are designed to coexist with another device in the same node */ +#define CMN_CCLA_PMU_EVENT_SEL 0x008 #define CMN_HNP_PMU_EVENT_SEL 0x008
/* DTMs live in the PMU space of XP registers */ @@ -280,8 +276,11 @@ struct arm_cmn_node { u16 id, logid; enum cmn_node_type type;
+ /* XP properties really, but replicated to children for convenience */ u8 dtm; s8 dtc; + u8 portid_bits:4; + u8 deviceid_bits:4; /* DN/HN-F/CXHA */ struct { u8 val : 4; @@ -357,49 +356,33 @@ struct arm_cmn { static int arm_cmn_hp_state;
struct arm_cmn_nodeid { - u8 x; - u8 y; u8 port; u8 dev; };
static int arm_cmn_xyidbits(const struct arm_cmn *cmn) { - return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2); + return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1)); }
-static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) +static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn) { struct arm_cmn_nodeid nid;
- if (cmn->num_xps == 1) { - nid.x = 0; - nid.y = 0; - nid.port = CMN_NODEID_1x1_PID(id); - nid.dev = CMN_NODEID_DEVID(id); - } else { - int bits = arm_cmn_xyidbits(cmn); - - nid.x = CMN_NODEID_X(id, bits); - nid.y = CMN_NODEID_Y(id, bits); - if (cmn->ports_used & 0xc) { - nid.port = CMN_NODEID_EXT_PID(id); - nid.dev = CMN_NODEID_EXT_DEVID(id); - } else { - nid.port = CMN_NODEID_PID(id); - nid.dev = CMN_NODEID_DEVID(id); - } - } + nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1); + nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1); return nid; }
static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn, const struct arm_cmn_node *dn) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); - int xp_idx = cmn->mesh_x * nid.y + nid.x; + int id = dn->id >> (dn->portid_bits + dn->deviceid_bits); + int bits = arm_cmn_xyidbits(cmn); + int x = id >> bits; + int y = id & ((1U << bits) - 1);
- return cmn->xps + xp_idx; + return cmn->xps + cmn->mesh_x * y + x; } static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, enum cmn_node_type type) @@ -485,13 +468,13 @@ static const char *arm_cmn_device_type(u8 type) } }
-static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) +static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d) { struct arm_cmn *cmn = s->private; struct arm_cmn_node *dn; + u16 id = xp->id | d | (p << xp->deviceid_bits);
for (dn = cmn->dns; dn->type; dn++) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); int pad = dn->logid < 10;
if (dn->type == CMN_TYPE_XP) @@ -500,7 +483,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) if (dn->type < CMN_TYPE_HNI) continue;
- if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d) + if (dn->id != id) continue;
seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid); @@ -521,6 +504,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) y = cmn->mesh_y; while (y--) { int xp_base = cmn->mesh_x * y; + struct arm_cmn_node *xp = cmn->xps + xp_base; u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
for (x = 0; x < cmn->mesh_x; x++) @@ -528,16 +512,14 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_printf(s, "\n%-2d |", y); for (x = 0; x < cmn->mesh_x; x++) { - struct arm_cmn_node *xp = cmn->xps + xp_base + x; - for (p = 0; p < CMN_MAX_PORTS; p++) - port[p][x] = arm_cmn_device_connect_info(cmn, xp, p); + port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p); seq_printf(s, " XP #%-3d|", xp_base + x); }
seq_puts(s, "\n |"); for (x = 0; x < cmn->mesh_x; x++) { - s8 dtc = cmn->xps[xp_base + x].dtc; + s8 dtc = xp[x].dtc;
if (dtc < 0) seq_puts(s, " DTC ?? |"); @@ -554,10 +536,10 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) seq_puts(s, arm_cmn_device_type(port[p][x])); seq_puts(s, "\n 0|"); for (x = 0; x < cmn->mesh_x; x++) - arm_cmn_show_logid(s, x, y, p, 0); + arm_cmn_show_logid(s, xp + x, p, 0); seq_puts(s, "\n 1|"); for (x = 0; x < cmn->mesh_x; x++) - arm_cmn_show_logid(s, x, y, p, 1); + arm_cmn_show_logid(s, xp + x, p, 1); } seq_puts(s, "\n-----+"); } @@ -585,7 +567,7 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
struct arm_cmn_hw_event { struct arm_cmn_node *dn; - u64 dtm_idx[4]; + u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)]; s8 dtc_idx[CMN_MAX_DTCS]; u8 num_dns; u8 dtm_offset; @@ -1815,10 +1797,7 @@ static int arm_cmn_event_init(struct perf_event *event) }
if (!hw->num_dns) { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid); - - dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", - nodeid, nid.x, nid.y, nid.port, nid.dev, type); + dev_dbg(cmn->dev, "invalid node 0x%x type 0x%x\n", nodeid, type); return -EINVAL; }
@@ -1921,7 +1900,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) arm_cmn_claim_wp_idx(dtm, event, d, wp_idx, i); writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); } else { - struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + struct arm_cmn_nodeid nid = arm_cmn_nid(dn);
if (cmn->multi_dtm) nid.port %= 2; @@ -2168,10 +2147,12 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) continue;
xp = arm_cmn_node_to_xp(cmn, dn); + dn->portid_bits = xp->portid_bits; + dn->deviceid_bits = xp->deviceid_bits; dn->dtc = xp->dtc; dn->dtm = xp->dtm; if (cmn->multi_dtm) - dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; + dn->dtm += arm_cmn_nid(dn).port / 2;
if (dn->type == CMN_TYPE_DTC) { int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); @@ -2341,18 +2322,27 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) arm_cmn_init_dtm(dtm++, xp, 0); /* * Keeping track of connected ports will let us filter out - * unnecessary XP events easily. We can also reliably infer the - * "extra device ports" configuration for the node ID format - * from this, since in that case we will see at least one XP - * with port 2 connected, for the HN-D. + * unnecessary XP events easily, and also infer the per-XP + * part of the node ID format. */ for (int p = 0; p < CMN_MAX_PORTS; p++) if (arm_cmn_device_connect_info(cmn, xp, p)) xp_ports |= BIT(p);
- if (cmn->multi_dtm && (xp_ports & 0xc)) + if (cmn->num_xps == 1) { + xp->portid_bits = 3; + xp->deviceid_bits = 2; + } else if (xp_ports > 0x3) { + xp->portid_bits = 2; + xp->deviceid_bits = 1; + } else { + xp->portid_bits = 1; + xp->deviceid_bits = 2; + } + + if (cmn->multi_dtm && (xp_ports > 0x3)) arm_cmn_init_dtm(dtm++, xp, 1); - if (cmn->multi_dtm && (xp_ports & 0x30)) + if (cmn->multi_dtm && (xp_ports > 0xf)) arm_cmn_init_dtm(dtm++, xp, 2);
cmn->ports_used |= xp_ports; @@ -2407,10 +2397,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_CXHA: case CMN_TYPE_CCRA: case CMN_TYPE_CCHA: - case CMN_TYPE_CCLA: case CMN_TYPE_HNS: dn++; break; + case CMN_TYPE_CCLA: + dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL; + dn++; + break; /* Nothing to see here */ case CMN_TYPE_MPAM_S: case CMN_TYPE_MPAM_NS: @@ -2428,7 +2421,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_HNP: case CMN_TYPE_CCLA_RNI: dn[1] = dn[0]; - dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL; + dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL; dn[1].type = arm_cmn_subtype(dn->type); dn += 2; break; diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index c5e328f23841..f205ecad2e4c 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -556,10 +556,10 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev) { struct platform_device *plat_dev; struct dwc_pcie_dev_info *dev_info; - u32 bdf; + u32 sbdf;
- bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); - plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf, + sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn); + plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf, pdev, sizeof(*pdev));
if (IS_ERR(plat_dev)) @@ -611,15 +611,15 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) struct pci_dev *pdev = plat_dev->dev.platform_data; struct dwc_pcie_pmu *pcie_pmu; char *name; - u32 bdf, val; + u32 sbdf, val; u16 vsec; int ret;
vsec = pci_find_vsec_capability(pdev, pdev->vendor, DWC_PCIE_VSEC_RAS_DES_ID); pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); - bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); - name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf); + sbdf = plat_dev->id; + name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf); if (!name) return -ENOMEM;
@@ -650,7 +650,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state, &pcie_pmu->cpuhp_node); if (ret) { - pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf); + pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf); return ret; }
@@ -663,7 +663,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); if (ret) { - pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf); + pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf); return ret; } ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu, @@ -726,7 +726,6 @@ static struct platform_driver dwc_pcie_pmu_driver = { static int __init dwc_pcie_pmu_init(void) { struct pci_dev *pdev = NULL; - bool found = false; int ret;
for_each_pci_dev(pdev) { @@ -738,11 +737,7 @@ static int __init dwc_pcie_pmu_init(void) pci_dev_put(pdev); return ret; } - - found = true; } - if (!found) - return -ENODEV;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/dwc_pcie_pmu:online", diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c index f06027574a24..f7d6c59d9930 100644 --- a/drivers/perf/hisilicon/hisi_pcie_pmu.c +++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -208,7 +208,7 @@ static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event) { u64 port, trig_len, thr_len, len_mode; - u64 reg = HISI_PCIE_INIT_SET; + u64 reg = 0;
/* Config HISI_PCIE_EVENT_CTRL according to event. */ reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event)); @@ -452,10 +452,24 @@ static void hisi_pcie_pmu_set_period(struct perf_event *event) struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + u64 orig_cnt, cnt; + + orig_cnt = hisi_pcie_pmu_read_counter(event);
local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL); hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL); hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL); + + /* + * The counter maybe unwritable if the target event is unsupported. + * Check this by comparing the counts after setting the period. If + * the counts stay unchanged after setting the period then update + * the hwc->prev_count correctly. Otherwise the final counts user + * get maybe totally wrong. + */ + cnt = hisi_pcie_pmu_read_counter(event); + if (orig_cnt == cnt) + local64_set(&hwc->prev_count, cnt); }
static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index 946c01210ac8..3bd9b62b23dc 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -15,6 +15,7 @@ #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/rational.h> #include <linux/regmap.h> #include <linux/reset.h> diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c index 1947da73e512..dce601d99372 100644 --- a/drivers/pinctrl/mvebu/pinctrl-dove.c +++ b/drivers/pinctrl/mvebu/pinctrl-dove.c @@ -767,7 +767,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev) struct resource fb_res; struct mvebu_mpp_ctrl_data *mpp_data; void __iomem *base; - int i; + int i, ret;
pdev->dev.platform_data = (void *)device_get_match_data(&pdev->dev);
@@ -783,13 +783,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev) clk_prepare_enable(clk);
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mpp_res); - if (IS_ERR(base)) - return PTR_ERR(base); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + goto err_probe; + }
mpp_data = devm_kcalloc(&pdev->dev, dove_pinctrl_info.ncontrols, sizeof(*mpp_data), GFP_KERNEL); - if (!mpp_data) - return -ENOMEM; + if (!mpp_data) { + ret = -ENOMEM; + goto err_probe; + }
dove_pinctrl_info.control_data = mpp_data; for (i = 0; i < ARRAY_SIZE(dove_mpp_controls); i++) @@ -808,8 +812,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev) }
mpp4_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mpp4_base)) - return PTR_ERR(mpp4_base); + if (IS_ERR(mpp4_base)) { + ret = PTR_ERR(mpp4_base); + goto err_probe; + }
res = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (!res) { @@ -820,8 +826,10 @@ static int dove_pinctrl_probe(struct platform_device *pdev) }
pmu_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pmu_base)) - return PTR_ERR(pmu_base); + if (IS_ERR(pmu_base)) { + ret = PTR_ERR(pmu_base); + goto err_probe; + }
gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config"); if (IS_ERR(gconfmap)) { @@ -831,12 +839,17 @@ static int dove_pinctrl_probe(struct platform_device *pdev) adjust_resource(&fb_res, (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14); gc_base = devm_ioremap_resource(&pdev->dev, &fb_res); - if (IS_ERR(gc_base)) - return PTR_ERR(gc_base); + if (IS_ERR(gc_base)) { + ret = PTR_ERR(gc_base); + goto err_probe; + } + gconfmap = devm_regmap_init_mmio(&pdev->dev, gc_base, &gc_regmap_config); - if (IS_ERR(gconfmap)) - return PTR_ERR(gconfmap); + if (IS_ERR(gconfmap)) { + ret = PTR_ERR(gconfmap); + goto err_probe; + } }
/* Warn on any missing DT resource */ @@ -844,6 +857,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev) dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
return mvebu_pinctrl_probe(pdev); +err_probe: + clk_disable_unprepare(clk); + return ret; }
static struct platform_driver dove_pinctrl_driver = { diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 4da3c3f422b6..2ec599e383e4 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1913,7 +1913,8 @@ static int pcs_probe(struct platform_device *pdev)
dev_info(pcs->dev, "%i pins, size %u\n", pcs->desc.npins, pcs->size);
- if (pinctrl_enable(pcs->pctl)) + ret = pinctrl_enable(pcs->pctl); + if (ret) goto free;
return 0; diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 632180570b70..3ef20f2fa88e 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -1261,7 +1261,9 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev, break;
case PIN_CONFIG_OUTPUT_ENABLE: - if (!pctrl->data->oen_read || !(cfg & PIN_CFG_OEN)) + if (!(cfg & PIN_CFG_OEN)) + return -EINVAL; + if (!pctrl->data->oen_read) return -EOPNOTSUPP; arg = pctrl->data->oen_read(pctrl, _pin); if (!arg) @@ -1402,7 +1404,9 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT_ENABLE: arg = pinconf_to_config_argument(_configs[i]); - if (!pctrl->data->oen_write || !(cfg & PIN_CFG_OEN)) + if (!(cfg & PIN_CFG_OEN)) + return -EINVAL; + if (!pctrl->data->oen_write) return -EOPNOTSUPP; ret = pctrl->data->oen_write(pctrl, _pin, !!arg); if (ret) diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index f5e5a23d2226..451801acdc40 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -273,6 +273,22 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod, return r; }
+/** + * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device + * @data: IODelay device + * + * Deinitialize the IODelay device (basically just lock the region back up. + */ +static void ti_iodelay_pinconf_deinit_dev(void *data) +{ + struct ti_iodelay_device *iod = data; + const struct ti_iodelay_reg_data *reg = iod->reg_data; + + /* lock the iodelay region back again */ + regmap_update_bits(iod->regmap, reg->reg_global_lock_offset, + reg->global_lock_mask, reg->global_lock_val); +} + /** * ti_iodelay_pinconf_init_dev() - Initialize IODelay device * @iod: iodelay device @@ -295,6 +311,11 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) if (r) return r;
+ r = devm_add_action_or_reset(iod->dev, ti_iodelay_pinconf_deinit_dev, + iod); + if (r) + return r; + /* Read up Recalibration sequence done by bootloader */ r = regmap_read(iod->regmap, reg->reg_refclk_offset, &val); if (r) @@ -353,21 +374,6 @@ static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) return 0; }
-/** - * ti_iodelay_pinconf_deinit_dev() - deinit the iodelay device - * @iod: IODelay device - * - * Deinitialize the IODelay device (basically just lock the region back up. - */ -static void ti_iodelay_pinconf_deinit_dev(struct ti_iodelay_device *iod) -{ - const struct ti_iodelay_reg_data *reg = iod->reg_data; - - /* lock the iodelay region back again */ - regmap_update_bits(iod->regmap, reg->reg_global_lock_offset, - reg->global_lock_mask, reg->global_lock_val); -} - /** * ti_iodelay_get_pingroup() - Find the group mapped by a group selector * @iod: iodelay device @@ -877,27 +883,11 @@ static int ti_iodelay_probe(struct platform_device *pdev) return ret; }
- platform_set_drvdata(pdev, iod); - return pinctrl_enable(iod->pctl); }
-/** - * ti_iodelay_remove() - standard remove - * @pdev: platform device - */ -static void ti_iodelay_remove(struct platform_device *pdev) -{ - struct ti_iodelay_device *iod = platform_get_drvdata(pdev); - - ti_iodelay_pinconf_deinit_dev(iod); - - /* Expect other allocations to be freed by devm */ -} - static struct platform_driver ti_iodelay_driver = { .probe = ti_iodelay_probe, - .remove_new = ti_iodelay_remove, .driver = { .name = DRIVER_NAME, .of_match_table = ti_iodelay_of_match, diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c index ad953fb3c37a..9a1d9292dc9a 100644 --- a/drivers/platform/cznic/turris-omnia-mcu-trng.c +++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c @@ -70,8 +70,8 @@ int omnia_mcu_register_trng(struct omnia_mcu *mcu)
irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)]; irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx)); - if (!irq) - return dev_err_probe(dev, -ENXIO, "Cannot get TRNG IRQ\n"); + if (irq < 0) + return dev_err_probe(dev, irq, "Cannot get TRNG IRQ\n");
/* * If someone else cleared the TRNG interrupt but did not read the diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 98ec30fce9fd..b58df617d4fd 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -419,13 +419,14 @@ static ssize_t camera_power_show(struct device *dev, char *buf) { struct ideapad_private *priv = dev_get_drvdata(dev); - unsigned long result; + unsigned long result = 0; int err;
- scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result); - if (err) - return err; + if (err) + return err; + }
return sysfs_emit(buf, "%d\n", !!result); } @@ -442,10 +443,11 @@ static ssize_t camera_power_store(struct device *dev, if (err) return err;
- scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state); - if (err) - return err; + if (err) + return err; + }
return count; } @@ -493,13 +495,14 @@ static ssize_t fan_mode_show(struct device *dev, char *buf) { struct ideapad_private *priv = dev_get_drvdata(dev); - unsigned long result; + unsigned long result = 0; int err;
- scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result); - if (err) - return err; + if (err) + return err; + }
return sysfs_emit(buf, "%lu\n", result); } @@ -519,10 +522,11 @@ static ssize_t fan_mode_store(struct device *dev, if (state > 4 || state == 3) return -EINVAL;
- scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state); - if (err) - return err; + if (err) + return err; + }
return count; } @@ -602,13 +606,14 @@ static ssize_t touchpad_show(struct device *dev, char *buf) { struct ideapad_private *priv = dev_get_drvdata(dev); - unsigned long result; + unsigned long result = 0; int err;
- scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result); - if (err) - return err; + if (err) + return err; + }
priv->r_touchpad_val = result;
@@ -627,10 +632,11 @@ static ssize_t touchpad_store(struct device *dev, if (err) return err;
- scoped_guard(mutex, &priv->vpc_mutex) + scoped_guard(mutex, &priv->vpc_mutex) { err = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state); - if (err) - return err; + if (err) + return err; + }
priv->r_touchpad_val = state;
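The ideapad-laptop hunks above wrap each scoped_guard() body in braces and pre-initialise the value read under the lock. For context, here is a hedged sketch of how scoped_guard() from <linux/cleanup.h> is typically used with a mutex; my_lock and read_hw() are placeholders, not ideapad-laptop symbols.

/* Sketch of scoped_guard() usage as applied above: the braces make it
 * explicit which statements run under the lock, and any value consumed
 * afterwards is initialised beforehand. The lock is dropped on every
 * scope exit, including an early return.
 */
#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

static int read_hw(unsigned long *val)
{
	*val = 0;	/* stand-in for the real EC/hardware read */
	return 0;
}

static int show_value(unsigned long *out)
{
	unsigned long result = 0;	/* defined even if the read fails */
	int err;

	scoped_guard(mutex, &my_lock) {
		err = read_hw(&result);
		if (err)
			return err;	/* guard releases the mutex here */
	}

	*out = result;
	return 0;
}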
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 7a61aa88c061..acdc3e7b2eae 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -3190,7 +3190,7 @@ static void mode_status_str(struct seq_file *s, struct device *dev)
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- seq_printf(s, "%20s", gpd_data->hw_mode ? "HW" : "SW"); + seq_printf(s, "%9s", gpd_data->hw_mode ? "HW" : "SW"); }
static void perf_status_str(struct seq_file *s, struct device *dev) @@ -3198,7 +3198,7 @@ static void perf_status_str(struct seq_file *s, struct device *dev) struct generic_pm_domain_data *gpd_data;
gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - seq_put_decimal_ull(s, "", gpd_data->performance_state); + seq_printf(s, "%-10u ", gpd_data->performance_state); }
static int genpd_summary_one(struct seq_file *s, @@ -3226,7 +3226,7 @@ static int genpd_summary_one(struct seq_file *s, else snprintf(state, sizeof(state), "%s", status_lookup[genpd->status]); - seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state); + seq_printf(s, "%-30s %-49s %u", genpd->name, state, genpd->performance_state);
/* * Modifications on the list require holding locks on both diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index 6ac5c80cfda2..7520b599eb3d 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -303,11 +303,11 @@ static int axp20x_battery_get_prop(struct power_supply *psy, val->intval = reg & AXP209_FG_PERCENT; break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: return axp20x_batt->data->get_max_voltage(axp20x_batt, &val->intval);
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: ret = regmap_read(axp20x_batt->regmap, AXP20X_V_OFF, &reg); if (ret) return ret; @@ -455,10 +455,10 @@ static int axp20x_battery_set_prop(struct power_supply *psy, struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy);
switch (psp) { - case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: return axp20x_set_voltage_min_design(axp20x_batt, val->intval);
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: return axp20x_batt->data->set_max_voltage(axp20x_batt, val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: @@ -493,8 +493,8 @@ static enum power_supply_property axp20x_battery_props[] = { POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_HEALTH, - POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, - POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_CAPACITY, };
@@ -502,8 +502,8 @@ static int axp20x_battery_prop_writeable(struct power_supply *psy, enum power_supply_property psp) { return psp == POWER_SUPPLY_PROP_STATUS || - psp == POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN || - psp == POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN || + psp == POWER_SUPPLY_PROP_VOLTAGE_MIN || + psp == POWER_SUPPLY_PROP_VOLTAGE_MAX || psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT || psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX; } diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index e7d37e422c3f..496c3e1f2ee6 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -853,7 +853,10 @@ static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off) /* program interrupt thresholds such that we should * get interrupt for every 'off' perc change in the soc */ - regmap_read(map, MAX17042_RepSOC, &soc); + if (chip->pdata->enable_current_sense) + regmap_read(map, MAX17042_RepSOC, &soc); + else + regmap_read(map, MAX17042_VFSOC, &soc); soc >>= 8; soc_tr = (soc + off) << 8; if (off < soc) diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 7c0cea2c828d..b6f682dac42e 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -740,7 +740,7 @@ static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim) { struct rapl_primitive_info *rpi = rp->priv->rpi;
- if (prim < 0 || prim > NR_RAPL_PRIMITIVES || !rpi) + if (prim < 0 || prim >= NR_RAPL_PRIMITIVES || !rpi) return NULL;
return &rpi[prim]; diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c index 63d03a0df5cc..abaffb4e1c1c 100644 --- a/drivers/pps/clients/pps_parport.c +++ b/drivers/pps/clients/pps_parport.c @@ -149,6 +149,9 @@ static void parport_attach(struct parport *port) }
index = ida_alloc(&pps_client_index, GFP_KERNEL); + if (index < 0) + goto err_free_device; + memset(&pps_client_cb, 0, sizeof(pps_client_cb)); pps_client_cb.private = device; pps_client_cb.irq_func = parport_irq; @@ -159,7 +162,7 @@ static void parport_attach(struct parport *port) index); if (!device->pardev) { pr_err("couldn't register with %s\n", port->name); - goto err_free; + goto err_free_ida; }
if (parport_claim_or_block(device->pardev) < 0) { @@ -187,8 +190,9 @@ static void parport_attach(struct parport *port) parport_release(device->pardev); err_unregister_dev: parport_unregister_device(device->pardev); -err_free: +err_free_ida: ida_free(&pps_client_index, index); +err_free_device: kfree(device); }
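
The pps_parport change follows the usual kernel error-unwind pattern: the ida_alloc() return value is checked before it is used, each acquisition gets a matching label, and failures jump to the label that releases everything acquired so far, in reverse order. A minimal userspace sketch of that pattern (id_alloc, id_free and attach are hypothetical stand-ins, not the driver's code):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for ida_alloc()/ida_free() style id allocation. */
static int  id_alloc(void)  { return 7; /* or a negative errno on failure */ }
static void id_free(int id) { printf("freed id %d\n", id); }

static int attach(void)
{
	char *device;
	int id, err;

	device = malloc(64);
	if (!device)
		return -1;

	id = id_alloc();
	if (id < 0) {			/* check the allocation before using it */
		err = id;
		goto err_free_device;
	}

	if (0 /* pretend registration failed */) {
		err = -1;
		goto err_free_id;
	}

	printf("attached with id %d\n", id);
	return 0;

err_free_id:				/* unwind in reverse order of acquisition */
	id_free(id);
err_free_device:
	free(device);
	return err;
}

int main(void) { return attach() ? 1 : 0; }
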
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 03afc160fc72..86b680adbf01 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -777,7 +777,7 @@ int of_regulator_bulk_get_all(struct device *dev, struct device_node *np, name[i] = '\0'; tmp = regulator_get(dev, name); if (IS_ERR(tmp)) { - ret = -EINVAL; + ret = PTR_ERR(tmp); goto error; } (*consumers)[n].consumer = tmp; diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 144c8e9a642e..448b9a5438e0 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -210,7 +210,7 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = { /* QSPI Code - alias */ { 0x08000000, 0x08000000, 0x08000000, 0 }, /* DDR (Code) - alias */ - { 0x10000000, 0x80000000, 0x0FFE0000, 0 }, + { 0x10000000, 0x40000000, 0x0FFE0000, 0 }, /* TCML */ { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM}, /* TCMU */ @@ -1076,6 +1076,8 @@ static int imx_rproc_probe(struct platform_device *pdev) return -ENOMEM; }
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); + ret = imx_rproc_xtr_mbox_init(rproc); if (ret) goto err_put_wkq; @@ -1094,8 +1096,6 @@ static int imx_rproc_probe(struct platform_device *pdev) if (ret) goto err_put_scu;
- INIT_WORK(&priv->rproc_work, imx_rproc_vq_work); - if (rproc->state != RPROC_DETACHED) rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
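
The of_regulator change just above propagates the encoded error from regulator_get() instead of flattening every failure into -EINVAL, which matters for cases such as probe deferral. The sketch below models the ERR_PTR/IS_ERR/PTR_ERR convention with toy macros in plain C (toy_regulator_get and the macro definitions here are illustrative, not the kernel headers) to show why returning the decoded pointer value preserves the precise errno:

#include <stdio.h>
#include <stdint.h>

/* Toy model of the kernel convention: errors travel as pointer values. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

#define EPROBE_DEFER	517

static void *toy_regulator_get(const char *name)
{
	(void)name;
	/* Pretend the supply is not ready yet. */
	return ERR_PTR(-EPROBE_DEFER);
}

int main(void)
{
	void *reg = toy_regulator_get("vdd");

	if (IS_ERR(reg)) {
		/* Propagate the real reason (-EPROBE_DEFER here) rather than
		 * collapsing every failure into a generic -EINVAL. */
		long ret = PTR_ERR(reg);

		printf("regulator_get failed: %ld\n", ret);
		return 1;
	}
	return 0;
}
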
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c index 2537ec05ecee..578fe867080c 100644 --- a/drivers/reset/reset-berlin.c +++ b/drivers/reset/reset-berlin.c @@ -68,13 +68,14 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
static int berlin2_reset_probe(struct platform_device *pdev) { - struct device_node *parent_np = of_get_parent(pdev->dev.of_node); + struct device_node *parent_np; struct berlin_reset_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM;
+ parent_np = of_get_parent(pdev->dev.of_node); priv->regmap = syscon_node_to_regmap(parent_np); of_node_put(parent_np); if (IS_ERR(priv->regmap)) diff --git a/drivers/reset/reset-k210.c b/drivers/reset/reset-k210.c index b62a2fd44e4e..e77e4cca377d 100644 --- a/drivers/reset/reset-k210.c +++ b/drivers/reset/reset-k210.c @@ -90,7 +90,7 @@ static const struct reset_control_ops k210_rst_ops = { static int k210_rst_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *parent_np = of_get_parent(dev->of_node); + struct device_node *parent_np; struct k210_rst *ksr;
dev_info(dev, "K210 reset controller\n"); @@ -99,6 +99,7 @@ static int k210_rst_probe(struct platform_device *pdev) if (!ksr) return -ENOMEM;
+ parent_np = of_get_parent(dev->of_node); ksr->map = syscon_node_to_regmap(parent_np); of_node_put(parent_np); if (IS_ERR(ksr->map)) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index f9f682f19415..3ba4e1c5e15d 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -107,6 +107,7 @@ debug_info_t *ap_dbf_info; static bool ap_scan_bus(void); static bool ap_scan_bus_result; /* result of last ap_scan_bus() */ static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */ +static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */ static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */ static int ap_scan_bus_time = AP_CONFIG_TIME; static struct timer_list ap_scan_bus_timer; @@ -1006,11 +1007,25 @@ bool ap_bus_force_rescan(void) if (scan_counter <= 0) goto out;
+ /* + * There is one unlikely but nevertheless valid scenario where the + * thread holding the mutex may try to send some crypto load but + * all cards are offline so a rescan is triggered which causes + * a recursive call of ap_bus_force_rescan(). A simple return if + * the mutex is already locked by this thread solves this. + */ + if (mutex_is_locked(&ap_scan_bus_mutex)) { + if (ap_scan_bus_task == current) + goto out; + } + /* Try to acquire the AP scan bus mutex */ if (mutex_trylock(&ap_scan_bus_mutex)) { /* mutex acquired, run the AP bus scan */ + ap_scan_bus_task = current; ap_scan_bus_result = ap_scan_bus(); rc = ap_scan_bus_result; + ap_scan_bus_task = NULL; mutex_unlock(&ap_scan_bus_mutex); goto out; } @@ -2284,7 +2299,9 @@ static void ap_scan_bus_wq_callback(struct work_struct *unused) * system_long_wq which invokes this function here again. */ if (mutex_trylock(&ap_scan_bus_mutex)) { + ap_scan_bus_task = current; ap_scan_bus_result = ap_scan_bus(); + ap_scan_bus_task = NULL; mutex_unlock(&ap_scan_bus_mutex); } } diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index cea3a79d538e..00e245173320 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -1485,6 +1485,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char **data) { struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected); int c = *count; unsigned char p = *phase; unsigned char *d = *data; @@ -1496,7 +1497,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, return -1; }
- NCR5380_to_ncmd(hostdata->connected)->phase = p; + ncmd->phase = p;
if (p & SR_IO) { if (hostdata->read_overruns) @@ -1608,45 +1609,44 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, * request. */
- if (hostdata->flags & FLAG_DMA_FIXUP) { - if (p & SR_IO) { - /* - * The workaround was to transfer fewer bytes than we - * intended to with the pseudo-DMA read function, wait for - * the chip to latch the last byte, read it, and then disable - * pseudo-DMA mode. - * - * After REQ is asserted, the NCR5380 asserts DRQ and ACK. - * REQ is deasserted when ACK is asserted, and not reasserted - * until ACK goes false. Since the NCR5380 won't lower ACK - * until DACK is asserted, which won't happen unless we twiddle - * the DMA port or we take the NCR5380 out of DMA mode, we - * can guarantee that we won't handshake another extra - * byte. - */ - - if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ, BASR_DRQ, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n"); - } - if (NCR5380_poll_politely(hostdata, STATUS_REG, - SR_REQ, 0, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n"); - } - d[*count - 1] = NCR5380_read(INPUT_DATA_REG); - } else { - /* - * Wait for the last byte to be sent. If REQ is being asserted for - * the byte we're interested, we'll ACK it and it will go false. - */ - if (NCR5380_poll_politely2(hostdata, - BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, - BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0) < 0) { - result = -1; - shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n"); + if ((hostdata->flags & FLAG_DMA_FIXUP) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + /* + * The workaround was to transfer fewer bytes than we + * intended to with the pseudo-DMA receive function, wait for + * the chip to latch the last byte, read it, and then disable + * DMA mode. + * + * After REQ is asserted, the NCR5380 asserts DRQ and ACK. + * REQ is deasserted when ACK is asserted, and not reasserted + * until ACK goes false. Since the NCR5380 won't lower ACK + * until DACK is asserted, which won't happen unless we twiddle + * the DMA port or we take the NCR5380 out of DMA mode, we + * can guarantee that we won't handshake another extra + * byte. + * + * If sending, wait for the last byte to be sent. If REQ is + * being asserted for the byte we're interested, we'll ACK it + * and it will go false. + */ + if (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, + BASR_DRQ, BASR_DRQ, 0)) { + if ((p & SR_IO) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + if (!NCR5380_poll_politely(hostdata, STATUS_REG, + SR_REQ, 0, 0)) { + d[c] = NCR5380_read(INPUT_DATA_REG); + --ncmd->this_residual; + } else { + result = -1; + scmd_printk(KERN_ERR, hostdata->connected, + "PDMA fixup: !REQ timeout\n"); + } } + } else if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH) { + result = -1; + scmd_printk(KERN_ERR, hostdata->connected, + "PDMA fixup: DRQ timeout\n"); } }
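
Both the NCR5380 rework above and the mac_scsi changes further down revolve around polling chip status bits (DRQ, REQ, phase match) with a bound rather than spinning forever, and bailing out when the target changes phase. As a generic illustration of that shape (made-up bit values and a simulated register, not the driver's NCR5380_poll_politely() helper):

#include <stdio.h>

#define BASR_DRQ		0x40	/* example bit positions only */
#define BASR_PHASE_MATCH	0x08

/* Simulated status register: pretend DRQ shows up on the third read. */
static unsigned char read_status(void)
{
	static int reads;

	return (++reads >= 3) ? (BASR_DRQ | BASR_PHASE_MATCH) : BASR_PHASE_MATCH;
}

/* Poll until (status & mask) == want, giving up after 'budget' reads.
 * Returns 0 on success, -1 on phase mismatch or timeout. */
static int poll_status(unsigned char mask, unsigned char want, int budget)
{
	while (budget-- > 0) {
		unsigned char s = read_status();

		if (!(s & BASR_PHASE_MATCH))
			return -1;		/* target changed phase */
		if ((s & mask) == want)
			return 0;
	}
	return -1;				/* bounded: no endless spin */
}

int main(void)
{
	printf("poll result: %d\n", poll_status(BASR_DRQ, BASR_DRQ, 10));
	return 0;
}
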
diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c index 2e83a667901f..1a7437f4328e 100644 --- a/drivers/scsi/elx/libefc/efc_nport.c +++ b/drivers/scsi/elx/libefc/efc_nport.c @@ -705,9 +705,9 @@ efc_nport_vport_del(struct efc *efc, struct efc_domain *domain, spin_lock_irqsave(&efc->lock, flags); list_for_each_entry(nport, &domain->nport_list, list_entry) { if (nport->wwpn == wwpn && nport->wwnn == wwnn) { - kref_put(&nport->ref, nport->release); /* Shutdown this NPORT */ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL); + kref_put(&nport->ref, nport->release); break; } } diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 500253007b1d..26e1313ebb21 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -4847,6 +4847,7 @@ struct fcp_iwrite64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 @@ -4863,6 +4864,7 @@ struct fcp_iread64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 @@ -4879,6 +4881,7 @@ struct fcp_icmnd64_wqe { #define cmd_buff_len_SHIFT 16 #define cmd_buff_len_MASK 0x00000ffff #define cmd_buff_len_WORD word3 +/* Note: payload_offset_len field depends on ASIC support */ #define payload_offset_len_SHIFT 0 #define payload_offset_len_MASK 0x0000ffff #define payload_offset_len_WORD word3 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e1dfa96c2a55..0c1404dc5f3b 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4699,6 +4699,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) uint64_t wwn; bool use_no_reset_hba = false; int rc; + u8 if_type;
if (lpfc_no_hba_reset_cnt) { if (phba->sli_rev < LPFC_SLI_REV4 && @@ -4773,10 +4774,24 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; - if (phba->sli_rev == LPFC_SLI_REV4) - shost->max_cmd_len = LPFC_FCP_CDB_LEN_32; - else + + /* Set max_cmd_len applicable to ASIC support */ + if (phba->sli_rev == LPFC_SLI_REV4) { + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); + switch (if_type) { + case LPFC_SLI_INTF_IF_TYPE_2: + fallthrough; + case LPFC_SLI_INTF_IF_TYPE_6: + shost->max_cmd_len = LPFC_FCP_CDB_LEN_32; + break; + default: + shost->max_cmd_len = LPFC_FCP_CDB_LEN; + break; + } + } else { shost->max_cmd_len = LPFC_FCP_CDB_LEN; + }
if (phba->sli_rev == LPFC_SLI_REV4) { if (!phba->cfg_fcp_mq_threshold || diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 98ce9d97a225..9f0b59672e19 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4760,7 +4760,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
/* Word 3 */ bf_set(payload_offset_len, &wqe->fcp_icmd, - sizeof(struct fcp_cmnd32) + sizeof(struct fcp_rsp)); + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
/* Word 6 */ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 53ee8f84d094..3958f7dc679f 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -102,11 +102,15 @@ __setup("mac5380=", mac_scsi_setup); * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets * so bus errors are unavoidable. * - * If a MOVE.B instruction faults, we assume that zero bytes were transferred - * and simply retry. That assumption probably depends on target behaviour but - * seems to hold up okay. The NOP provides synchronization: without it the - * fault can sometimes occur after the program counter has moved past the - * offending instruction. Post-increment addressing can't be used. + * If a MOVE.B instruction faults during a receive operation, we assume the + * target sent nothing and try again. That assumption probably depends on + * target firmware but it seems to hold up okay. If a fault happens during a + * send operation, the target may or may not have seen /ACK and got the byte. + * It's uncertain so the whole SCSI command gets retried. + * + * The NOP is needed for synchronization because the fault address in the + * exception stack frame may or may not be the instruction that actually + * caused the bus error. Post-increment addressing can't be used. */
#define MOVE_BYTE(operands) \ @@ -208,8 +212,6 @@ __setup("mac5380=", mac_scsi_setup); ".previous \n" \ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
-#define MAC_PDMA_DELAY 32 - static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n) { unsigned char *addr = start; @@ -245,22 +247,21 @@ static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n) if (n >= 1) { MOVE_BYTE("%0@,%3@"); if (result) - goto out; + return -1; } if (n >= 1 && ((unsigned long)addr & 1)) { MOVE_BYTE("%0@,%3@"); if (result) - goto out; + return -2; } while (n >= 32) MOVE_16_WORDS("%0@+,%3@"); while (n >= 2) MOVE_WORD("%0@+,%3@"); if (result) - return start - addr; /* Negated to indicate uncertain length */ + return start - addr - 1; /* Negated to indicate uncertain length */ if (n == 1) MOVE_BYTE("%0@,%3@"); -out: return addr - start; }
@@ -274,25 +275,56 @@ static inline void write_ctrl_reg(struct NCR5380_hostdata *hostdata, u32 value) out_be32(hostdata->io + (CTRL_REG << 4), value); }
+static inline int macscsi_wait_for_drq(struct NCR5380_hostdata *hostdata) +{ + unsigned int n = 1; /* effectively multiplies NCR5380_REG_POLL_TIME */ + unsigned char basr; + +again: + basr = NCR5380_read(BUS_AND_STATUS_REG); + + if (!(basr & BASR_PHASE_MATCH)) + return 1; + + if (basr & BASR_IRQ) + return -1; + + if (basr & BASR_DRQ) + return 0; + + if (n-- == 0) { + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: DRQ timeout\n", __func__); + return -1; + } + + NCR5380_poll_politely2(hostdata, + BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ, + BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, 0); + goto again; +} + static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4); unsigned char *d = dst; - int result = 0;
hostdata->pdma_residual = len;
- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ | BASR_PHASE_MATCH, - BASR_DRQ | BASR_PHASE_MATCH, 0)) { - int bytes; + while (macscsi_wait_for_drq(hostdata) == 0) { + int bytes, chunk_bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE);
- bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512)); + chunk_bytes = min(hostdata->pdma_residual, 512); + bytes = mac_pdma_recv(s, d, chunk_bytes); + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
if (bytes > 0) { d += bytes; @@ -300,37 +332,25 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata, }
if (hostdata->pdma_residual == 0) - goto out; + break;
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, - BASR_ACK, 0) < 0) - scmd_printk(KERN_DEBUG, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - goto out; + if (bytes > 0) + continue;
- if (bytes == 0) - udelay(MAC_PDMA_DELAY); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error [%d/%d] (%d/%d)\n", + __func__, d - dst, len, bytes, chunk_bytes);
- if (bytes >= 0) + if (bytes == 0) continue;
- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, d - dst, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; - goto out; + if (macscsi_wait_for_drq(hostdata) <= 0) + set_host_byte(hostdata->connected, DID_ERROR); + break; }
- scmd_printk(KERN_ERR, hostdata->connected, - "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; -out: - if (macintosh_config->ident == MAC_MODEL_IIFX) - write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); - return result; + return 0; }
static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, @@ -338,67 +358,47 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata, { unsigned char *s = src; u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4); - int result = 0;
hostdata->pdma_residual = len;
- while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, - BASR_DRQ | BASR_PHASE_MATCH, - BASR_DRQ | BASR_PHASE_MATCH, 0)) { - int bytes; + while (macscsi_wait_for_drq(hostdata) == 0) { + int bytes, chunk_bytes;
if (macintosh_config->ident == MAC_MODEL_IIFX) write_ctrl_reg(hostdata, CTRL_HANDSHAKE_MODE | CTRL_INTERRUPTS_ENABLE);
- bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512)); + chunk_bytes = min(hostdata->pdma_residual, 512); + bytes = mac_pdma_send(s, d, chunk_bytes); + + if (macintosh_config->ident == MAC_MODEL_IIFX) + write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE);
if (bytes > 0) { s += bytes; hostdata->pdma_residual -= bytes; }
- if (hostdata->pdma_residual == 0) { - if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, - TCR_LAST_BYTE_SENT, - TCR_LAST_BYTE_SENT, - 0) < 0) { - scmd_printk(KERN_ERR, hostdata->connected, - "%s: Last Byte Sent timeout\n", __func__); - result = -1; - } - goto out; - } + if (hostdata->pdma_residual == 0) + break;
- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ, - BUS_AND_STATUS_REG, BASR_ACK, - BASR_ACK, 0) < 0) - scmd_printk(KERN_DEBUG, hostdata->connected, - "%s: !REQ and !ACK\n", __func__); - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) - goto out; + if (bytes > 0) + continue;
- if (bytes == 0) - udelay(MAC_PDMA_DELAY); + NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); + dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, + "%s: bus error [%d/%d] (%d/%d)\n", + __func__, s - src, len, bytes, chunk_bytes);
- if (bytes >= 0) + if (bytes == 0) continue;
- dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host, - "%s: bus error (%d/%d)\n", __func__, s - src, len); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; - goto out; + if (macscsi_wait_for_drq(hostdata) <= 0) + set_host_byte(hostdata->connected, DID_ERROR); + break; }
- scmd_printk(KERN_ERR, hostdata->connected, - "%s: phase mismatch or !DRQ\n", __func__); - NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host); - result = -1; -out: - if (macintosh_config->ident == MAC_MODEL_IIFX) - write_ctrl_reg(hostdata, CTRL_INTERRUPTS_ENABLE); - return result; + return 0; }
static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 9db86943d04c..53896df7ec2b 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1382,7 +1382,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, protect | fua, dld); - } else if (rq->cmd_flags & REQ_ATOMIC && write) { + } else if (rq->cmd_flags & REQ_ATOMIC) { ret = sd_setup_atomic_cmnd(cmd, lba, nr_blocks, sdkp->use_atomic_write_boundary, protect | fua); @@ -3404,7 +3404,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp, rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb1);
- if (!vpd || vpd->len < 8) { + if (!vpd || vpd->len <= 8) { rcu_read_unlock(); return; } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 24c7cb285dca..c1524fb334eb 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2354,14 +2354,6 @@ static inline void pqi_mask_device(u8 *scsi3addr) scsi3addr[3] |= 0xc0; }
-static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) -{ - if (pqi_is_logical_device(device)) - return false; - - return (device->path_map & (device->path_map - 1)) != 0; -} - static inline bool pqi_expose_device(struct pqi_scsi_dev *device) { return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); @@ -3258,14 +3250,12 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) int residual_count; int xfer_count; bool device_offline; - struct pqi_scsi_dev *device;
scmd = io_request->scmd; error_info = io_request->error_info; host_byte = DID_OK; sense_data_length = 0; device_offline = false; - device = scmd->device->hostdata;
switch (error_info->service_response) { case PQI_AIO_SERV_RESPONSE_COMPLETE: @@ -3290,14 +3280,8 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request) break; case PQI_AIO_STATUS_AIO_PATH_DISABLED: pqi_aio_path_disabled(io_request); - if (pqi_is_multipath_device(device)) { - pqi_device_remove_start(device); - host_byte = DID_NO_CONNECT; - scsi_status = SAM_STAT_CHECK_CONDITION; - } else { - scsi_status = SAM_STAT_GOOD; - io_request->status = -EAGAIN; - } + scsi_status = SAM_STAT_GOOD; + io_request->status = -EAGAIN; break; case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c index 76bb496305a0..bacabf731dcb 100644 --- a/drivers/soc/fsl/qe/qmc.c +++ b/drivers/soc/fsl/qe/qmc.c @@ -940,11 +940,13 @@ static int qmc_chan_start_rx(struct qmc_chan *chan) goto end; }
- ret = qmc_setup_chan_trnsync(chan->qmc, chan); - if (ret) { - dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", - chan->id, ret); - goto end; + if (chan->mode == QMC_TRANSPARENT) { + ret = qmc_setup_chan_trnsync(chan->qmc, chan); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", + chan->id, ret); + goto end; + } }
/* Restart the receiver */ @@ -982,11 +984,13 @@ static int qmc_chan_start_tx(struct qmc_chan *chan) goto end; }
- ret = qmc_setup_chan_trnsync(chan->qmc, chan); - if (ret) { - dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", - chan->id, ret); - goto end; + if (chan->mode == QMC_TRANSPARENT) { + ret = qmc_setup_chan_trnsync(chan->qmc, chan); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", + chan->id, ret); + goto end; + } }
/* diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c index 6c5741cf5e9d..53968ea84c88 100644 --- a/drivers/soc/fsl/qe/tsa.c +++ b/drivers/soc/fsl/qe/tsa.c @@ -140,7 +140,7 @@ static inline void tsa_write32(void __iomem *addr, u32 val) iowrite32be(val, addr); }
-static inline void tsa_write8(void __iomem *addr, u32 val) +static inline void tsa_write8(void __iomem *addr, u8 val) { iowrite8(val, addr); } diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index b7056aed4c7d..9d64283d2125 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -196,9 +196,6 @@ static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) { struct qcom_smd_rpm *rpm;
- if (!rpdev->dev.of_node) - return -EINVAL; - rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); if (!rpm) return -ENOMEM; @@ -218,18 +215,38 @@ static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) of_platform_depopulate(&rpdev->dev); }
-static const struct rpmsg_device_id qcom_smd_rpm_id_table[] = { - { .name = "rpm_requests", }, - { /* sentinel */ } +static const struct of_device_id qcom_smd_rpm_of_match[] = { + { .compatible = "qcom,rpm-apq8084" }, + { .compatible = "qcom,rpm-ipq6018" }, + { .compatible = "qcom,rpm-ipq9574" }, + { .compatible = "qcom,rpm-msm8226" }, + { .compatible = "qcom,rpm-msm8909" }, + { .compatible = "qcom,rpm-msm8916" }, + { .compatible = "qcom,rpm-msm8936" }, + { .compatible = "qcom,rpm-msm8953" }, + { .compatible = "qcom,rpm-msm8974" }, + { .compatible = "qcom,rpm-msm8976" }, + { .compatible = "qcom,rpm-msm8994" }, + { .compatible = "qcom,rpm-msm8996" }, + { .compatible = "qcom,rpm-msm8998" }, + { .compatible = "qcom,rpm-sdm660" }, + { .compatible = "qcom,rpm-sm6115" }, + { .compatible = "qcom,rpm-sm6125" }, + { .compatible = "qcom,rpm-sm6375" }, + { .compatible = "qcom,rpm-qcm2290" }, + { .compatible = "qcom,rpm-qcs404" }, + {} }; -MODULE_DEVICE_TABLE(rpmsg, qcom_smd_rpm_id_table); +MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
static struct rpmsg_driver qcom_smd_rpm_driver = { .probe = qcom_smd_rpm_probe, .remove = qcom_smd_rpm_remove, .callback = qcom_smd_rpm_callback, - .id_table = qcom_smd_rpm_id_table, - .drv.name = "qcom_smd_rpm", + .drv = { + .name = "qcom_smd_rpm", + .of_match_table = qcom_smd_rpm_of_match, + }, };
static int __init qcom_smd_rpm_init(void) diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c index bab4ad87aa75..d5099a3386b4 100644 --- a/drivers/soc/versatile/soc-integrator.c +++ b/drivers/soc/versatile/soc-integrator.c @@ -113,6 +113,7 @@ static int __init integrator_soc_init(void) return -ENODEV;
syscon_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(syscon_regmap)) return PTR_ERR(syscon_regmap);
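
The soc-integrator hunk adds the of_node_put() that balances the reference taken when the node was looked up, the same discipline the reset-berlin and reset-k210 changes earlier enforce by taking the parent reference only once nothing can fail before the put. A toy refcount sketch of the rule (node_get/node_put are hypothetical helpers, not the OF API):

#include <stdio.h>

struct toy_node { const char *name; int refcount; };

static struct toy_node syscon = { "syscon", 1 };

static struct toy_node *node_get(struct toy_node *n) { n->refcount++; return n; }
static void node_put(struct toy_node *n) { n->refcount--; }

/* Drop the lookup reference as soon as the node has been used, on both
 * the success and the error path, so the count ends where it started. */
static int probe(void)
{
	struct toy_node *np = node_get(&syscon);	/* like of_get_parent() */

	/* ... the syscon_node_to_regmap(np) equivalent would go here ... */
	node_put(np);					/* always drop the ref */

	printf("refcount after probe: %d\n", syscon.refcount);	/* back to 1 */
	return 0;
}

int main(void) { return probe(); }
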
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c index c6876d232d8f..cf91abe07d38 100644 --- a/drivers/soc/versatile/soc-realview.c +++ b/drivers/soc/versatile/soc-realview.c @@ -4,6 +4,7 @@ * * Author: Linus Walleij linus.walleij@linaro.org */ +#include <linux/device.h> #include <linux/init.h> #include <linux/io.h> #include <linux/slab.h> @@ -81,6 +82,13 @@ static struct attribute *realview_attrs[] = {
ATTRIBUTE_GROUPS(realview);
+static void realview_soc_socdev_release(void *data) +{ + struct soc_device *soc_dev = data; + + soc_device_unregister(soc_dev); +} + static int realview_soc_probe(struct platform_device *pdev) { struct regmap *syscon_regmap; @@ -93,7 +101,7 @@ static int realview_soc_probe(struct platform_device *pdev) if (IS_ERR(syscon_regmap)) return PTR_ERR(syscon_regmap);
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM;
@@ -106,10 +114,14 @@ static int realview_soc_probe(struct platform_device *pdev) soc_dev_attr->family = "Versatile"; soc_dev_attr->custom_attr_group = realview_groups[0]; soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) { - kfree(soc_dev_attr); + if (IS_ERR(soc_dev)) return -ENODEV; - } + + ret = devm_add_action_or_reset(&pdev->dev, realview_soc_socdev_release, + soc_dev); + if (ret) + return ret; + ret = regmap_read(syscon_regmap, REALVIEW_SYS_ID_OFFSET, &realview_coreid); if (ret) diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 5aaff3bee1b7..9ea91432c11d 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -375,9 +375,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, * If the QSPI controller is set in regular SPI mode, set it in * Serial Memory Mode (SMM). */ - if (aq->mr != QSPI_MR_SMM) { - atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); - aq->mr = QSPI_MR_SMM; + if (!(aq->mr & QSPI_MR_SMM)) { + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR); }
/* Clear pending interrupts */ @@ -501,7 +501,8 @@ static int atmel_qspi_setup(struct spi_device *spi) if (ret < 0) return ret;
- aq->scr = QSPI_SCR_SCBR(scbr); + aq->scr &= ~QSPI_SCR_SCBR_MASK; + aq->scr |= QSPI_SCR_SCBR(scbr); atmel_qspi_write(aq->scr, aq, QSPI_SCR);
pm_runtime_mark_last_busy(ctrl->dev.parent); @@ -534,6 +535,7 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi) if (ret < 0) return ret;
+ aq->scr &= ~QSPI_SCR_DLYBS_MASK; aq->scr |= QSPI_SCR_DLYBS(cs_setup); atmel_qspi_write(aq->scr, aq, QSPI_SCR);
@@ -549,8 +551,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq) atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */ - atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); - aq->mr = QSPI_MR_SMM; + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR);
/* Enable the QSPI controller */ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); @@ -726,6 +728,7 @@ static void atmel_qspi_remove(struct platform_device *pdev) clk_unprepare(aq->pclk);
pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); }
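
The atmel-quadspi hunks stop overwriting the cached MR/SCR values and instead clear only the field being changed before OR-ing in the new one, so fields programmed earlier (the DLYBS delay, the SMM mode bit) survive a later update such as a baud-rate change. The same read-modify-write idiom in generic form (the field layout below is made up, not the QSPI register map):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Hypothetical fields inside a 32-bit shadow register. */
#define SCBR_MASK	0x0000ff00u
#define SCBR(x)		(((uint32_t)(x) << 8) & SCBR_MASK)
#define DLYBS_MASK	0x00ff0000u
#define DLYBS(x)	(((uint32_t)(x) << 16) & DLYBS_MASK)

int main(void)
{
	uint32_t scr = 0;

	scr |= DLYBS(5);		/* programmed earlier, must survive */

	/* Wrong: scr = SCBR(12); would wipe the DLYBS field. */
	scr &= ~SCBR_MASK;		/* clear only the field we own */
	scr |= SCBR(12);		/* then install the new value */

	printf("scr = 0x%08" PRIx32 "\n", scr);	/* DLYBS(5) | SCBR(12) */
	return 0;
}
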
diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c index 9d97ec98881c..94458df53eae 100644 --- a/drivers/spi/spi-airoha-snfi.c +++ b/drivers/spi/spi-airoha-snfi.c @@ -211,9 +211,6 @@ struct airoha_snand_dev {
u8 *txrx_buf; dma_addr_t dma_addr; - - u64 cur_page_num; - bool data_need_update; };
struct airoha_snand_ctrl { @@ -405,7 +402,7 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd, for (i = 0; i < len; i += data_len) { int err;
- data_len = min(len, SPI_MAX_TRANSFER_SIZE); + data_len = min(len - i, SPI_MAX_TRANSFER_SIZE); err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len); if (err) return err; @@ -427,7 +424,7 @@ static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data, for (i = 0; i < len; i += data_len) { int err;
- data_len = min(len, SPI_MAX_TRANSFER_SIZE); + data_len = min(len - i, SPI_MAX_TRANSFER_SIZE); err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len); if (err) return err; @@ -644,11 +641,6 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, u32 val, rd_mode; int err;
- if (!as_dev->data_need_update) - return len; - - as_dev->data_need_update = false; - switch (op->cmd.opcode) { case SPI_NAND_OP_READ_FROM_CACHE_DUAL: rd_mode = 1; @@ -739,8 +731,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, if (err) return err;
- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, - SPI_NFI_READ_FROM_CACHE_DONE); + /* + * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end + * of dirmap_read operation even if it is already set. + */ + err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, + SPI_NFI_READ_FROM_CACHE_DONE, + SPI_NFI_READ_FROM_CACHE_DONE); if (err) return err;
@@ -870,8 +867,13 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, if (err) return err;
- err = regmap_set_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, - SPI_NFI_LOAD_TO_CACHE_DONE); + /* + * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end + * of dirmap_write operation even if it is already set. + */ + err = regmap_write_bits(as_ctrl->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1, + SPI_NFI_LOAD_TO_CACHE_DONE, + SPI_NFI_LOAD_TO_CACHE_DONE); if (err) return err;
@@ -885,23 +887,11 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, static int airoha_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { - struct airoha_snand_dev *as_dev = spi_get_ctldata(mem->spi); u8 data[8], cmd, opcode = op->cmd.opcode; struct airoha_snand_ctrl *as_ctrl; int i, err;
as_ctrl = spi_controller_get_devdata(mem->spi->controller); - if (opcode == SPI_NAND_OP_PROGRAM_EXECUTE && - op->addr.val == as_dev->cur_page_num) { - as_dev->data_need_update = true; - } else if (opcode == SPI_NAND_OP_PAGE_READ) { - if (!as_dev->data_need_update && - op->addr.val == as_dev->cur_page_num) - return 0; - - as_dev->data_need_update = true; - as_dev->cur_page_num = op->addr.val; - }
/* switch to manual mode */ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); @@ -986,7 +976,6 @@ static int airoha_snand_setup(struct spi_device *spi) if (dma_mapping_error(as_ctrl->dev, as_dev->dma_addr)) return -ENOMEM;
- as_dev->data_need_update = true; spi_set_ctldata(spi, as_dev);
return 0; diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c index 9f64afd8164e..4965bc86d7f5 100644 --- a/drivers/spi/spi-bcmbca-hsspi.c +++ b/drivers/spi/spi-bcmbca-hsspi.c @@ -546,12 +546,14 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) goto out_put_host; }
- pm_runtime_enable(&pdev->dev); + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret) + goto out_put_host;
ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group); if (ret) { dev_err(&pdev->dev, "couldn't register sysfs group\n"); - goto out_pm_disable; + goto out_put_host; }
/* register and we are done */ @@ -565,8 +567,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
out_sysgroup_disable: sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group); -out_pm_disable: - pm_runtime_disable(&pdev->dev); out_put_host: spi_controller_put(host); out_disable_pll_clk: diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 8ecb426be45c..977e8b55c82b 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -986,6 +986,7 @@ static void fsl_lpspi_remove(struct platform_device *pdev)
fsl_lpspi_dma_exit(controller);
+ pm_runtime_dont_use_autosuspend(fsl_lpspi->dev); pm_runtime_disable(fsl_lpspi->dev); }
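
The spi-airoha-snfi read/write loops a little earlier were fixed to cap each pass at min(len - i, SPI_MAX_TRANSFER_SIZE) rather than min(len, SPI_MAX_TRANSFER_SIZE); with the old expression the final pass of a long transfer was issued at full chunk size even when fewer bytes remained, overrunning the buffer. A minimal model of the corrected loop (MAX_CHUNK and transfer() are illustrative):

#include <stdio.h>

#define MAX_CHUNK 512

/* Walk a buffer in chunks; each chunk is capped by the bytes actually
 * left, exactly as the min(len - i, ...) fix does. */
static void transfer(int len)
{
	for (int i = 0; i < len; ) {
		int chunk = (len - i < MAX_CHUNK) ? len - i : MAX_CHUNK;

		/* min(len, MAX_CHUNK) here would issue 512 bytes on the last
		 * pass of a 1300-byte transfer, 236 bytes past the end. */
		printf("chunk of %d bytes at offset %d\n", chunk, i);
		i += chunk;
	}
}

int main(void)
{
	transfer(1300);		/* 512 + 512 + 276 */
	return 0;
}
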
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 6585b19a4866..a961785724cd 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -57,13 +57,6 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h>
-/* - * The driver only uses one single LUT entry, that is updated on - * each call of exec_op(). Index 0 is preset at boot with a basic - * read operation, so let's use the last entry (31). - */ -#define SEQID_LUT 31 - /* Registers used by the driver */ #define FSPI_MCR0 0x00 #define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) @@ -263,9 +256,6 @@ #define FSPI_TFDR 0x180
#define FSPI_LUT_BASE 0x200 -#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) -#define FSPI_LUT_REG(idx) \ - (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
/* register map end */
@@ -341,6 +331,7 @@ struct nxp_fspi_devtype_data { unsigned int txfifo; unsigned int ahb_buf_size; unsigned int quirks; + unsigned int lut_num; bool little_endian; };
@@ -349,6 +340,7 @@ static struct nxp_fspi_devtype_data lx2160a_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ };
@@ -357,6 +349,7 @@ static struct nxp_fspi_devtype_data imx8mm_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ };
@@ -365,6 +358,7 @@ static struct nxp_fspi_devtype_data imx8qxp_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = 0, + .lut_num = 32, .little_endian = true, /* little-endian */ };
@@ -373,6 +367,16 @@ static struct nxp_fspi_devtype_data imx8dxl_data = { .txfifo = SZ_1K, /* (128 * 64 bits) */ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ .quirks = FSPI_QUIRK_USE_IP_ONLY, + .lut_num = 32, + .little_endian = true, /* little-endian */ +}; + +static struct nxp_fspi_devtype_data imx8ulp_data = { + .rxfifo = SZ_512, /* (64 * 64 bits) */ + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, + .lut_num = 16, .little_endian = true, /* little-endian */ };
@@ -544,6 +548,8 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, void __iomem *base = f->iobase; u32 lutval[4] = {}; int lutidx = 1, i; + u32 lut_offset = (f->devtype_data->lut_num - 1) * 4 * 4; + u32 target_lut_reg;
/* cmd */ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), @@ -588,8 +594,10 @@ static void nxp_fspi_prepare_lut(struct nxp_fspi *f, fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
/* fill LUT */ - for (i = 0; i < ARRAY_SIZE(lutval); i++) - fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i)); + for (i = 0; i < ARRAY_SIZE(lutval); i++) { + target_lut_reg = FSPI_LUT_BASE + lut_offset + i * 4; + fspi_writel(f, lutval[i], base + target_lut_reg); + }
dev_dbg(f->dev, "CMD[%02x] lutval[0:%08x 1:%08x 2:%08x 3:%08x], size: 0x%08x\n", op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], op->data.nbytes); @@ -876,7 +884,7 @@ static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) void __iomem *base = f->iobase; int seqnum = 0; int err = 0; - u32 reg; + u32 reg, seqid_lut;
reg = fspi_readl(f, base + FSPI_IPRXFCR); /* invalid RXFIFO first */ @@ -892,8 +900,9 @@ * the LUT at each exec_op() call. And also specify the DATA * length, since it has not been specified in the LUT. */ + seqid_lut = f->devtype_data->lut_num - 1; fspi_writel(f, op->data.nbytes | - (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) | + (seqid_lut << FSPI_IPCR1_SEQID_SHIFT) | (seqnum << FSPI_IPCR1_SEQNUM_SHIFT), base + FSPI_IPCR1);
@@ -1017,7 +1026,7 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) { void __iomem *base = f->iobase; int ret, i; - u32 reg; + u32 reg, seqid_lut;
/* disable and unprepare clock to avoid glitch pass to controller */ nxp_fspi_clk_disable_unprep(f); @@ -1092,11 +1101,17 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f) fspi_writel(f, reg, base + FSPI_FLSHB1CR1); fspi_writel(f, reg, base + FSPI_FLSHB2CR1);
+ /* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry. + */ + seqid_lut = f->devtype_data->lut_num - 1; /* AHB Read - Set lut sequence ID for all CS. */ - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2); - fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHA1CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHA2CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHB1CR2); + fspi_writel(f, seqid_lut, base + FSPI_FLSHB2CR2);
f->selected = -1;
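
In the nxp-fspi hunks the fixed SEQID_LUT of 31 becomes a per-SoC lut_num, and both the sequence id and the LUT register offset are derived from it, each sequence occupying four 32-bit instruction words. A quick check of the arithmetic for the two table sizes in the patch (FSPI_LUT_BASE of 0x200 is taken from the driver; the helper below is only illustrative):

#include <stdio.h>

#define FSPI_LUT_BASE	0x200

/* Each LUT sequence holds 4 x 32-bit instruction words (16 bytes). */
static void show(unsigned int lut_num)
{
	unsigned int seqid = lut_num - 1;	/* use the last entry */
	unsigned int offset = seqid * 4 * 4;

	printf("lut_num=%2u -> seqid=%2u, first reg at 0x%03x\n",
	       lut_num, seqid, FSPI_LUT_BASE + offset);
}

int main(void)
{
	show(32);	/* lx2160a/imx8m*/imx8qxp/imx8dxl: seqid 31, regs from 0x3f0 */
	show(16);	/* imx8ulp: seqid 15, regs from 0x2f0 */
	return 0;
}
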
@@ -1291,6 +1306,7 @@ static const struct of_device_id nxp_fspi_dt_ids[] = { { .compatible = "nxp,imx8mp-fspi", .data = (void *)&imx8mm_data, }, { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, }, { .compatible = "nxp,imx8dxl-fspi", .data = (void *)&imx8dxl_data, }, + { .compatible = "nxp,imx8ulp-fspi", .data = (void *)&imx8ulp_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c index 942c3117ab3a..8f6309f32de0 100644 --- a/drivers/spi/spi-ppc4xx.c +++ b/drivers/spi/spi-ppc4xx.c @@ -27,7 +27,6 @@ #include <linux/wait.h> #include <linux/platform_device.h> #include <linux/of_address.h> -#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -412,7 +411,11 @@ static int spi_ppc4xx_of_probe(struct platform_device *op) }
/* Request IRQ */ - hw->irqnum = irq_of_parse_and_map(np, 0); + ret = platform_get_irq(op, 0); + if (ret < 0) + goto free_host; + hw->irqnum = ret; + ret = request_irq(hw->irqnum, spi_ppc4xx_int, 0, "spi_ppc4xx_of", (void *)hw); if (ret) { diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c index fecd3e67c7a1..b6d34145bc19 100644 --- a/drivers/staging/media/starfive/camss/stf-camss.c +++ b/drivers/staging/media/starfive/camss/stf-camss.c @@ -358,8 +358,6 @@ static int stfcamss_probe(struct platform_device *pdev) /* * stfcamss_remove - Remove STFCAMSS platform device * @pdev: Pointer to STFCAMSS platform device - * - * Always returns 0. */ static void stfcamss_remove(struct platform_device *pdev) { diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index daed67d19efb..863e7a4272e6 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -92,23 +92,21 @@ static void bang_bang_manage(struct thermal_zone_device *tz)
for_each_trip_desc(tz, td) { const struct thermal_trip *trip = &td->trip; + bool turn_on;
- if (tz->temperature >= td->threshold || - trip->temperature == THERMAL_TEMP_INVALID || + if (trip->temperature == THERMAL_TEMP_INVALID || trip->type == THERMAL_TRIP_CRITICAL || trip->type == THERMAL_TRIP_HOT) continue;
/* - * If the initial cooling device state is "on", but the zone - * temperature is not above the trip point, the core will not - * call bang_bang_control() until the zone temperature reaches - * the trip point temperature which may be never. In those - * cases, set the initial state of the cooling device to 0. + * Adjust the target states for uninitialized thermal instances + * to the thermal zone temperature and the trip point threshold. */ + turn_on = tz->temperature >= td->threshold; list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (!instance->initialized && instance->trip == trip) - bang_bang_set_instance_target(instance, 0); + bang_bang_set_instance_target(instance, turn_on); } }
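
The gov_bang_bang change seeds uninitialized cooling-device instances from the current zone temperature versus the trip threshold instead of forcing them off. For readers unfamiliar with the governor, a bang-bang (on/off) controller with hysteresis reduces to a few lines; this is a generic illustration of the control scheme, not the kernel governor itself:

#include <stdio.h>
#include <stdbool.h>

/* Turn cooling on above the trip point, off again only once the
 * temperature has dropped below (trip - hysteresis). */
static bool bang_bang(int temp, int trip, int hyst, bool cooling_on)
{
	if (temp >= trip)
		return true;
	if (temp <= trip - hyst)
		return false;
	return cooling_on;	/* inside the hysteresis band: keep state */
}

int main(void)
{
	const int trip = 70000, hyst = 3000;	/* millidegrees C */
	bool on = false;
	int temps[] = { 65000, 71000, 69000, 66000 };

	for (unsigned i = 0; i < sizeof(temps) / sizeof(temps[0]); i++) {
		on = bang_bang(temps[i], trip, hyst, on);
		printf("%d mC -> cooling %s\n", temps[i], on ? "on" : "off");
	}
	return 0;
}
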
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index e6669aeda1ff..93c16aff0df2 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -323,11 +323,15 @@ static void thermal_zone_broken_disable(struct thermal_zone_device *tz) static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, unsigned long delay) { - if (delay) - mod_delayed_work(system_freezable_power_efficient_wq, - &tz->poll_queue, delay); - else + if (!delay) { cancel_delayed_work(&tz->poll_queue); + return; + } + + if (delay > HZ) + delay = round_jiffies_relative(delay); + + mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, delay); }
static void thermal_zone_recheck(struct thermal_zone_device *tz, int error) @@ -991,20 +995,6 @@ void print_bind_err_msg(struct thermal_zone_device *tz, tz->type, cdev->type, ret); }
-static void bind_cdev(struct thermal_cooling_device *cdev) -{ - int ret; - struct thermal_zone_device *pos = NULL; - - list_for_each_entry(pos, &thermal_tz_list, node) { - if (pos->ops.bind) { - ret = pos->ops.bind(pos, cdev); - if (ret) - print_bind_err_msg(pos, cdev, ret); - } - } -} - /** * __thermal_cooling_device_register() - register a new thermal cooling device * @np: a pointer to a device tree node. @@ -1100,7 +1090,13 @@ __thermal_cooling_device_register(struct device_node *np, list_add(&cdev->node, &thermal_cdev_list);
/* Update binding information for 'this' new cdev */ - bind_cdev(cdev); + list_for_each_entry(pos, &thermal_tz_list, node) { + if (pos->ops.bind) { + ret = pos->ops.bind(pos, cdev); + if (ret) + print_bind_err_msg(pos, cdev, ret); + } + }
list_for_each_entry(pos, &thermal_tz_list, node) if (atomic_cmpxchg(&pos->need_update, 1, 0)) @@ -1338,32 +1334,6 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) } EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
-static void bind_tz(struct thermal_zone_device *tz) -{ - int ret; - struct thermal_cooling_device *pos = NULL; - - if (!tz->ops.bind) - return; - - mutex_lock(&thermal_list_lock); - - list_for_each_entry(pos, &thermal_cdev_list, node) { - ret = tz->ops.bind(tz, pos); - if (ret) - print_bind_err_msg(tz, pos, ret); - } - - mutex_unlock(&thermal_list_lock); -} - -static void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms) -{ - *delay_jiffies = msecs_to_jiffies(delay_ms); - if (delay_ms > 1000) - *delay_jiffies = round_jiffies(*delay_jiffies); -} - int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp) { const struct thermal_trip_desc *td; @@ -1509,8 +1479,8 @@ thermal_zone_device_register_with_trips(const char *type, td->threshold = INT_MAX; }
- thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay); - thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay); + tz->polling_delay_jiffies = msecs_to_jiffies(polling_delay); + tz->passive_delay_jiffies = msecs_to_jiffies(passive_delay); tz->recheck_delay_jiffies = THERMAL_RECHECK_DELAY;
/* sys I/F */ @@ -1554,13 +1524,23 @@ thermal_zone_device_register_with_trips(const char *type, }
mutex_lock(&thermal_list_lock); + mutex_lock(&tz->lock); list_add_tail(&tz->node, &thermal_tz_list); mutex_unlock(&tz->lock); - mutex_unlock(&thermal_list_lock);
/* Bind cooling devices for this zone */ - bind_tz(tz); + if (tz->ops.bind) { + struct thermal_cooling_device *cdev; + + list_for_each_entry(cdev, &thermal_cdev_list, node) { + result = tz->ops.bind(tz, cdev); + if (result) + print_bind_err_msg(tz, cdev, result); + } + } + + mutex_unlock(&thermal_list_lock);
thermal_zone_device_init(tz); /* Update the new thermal zone and mark it as already updated. */ diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 4cf2b7230d04..3c8e2bca87f2 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -15,8 +15,20 @@ #include "thermal_netlink.h" #include "thermal_debugfs.h"
+struct thermal_attr { + struct device_attribute attr; + char name[THERMAL_NAME_LENGTH]; +}; + +struct thermal_trip_attrs { + struct thermal_attr type; + struct thermal_attr temp; + struct thermal_attr hyst; +}; + struct thermal_trip_desc { struct thermal_trip trip; + struct thermal_trip_attrs trip_attrs; struct list_head notify_list_node; int notify_temp; int threshold; @@ -56,9 +68,6 @@ struct thermal_governor { * @device: &struct device for this thermal zone * @removal: removal completion * @resume: resume completion - * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature - * @trip_type_attrs: attributes for trip points for sysfs: trip type - * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis * @mode: current mode of this thermal zone * @devdata: private pointer for device private data * @num_trips: number of trip points the thermal zone supports @@ -102,9 +111,6 @@ struct thermal_zone_device { struct completion removal; struct completion resume; struct attribute_group trips_attribute_group; - struct thermal_attr *trip_temp_attrs; - struct thermal_attr *trip_type_attrs; - struct thermal_attr *trip_hyst_attrs; enum thermal_device_mode mode; void *devdata; int num_trips; @@ -188,11 +194,6 @@ int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
struct thermal_zone_device *thermal_zone_get_by_id(int id);
-struct thermal_attr { - struct device_attribute attr; - char name[THERMAL_NAME_LENGTH]; -}; - static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) { return cdev->ops->get_requested_power && cdev->ops->state2power && @@ -262,11 +263,11 @@ const char *thermal_trip_type_name(enum thermal_trip_type trip_type); void thermal_zone_set_trips(struct thermal_zone_device *tz); int thermal_zone_trip_id(const struct thermal_zone_device *tz, const struct thermal_trip *trip); -void thermal_zone_trip_updated(struct thermal_zone_device *tz, - const struct thermal_trip *trip); int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); void thermal_zone_trip_down(struct thermal_zone_device *tz, const struct thermal_trip *trip); +void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz, + struct thermal_trip *trip, int hyst);
/* sysfs I/F */ int thermal_zone_create_device_groups(struct thermal_zone_device *tz); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 72b302bf914e..d628dd67be5c 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -12,6 +12,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/container_of.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/err.h> @@ -78,51 +79,58 @@ mode_store(struct device *dev, struct device_attribute *attr, return count; }
+#define thermal_trip_of_attr(_ptr_, _attr_) \ + ({ \ + struct thermal_trip_desc *td; \ + \ + td = container_of(_ptr_, struct thermal_trip_desc, \ + trip_attrs._attr_.attr); \ + &td->trip; \ + }) + static ssize_t trip_point_type_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip_id; + struct thermal_trip *trip = thermal_trip_of_attr(attr, type);
- if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1) - return -EINVAL; - - return sprintf(buf, "%s\n", thermal_trip_type_name(tz->trips[trip_id].trip.type)); + return sprintf(buf, "%s\n", thermal_trip_type_name(trip->type)); }
static ssize_t trip_point_temp_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct thermal_trip *trip = thermal_trip_of_attr(attr, temp); struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip *trip; - int trip_id, ret; - int temp; + int ret, temp;
ret = kstrtoint(buf, 10, &temp); if (ret) return -EINVAL;
- if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1) - return -EINVAL; - mutex_lock(&tz->lock);
- trip = &tz->trips[trip_id].trip; - - if (temp != trip->temperature) { - if (tz->ops.set_trip_temp) { - ret = tz->ops.set_trip_temp(tz, trip, temp); - if (ret) - goto unlock; - } + if (temp == trip->temperature) + goto unlock;
- thermal_zone_set_trip_temp(tz, trip, temp); + /* Arrange the condition to avoid integer overflows. */ + if (temp != THERMAL_TEMP_INVALID && + temp <= trip->hysteresis + THERMAL_TEMP_INVALID) { + ret = -EINVAL; + goto unlock; + }
- __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); + if (tz->ops.set_trip_temp) { + ret = tz->ops.set_trip_temp(tz, trip, temp); + if (ret) + goto unlock; }
+ thermal_zone_set_trip_temp(tz, trip, temp); + + __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); + unlock: mutex_unlock(&tz->lock);
@@ -133,57 +141,61 @@ static ssize_t trip_point_temp_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip_id; + struct thermal_trip *trip = thermal_trip_of_attr(attr, temp);
- if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1) - return -EINVAL; - - return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.temperature)); + return sprintf(buf, "%d\n", READ_ONCE(trip->temperature)); }
static ssize_t trip_point_hyst_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct thermal_trip *trip = thermal_trip_of_attr(attr, hyst); struct thermal_zone_device *tz = to_thermal_zone(dev); - struct thermal_trip *trip; - int trip_id, ret; - int hyst; + int ret, hyst;
ret = kstrtoint(buf, 10, &hyst); if (ret || hyst < 0) return -EINVAL;
- if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1) - return -EINVAL; - mutex_lock(&tz->lock);
- trip = &tz->trips[trip_id].trip; + if (hyst == trip->hysteresis) + goto unlock;
- if (hyst != trip->hysteresis) { + /* + * Allow the hysteresis to be updated when the temperature is invalid + * to allow user space to avoid having to adjust hysteresis after a + * valid temperature has been set, but in that case just change the + * value and do nothing else. + */ + if (trip->temperature == THERMAL_TEMP_INVALID) { WRITE_ONCE(trip->hysteresis, hyst); + goto unlock; + }
- thermal_zone_trip_updated(tz, trip); + if (trip->temperature - hyst <= THERMAL_TEMP_INVALID) { + ret = -EINVAL; + goto unlock; }
+ thermal_zone_set_trip_hyst(tz, trip, hyst); + + __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED); + +unlock: mutex_unlock(&tz->lock);
- return count; + return ret ? ret : count; }
static ssize_t trip_point_hyst_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct thermal_zone_device *tz = to_thermal_zone(dev); - int trip_id; - - if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1) - return -EINVAL; + struct thermal_trip *trip = thermal_trip_of_attr(attr, hyst);
- return sprintf(buf, "%d\n", READ_ONCE(tz->trips[trip_id].trip.hysteresis)); + return sprintf(buf, "%d\n", READ_ONCE(trip->hysteresis)); }
static ssize_t @@ -382,87 +394,55 @@ static const struct attribute_group *thermal_zone_attribute_groups[] = { */ static int create_trip_attrs(struct thermal_zone_device *tz) { - const struct thermal_trip_desc *td; + struct thermal_trip_desc *td; struct attribute **attrs; - - /* This function works only for zones with at least one trip */ - if (tz->num_trips <= 0) - return -EINVAL; - - tz->trip_type_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_type_attrs), - GFP_KERNEL); - if (!tz->trip_type_attrs) - return -ENOMEM; - - tz->trip_temp_attrs = kcalloc(tz->num_trips, sizeof(*tz->trip_temp_attrs), - GFP_KERNEL); - if (!tz->trip_temp_attrs) { - kfree(tz->trip_type_attrs); - return -ENOMEM; - } - - tz->trip_hyst_attrs = kcalloc(tz->num_trips, - sizeof(*tz->trip_hyst_attrs), - GFP_KERNEL); - if (!tz->trip_hyst_attrs) { - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - return -ENOMEM; - } + int i;
attrs = kcalloc(tz->num_trips * 3 + 1, sizeof(*attrs), GFP_KERNEL); - if (!attrs) { - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); + if (!attrs) return -ENOMEM; - }
+ i = 0; for_each_trip_desc(tz, td) { - int indx = thermal_zone_trip_id(tz, &td->trip); + struct thermal_trip_attrs *trip_attrs = &td->trip_attrs;
/* create trip type attribute */ - snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_type", indx); + snprintf(trip_attrs->type.name, THERMAL_NAME_LENGTH, + "trip_point_%d_type", i);
- sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr); - tz->trip_type_attrs[indx].attr.attr.name = - tz->trip_type_attrs[indx].name; - tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_type_attrs[indx].attr.show = trip_point_type_show; - attrs[indx] = &tz->trip_type_attrs[indx].attr.attr; + sysfs_attr_init(&trip_attrs->type.attr.attr); + trip_attrs->type.attr.attr.name = trip_attrs->type.name; + trip_attrs->type.attr.attr.mode = S_IRUGO; + trip_attrs->type.attr.show = trip_point_type_show; + attrs[i] = &trip_attrs->type.attr.attr;
/* create trip temp attribute */ - snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_temp", indx); - - sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr); - tz->trip_temp_attrs[indx].attr.attr.name = - tz->trip_temp_attrs[indx].name; - tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; + snprintf(trip_attrs->temp.name, THERMAL_NAME_LENGTH, + "trip_point_%d_temp", i); + + sysfs_attr_init(&trip_attrs->temp.attr.attr); + trip_attrs->temp.attr.attr.name = trip_attrs->temp.name; + trip_attrs->temp.attr.attr.mode = S_IRUGO; + trip_attrs->temp.attr.show = trip_point_temp_show; if (td->trip.flags & THERMAL_TRIP_FLAG_RW_TEMP) { - tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; - tz->trip_temp_attrs[indx].attr.store = - trip_point_temp_store; + trip_attrs->temp.attr.attr.mode |= S_IWUSR; + trip_attrs->temp.attr.store = trip_point_temp_store; } - attrs[indx + tz->num_trips] = &tz->trip_temp_attrs[indx].attr.attr; + attrs[i + tz->num_trips] = &trip_attrs->temp.attr.attr;
- snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH, - "trip_point_%d_hyst", indx); + snprintf(trip_attrs->hyst.name, THERMAL_NAME_LENGTH, + "trip_point_%d_hyst", i);
- sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr); - tz->trip_hyst_attrs[indx].attr.attr.name = - tz->trip_hyst_attrs[indx].name; - tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO; - tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show; + sysfs_attr_init(&trip_attrs->hyst.attr.attr); + trip_attrs->hyst.attr.attr.name = trip_attrs->hyst.name; + trip_attrs->hyst.attr.attr.mode = S_IRUGO; + trip_attrs->hyst.attr.show = trip_point_hyst_show; if (td->trip.flags & THERMAL_TRIP_FLAG_RW_HYST) { - tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR; - tz->trip_hyst_attrs[indx].attr.store = - trip_point_hyst_store; + trip_attrs->hyst.attr.attr.mode |= S_IWUSR; + trip_attrs->hyst.attr.store = trip_point_hyst_store; } - attrs[indx + tz->num_trips * 2] = - &tz->trip_hyst_attrs[indx].attr.attr; + attrs[i + 2 * tz->num_trips] = &trip_attrs->hyst.attr.attr; + i++; } attrs[tz->num_trips * 3] = NULL;
@@ -479,13 +459,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz) */ static void destroy_trip_attrs(struct thermal_zone_device *tz) { - if (!tz) - return; - - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); - kfree(tz->trips_attribute_group.attrs); + if (tz) + kfree(tz->trips_attribute_group.attrs); }
int thermal_zone_create_device_groups(struct thermal_zone_device *tz) diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c index 06a0554ddc38..fa5ac9b51231 100644 --- a/drivers/thermal/thermal_trip.c +++ b/drivers/thermal/thermal_trip.c @@ -138,11 +138,11 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz, return trip_to_trip_desc(trip) - tz->trips; }
-void thermal_zone_trip_updated(struct thermal_zone_device *tz,
-                               const struct thermal_trip *trip)
+void thermal_zone_set_trip_hyst(struct thermal_zone_device *tz,
+                                struct thermal_trip *trip, int hyst)
 {
+        WRITE_ONCE(trip->hysteresis, hyst);
         thermal_notify_tz_trip_change(tz, trip);
-        __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
 }

 void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index afef1dd4ddf4..fca5f25d693a 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1581,7 +1581,7 @@ static int omap8250_probe(struct platform_device *pdev)
         ret = devm_request_irq(&pdev->dev, up.port.irq, omap8250_irq, 0,
                                dev_name(&pdev->dev), priv);
         if (ret < 0)
-                return ret;
+                goto err;
priv->wakeirq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 69a632fefc41..f8f6e9466b40 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -124,13 +124,14 @@ struct qcom_geni_serial_port { dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; bool setup; - unsigned int baud; + unsigned long poll_timeout_us; unsigned long clk_rate; void *rx_buf; u32 loopback; bool brk;
unsigned int tx_remaining; + unsigned int tx_queued; int wakeup_irq; bool rx_tx_swap; bool cts_rts_swap; @@ -144,6 +145,8 @@ static const struct uart_ops qcom_geni_uart_pops; static struct uart_driver qcom_geni_console_driver; static struct uart_driver qcom_geni_uart_driver;
+static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport); + static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport) { return container_of(uport, struct qcom_geni_serial_port, uport); @@ -265,27 +268,18 @@ static bool qcom_geni_serial_secondary_active(struct uart_port *uport) return readl(uport->membase + SE_GENI_STATUS) & S_GENI_CMD_ACTIVE; }
-static bool qcom_geni_serial_poll_bit(struct uart_port *uport, - int offset, int field, bool set) +static bool qcom_geni_serial_poll_bitfield(struct uart_port *uport, + unsigned int offset, u32 field, u32 val) { u32 reg; struct qcom_geni_serial_port *port; - unsigned int baud; - unsigned int fifo_bits; unsigned long timeout_us = 20000; struct qcom_geni_private_data *private_data = uport->private_data;
if (private_data->drv) { port = to_dev_port(uport); - baud = port->baud; - if (!baud) - baud = 115200; - fifo_bits = port->tx_fifo_depth * port->tx_fifo_width; - /* - * Total polling iterations based on FIFO worth of bytes to be - * sent at current baud. Add a little fluff to the wait. - */ - timeout_us = ((fifo_bits * USEC_PER_SEC) / baud) + 500; + if (port->poll_timeout_us) + timeout_us = port->poll_timeout_us; }
/* @@ -295,7 +289,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport, timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10; while (timeout_us) { reg = readl(uport->membase + offset); - if ((bool)(reg & field) == set) + if ((reg & field) == val) return true; udelay(10); timeout_us -= 10; @@ -303,6 +297,12 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport, return false; }
+static bool qcom_geni_serial_poll_bit(struct uart_port *uport, + unsigned int offset, u32 field, bool set) +{ + return qcom_geni_serial_poll_bitfield(uport, offset, field, set ? field : 0); +} + static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size) { u32 m_cmd; @@ -315,18 +315,16 @@ static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size) static void qcom_geni_serial_poll_tx_done(struct uart_port *uport) { int done; - u32 irq_clear = M_CMD_DONE_EN;
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, M_CMD_DONE_EN, true); if (!done) { writel(M_GENI_CMD_ABORT, uport->membase + SE_GENI_M_CMD_CTRL_REG); - irq_clear |= M_CMD_ABORT_EN; qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, M_CMD_ABORT_EN, true); + writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); } - writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR); }
static void qcom_geni_serial_abort_rx(struct uart_port *uport) @@ -387,6 +385,7 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport, unsigned char c) { writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); + writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); qcom_geni_serial_setup_tx(uport, 1); WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, M_TX_FIFO_WATERMARK_EN, true)); @@ -397,6 +396,14 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport, #endif
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE +static void qcom_geni_serial_drain_fifo(struct uart_port *uport) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport); + + qcom_geni_serial_poll_bitfield(uport, SE_GENI_M_GP_LENGTH, GP_LENGTH, + port->tx_queued); +} + static void qcom_geni_serial_wr_char(struct uart_port *uport, unsigned char ch) { struct qcom_geni_private_data *private_data = uport->private_data; @@ -431,6 +438,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s, }
writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); + writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); qcom_geni_serial_setup_tx(uport, bytes_to_send); for (i = 0; i < count; ) { size_t chars_to_write = 0; @@ -471,8 +479,6 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s, struct qcom_geni_serial_port *port; bool locked = true; unsigned long flags; - u32 geni_status; - u32 irq_en;
WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
@@ -486,40 +492,20 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s, else uart_port_lock_irqsave(uport, &flags);
- geni_status = readl(uport->membase + SE_GENI_STATUS); + if (qcom_geni_serial_main_active(uport)) { + /* Wait for completion or drain FIFO */ + if (!locked || port->tx_remaining == 0) + qcom_geni_serial_poll_tx_done(uport); + else + qcom_geni_serial_drain_fifo(uport);
- if (!locked) { - /* - * We can only get here if an oops is in progress then we were - * unable to get the lock. This means we can't safely access - * our state variables like tx_remaining. About the best we - * can do is wait for the FIFO to be empty before we start our - * transfer, so we'll do that. - */ - qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS, - M_TX_FIFO_NOT_EMPTY_EN, false); - } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) { - /* - * It seems we can't interrupt existing transfers if all data - * has been sent, in which case we need to look for done first. - */ - qcom_geni_serial_poll_tx_done(uport); - - if (!kfifo_is_empty(&uport->state->port.xmit_fifo)) { - irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); - writel(irq_en | M_TX_FIFO_WATERMARK_EN, - uport->membase + SE_GENI_M_IRQ_EN); - } + qcom_geni_serial_cancel_tx_cmd(uport); }
__qcom_geni_serial_console_write(uport, s, count);
- - if (locked) { - if (port->tx_remaining) - qcom_geni_serial_setup_tx(uport, port->tx_remaining); + if (locked) uart_port_unlock_irqrestore(uport, flags); - } }
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop) @@ -700,6 +686,7 @@ static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport) writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
port->tx_remaining = 0; + port->tx_queued = 0; }
static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, bool drop) @@ -926,6 +913,7 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport, if (!port->tx_remaining) { qcom_geni_serial_setup_tx(uport, pending); port->tx_remaining = pending; + port->tx_queued = 0;
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); if (!(irq_en & M_TX_FIFO_WATERMARK_EN)) @@ -934,6 +922,7 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport, }
qcom_geni_serial_send_chunk_fifo(uport, chunk); + port->tx_queued += chunk;
/* * The tx fifo watermark is level triggered and latched. Though we had @@ -1244,11 +1233,11 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, unsigned long clk_rate; u32 ver, sampling_rate; unsigned int avg_bw_core; + unsigned long timeout;
qcom_geni_serial_stop_rx(uport); /* baud rate */ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000); - port->baud = baud;
sampling_rate = UART_OVERSAMPLING; /* Sampling rate is halved for IP versions >= 2.5 */ @@ -1326,9 +1315,21 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport, else tx_trans_cfg |= UART_CTS_MASK;
- if (baud) + if (baud) { uart_update_timeout(uport, termios->c_cflag, baud);
+ /* + * Make sure that qcom_geni_serial_poll_bitfield() waits for + * the FIFO, two-word intermediate transfer register and shift + * register to clear. + * + * Note that uart_fifo_timeout() also adds a 20 ms margin. + */ + timeout = jiffies_to_usecs(uart_fifo_timeout(uport)); + timeout += 3 * timeout / port->tx_fifo_depth; + WRITE_ONCE(port->poll_timeout_us, timeout); + } + if (!uart_console(uport)) writel(port->loopback, uport->membase + SE_UART_LOOPBACK_CFG); diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index 4132fcff7d4e..8bab2aedc499 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -577,8 +577,8 @@ static void rp2_reset_asic(struct rp2_card *card, unsigned int asic_id) u32 clk_cfg;
         writew(1, base + RP2_GLOBAL_CMD);
-        readw(base + RP2_GLOBAL_CMD);
         msleep(100);
+        readw(base + RP2_GLOBAL_CMD);
         writel(0, base + RP2_CLK_PRESCALER);
/* TDM clock configuration */ diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 5bea3af46abc..20e797c9e13a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2696,14 +2696,13 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) int ret = 0;
tport = &state->port; - mutex_lock(&tport->mutex); + + guard(mutex)(&tport->mutex);
port = uart_port_check(state); if (!port || port->type == PORT_UNKNOWN || - !(port->ops->poll_get_char && port->ops->poll_put_char)) { - ret = -1; - goto out; - } + !(port->ops->poll_get_char && port->ops->poll_put_char)) + return -1;
pm_state = state->pm_state; uart_change_pm(state, UART_PM_STATE_ON); @@ -2723,10 +2722,10 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options) ret = uart_set_options(port, NULL, baud, parity, bits, flow); console_list_unlock(); } -out: + if (ret) uart_change_pm(state, pm_state); - mutex_unlock(&tport->mutex); + return ret; }
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index c87fdc849c62..ecdfff2456e3 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -93,7 +93,7 @@ static const struct __ufs_qcom_bw_table {
         [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
         [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
         [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
-        [MODE_MAX][0][0]                    = { 7643136, 307200 },
+        [MODE_MAX][0][0]                    = { 7643136, 819200 },
 };
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index dbd83d321bca..46852529499d 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -718,7 +718,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev, seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb, cur_td->last_trb, hw_deq);
- if (seg && (pep->ep_state & EP_ENABLED)) + if (seg && (pep->ep_state & EP_ENABLED) && + !(pep->ep_state & EP_DIS_IN_RROGRESS)) cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id, cur_td, &deq_state); else @@ -736,7 +737,8 @@ int cdnsp_remove_request(struct cdnsp_device *pdev, * During disconnecting all endpoint will be disabled so we don't * have to worry about updating dequeue pointer. */ - if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) { + if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING || + pep->ep_state & EP_DIS_IN_RROGRESS) { status = -ESHUTDOWN; ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state); } diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index ceca4d839dfd..7ba760ee62e3 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -62,7 +62,9 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = { .resume_quirk = xhci_cdns3_resume_quirk, };
-static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci = {
+        .quirks = XHCI_CDNS_SCTX_QUIRK,
+};
static int __cdns_host_init(struct cdns *cdns) { diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 0c1b69d944ca..605fea461102 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -962,10 +962,12 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss) struct acm *acm = tty->driver_data;
         ss->line = acm->minor;
+        mutex_lock(&acm->port.mutex);
         ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
         ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
                                 ASYNC_CLOSING_WAIT_NONE :
                                 jiffies_to_msecs(acm->port.closing_wait) / 10;
+        mutex_unlock(&acm->port.mutex);
         return 0;
 }
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index a8605b02115b..1ad8fa3f862a 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -127,6 +127,15 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) role = USB_ROLE_DEVICE; }
+ if ((IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)) && + dwc2_is_device_mode(hsotg) && + hsotg->lx_state == DWC2_L2 && + hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && + hsotg->bus_suspended && + !hsotg->params.no_clock_gating) + dwc2_gadget_exit_clock_gating(hsotg, 0); + if (role == USB_ROLE_HOST) { already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index f37b0d8386c1..ff7bee78bcc4 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -1304,7 +1304,8 @@ static int dummy_urb_enqueue(
         /* kick the scheduler, it'll do the rest */
         if (!hrtimer_active(&dum_hcd->timer))
-                hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL);
+                hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS),
+                              HRTIMER_MODE_REL_SOFT);
done: spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); @@ -1325,7 +1326,7 @@ static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) rc = usb_hcd_check_unlink_urb(hcd, urb, status); if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING && !list_empty(&dum_hcd->urbp_list)) - hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL); + hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT);
spin_unlock_irqrestore(&dum_hcd->dum->lock, flags); return rc; @@ -1995,7 +1996,8 @@ static enum hrtimer_restart dummy_timer(struct hrtimer *t) dum_hcd->udev = NULL; } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) { /* want a 1 msec delay here */ - hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), HRTIMER_MODE_REL); + hrtimer_start(&dum_hcd->timer, ns_to_ktime(DUMMY_TIMER_INT_NSECS), + HRTIMER_MODE_REL_SOFT); }
spin_unlock_irqrestore(&dum->lock, flags); @@ -2389,7 +2391,7 @@ static int dummy_bus_resume(struct usb_hcd *hcd) dum_hcd->rh_state = DUMMY_RH_RUNNING; set_link_state(dum_hcd); if (!list_empty(&dum_hcd->urbp_list)) - hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL); + hrtimer_start(&dum_hcd->timer, ns_to_ktime(0), HRTIMER_MODE_REL_SOFT); hcd->state = HC_STATE_RUNNING; } spin_unlock_irq(&dum_hcd->dum->lock); @@ -2467,7 +2469,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd) { - hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); dum_hcd->timer.function = dummy_timer; dum_hcd->rh_state = DUMMY_RH_RUNNING; dum_hcd->stream_en_ep = 0; @@ -2497,7 +2499,7 @@ static int dummy_start(struct usb_hcd *hcd) return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock); - hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); dum_hcd->timer.function = dummy_timer; dum_hcd->rh_state = DUMMY_RH_RUNNING;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index dc1e345ab67e..994fd8b38bd0 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -55,6 +55,9 @@ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed
+#define PCI_VENDOR_ID_PHYTIUM 0x1db7 +#define PCI_DEVICE_ID_PHYTIUM_XHCI 0xdc27 + /* Thunderbolt */ #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5 @@ -78,6 +81,9 @@ #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 #define PCI_DEVICE_ID_ASMEDIA_3242_XHCI 0x3242
+#define PCI_DEVICE_ID_CADENCE 0x17CD +#define PCI_DEVICE_ID_CADENCE_SSP 0x0200 + static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver; @@ -416,6 +422,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME;
+ if (pdev->vendor == PCI_VENDOR_ID_PHYTIUM && + pdev->device == PCI_DEVICE_ID_PHYTIUM_XHCI) + xhci->quirks |= XHCI_RESET_ON_RESUME; + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3432) @@ -473,6 +483,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; }
+ if (pdev->vendor == PCI_DEVICE_ID_CADENCE && + pdev->device == PCI_DEVICE_ID_CADENCE_SSP) + xhci->quirks |= XHCI_CDNS_SCTX_QUIRK; + /* xHC spec requires PCI devices to support D3hot and D3cold */ if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; @@ -655,8 +669,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) static void xhci_pci_remove(struct pci_dev *dev) { struct xhci_hcd *xhci; + bool set_power_d3;
xhci = hcd_to_xhci(pci_get_drvdata(dev)); + set_power_d3 = xhci->quirks & XHCI_SPURIOUS_WAKEUP;
xhci->xhc_state |= XHCI_STATE_REMOVING;
@@ -669,11 +685,11 @@ static void xhci_pci_remove(struct pci_dev *dev) xhci->shared_hcd = NULL; }
+ usb_hcd_pci_remove(dev); + /* Workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + if (set_power_d3) pci_set_power_state(dev, PCI_D3hot); - - usb_hcd_pci_remove(dev); }
/* diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 4ea2c3e072a9..90726899bc5b 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1399,6 +1399,20 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_stream_ctx *ctx = &ep->stream_info->stream_ctx_array[stream_id]; deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; + + /* + * Cadence xHCI controllers store some endpoint state + * information within Rsvd0 fields of Stream Endpoint + * context. This field is not cleared during Set TR + * Dequeue Pointer command which causes XDMA to skip + * over transfer ring and leads to data loss on stream + * pipe. + * To fix this issue driver must clear Rsvd0 field. + */ + if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) { + ctx->reserved[0] = 0; + ctx->reserved[1] = 0; + } } else { deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index ebd0afd59a60..0ea95ad4cb90 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1628,6 +1628,7 @@ struct xhci_hcd { #define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) #define XHCI_ZHAOXIN_HOST BIT_ULL(46) #define XHCI_WRITE_64_HI_LO BIT_ULL(47) +#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index c8098e9b432e..62b5a30edc42 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -107,7 +107,12 @@ static void appledisplay_complete(struct urb *urb) case ACD_BTN_BRIGHT_UP: case ACD_BTN_BRIGHT_DOWN: pdata->button_pressed = 1; - schedule_delayed_work(&pdata->work, 0); + /* + * there is a window during which no device + * is registered + */ + if (pdata->bd ) + schedule_delayed_work(&pdata->work, 0); break; case ACD_BTN_NONE: default: @@ -202,6 +207,7 @@ static int appledisplay_probe(struct usb_interface *iface, const struct usb_device_id *id) { struct backlight_properties props; + struct backlight_device *backlight; struct appledisplay *pdata; struct usb_device *udev = interface_to_usbdev(iface); struct usb_endpoint_descriptor *endpoint; @@ -272,13 +278,14 @@ static int appledisplay_probe(struct usb_interface *iface, memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = 0xff; - pdata->bd = backlight_device_register(bl_name, NULL, pdata, + backlight = backlight_device_register(bl_name, NULL, pdata, &appledisplay_bl_data, &props); - if (IS_ERR(pdata->bd)) { + if (IS_ERR(backlight)) { dev_err(&iface->dev, "Backlight registration failed\n"); - retval = PTR_ERR(pdata->bd); + retval = PTR_ERR(backlight); goto error; } + pdata->bd = backlight;
/* Try to get brightness */ brightness = appledisplay_bl_get_brightness(pdata->bd); diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index cecd7693b741..75f5a740cba3 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -88,6 +88,9 @@ static int vendor_command(struct cypress *dev, unsigned char request, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, address, data, iobuf, CYPRESS_MAX_REQSIZE, USB_CTRL_GET_TIMEOUT); + /* we must not process garbage */ + if (retval < 2) + goto err_buf;
/* store returned data (more READs to be added) */ switch (request) { @@ -107,6 +110,7 @@ static int vendor_command(struct cypress *dev, unsigned char request, break; }
+err_buf: kfree(iobuf); error: return retval; diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 4745a320eae4..4a9859e03f6b 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -404,7 +404,6 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, struct usb_yurex *dev; int len = 0; char in_buffer[MAX_S64_STRLEN]; - unsigned long flags;
dev = file->private_data;
@@ -419,9 +418,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, return -EIO; }
- spin_lock_irqsave(&dev->lock, flags); + spin_lock_irq(&dev->lock); scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu); - spin_unlock_irqrestore(&dev->lock, flags); + spin_unlock_irq(&dev->lock); mutex_unlock(&dev->io_mutex);
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); @@ -511,8 +510,11 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, __func__, retval); goto error; } - if (set && timeout) + if (set && timeout) { + spin_lock_irq(&dev->lock); dev->bbu = c2; + spin_unlock_irq(&dev->lock); + } return timeout ? count : -EIO;
error: diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 17155ed17fdf..8cc43c866130 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -38,6 +38,10 @@
void ucsi_notify_common(struct ucsi *ucsi, u32 cci) { + /* Ignore bogus data in CCI if busy indicator is set. */ + if (cci & UCSI_CCI_BUSY) + return; + if (UCSI_CCI_CONNECTOR(cci)) ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
@@ -107,16 +111,14 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci, size = clamp(size, 0, 16);
ret = ucsi->ops->sync_control(ucsi, command); - if (ret) - return ret; + if (ucsi->ops->read_cci(ucsi, cci)) + return -EIO;
- ret = ucsi->ops->read_cci(ucsi, cci); + if (*cci & UCSI_CCI_BUSY) + return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY; if (ret) return ret;
- if (*cci & UCSI_CCI_BUSY) - return -EBUSY; - if (!(*cci & UCSI_CCI_COMMAND_COMPLETE)) return -EIO;
@@ -148,15 +150,7 @@ static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num) int ret;
command = UCSI_GET_ERROR_STATUS | UCSI_CONNECTOR_NUMBER(connector_num); - ret = ucsi_run_command(ucsi, command, &cci, - &error, sizeof(error), false); - - if (cci & UCSI_CCI_BUSY) { - ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false); - - return ret ? ret : -EBUSY; - } - + ret = ucsi_run_command(ucsi, command, &cci, &error, sizeof(error), false); if (ret < 0) return ret;
@@ -238,9 +232,8 @@ static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd, mutex_lock(&ucsi->ppm_lock);
ret = ucsi_run_command(ucsi, cmd, &cci, data, size, conn_ack); - if (cci & UCSI_CCI_BUSY) - ret = ucsi_run_command(ucsi, UCSI_CANCEL, &cci, NULL, 0, false) ?: -EBUSY; - else if (cci & UCSI_CCI_ERROR) + + if (cci & UCSI_CCI_ERROR) ret = ucsi_read_error(ucsi, connector_num);
mutex_unlock(&ucsi->ppm_lock); @@ -1249,6 +1242,10 @@ static void ucsi_handle_connector_change(struct work_struct *work)
mutex_lock(&con->lock);
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags)) + dev_err_once(ucsi->dev, "%s entered without EVENT_PENDING\n", + __func__); + command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
ret = ucsi_send_command_common(ucsi, command, &con->status, diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 4758914ccf86..bf56f3d69625 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -581,6 +581,9 @@ static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
 void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
+        if (!mvdev->res.valid)
+                return;
+
         for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
                 mlx5_vdpa_update_mr(mvdev, NULL, i);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 478cd46a49ed..5a49b5a6d496 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -209,11 +209,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid) if (irq < 0) return;
- irq_bypass_unregister_producer(&vq->call_ctx.producer); if (!vq->call_ctx.ctx) return;
- vq->call_ctx.producer.token = vq->call_ctx.ctx; vq->call_ctx.producer.irq = irq; ret = irq_bypass_register_producer(&vq->call_ctx.producer); if (unlikely(ret)) @@ -709,6 +707,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, vq->last_avail_idx = vq_state.split.avail_index; } break; + case VHOST_SET_VRING_CALL: + if (vq->call_ctx.ctx) { + if (ops->get_status(vdpa) & + VIRTIO_CONFIG_S_DRIVER_OK) + vhost_vdpa_unsetup_vq_irq(v, idx); + vq->call_ctx.producer.token = NULL; + } + break; }
r = vhost_vring_ioctl(&v->vdev, cmd, argp); @@ -747,13 +753,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, cb.callback = vhost_vdpa_virtqueue_cb; cb.private = vq; cb.trigger = vq->call_ctx.ctx; + vq->call_ctx.producer.token = vq->call_ctx.ctx; + if (ops->get_status(vdpa) & + VIRTIO_CONFIG_S_DRIVER_OK) + vhost_vdpa_setup_vq_irq(v, idx); } else { cb.callback = NULL; cb.private = NULL; cb.trigger = NULL; } ops->set_vq_cb(vdpa, idx, &cb); - vhost_vdpa_setup_vq_irq(v, idx); break;
case VHOST_SET_VRING_NUM: @@ -1419,6 +1428,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep) for (i = 0; i < nvqs; i++) { vqs[i] = &v->vqs[i]; vqs[i]->handle_kick = handle_vq_kick; + vqs[i]->call_ctx.ctx = NULL; } vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false, vhost_vdpa_process_iotlb_msg); diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c index 66fac8e5393e..a1144b150982 100644 --- a/drivers/video/fbdev/hpfb.c +++ b/drivers/video/fbdev/hpfb.c @@ -345,6 +345,7 @@ static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent) if (hpfb_init_one(paddr, vaddr)) { if (d->scode >= DIOII_SCBASE) iounmap((void *)vaddr); + release_mem_region(d->resource.start, resource_size(&d->resource)); return -ENOMEM; } return 0; diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index 66d4628a96ae..c90f48ebb15e 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -407,6 +407,7 @@ static int xenfb_probe(struct xenbus_device *dev, /* complete the abuse: */ fb_info->pseudo_palette = fb_info->par; fb_info->par = info; + fb_info->device = &dev->dev;
fb_info->screen_buffer = info->fb;
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index bc1f962e483b..b9095751e43b 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -127,10 +127,12 @@ static void __virtio_config_changed(struct virtio_device *dev) { struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- if (!dev->config_enabled) + if (!dev->config_core_enabled || dev->config_driver_disabled) dev->config_change_pending = true; - else if (drv && drv->config_changed) + else if (drv && drv->config_changed) { drv->config_changed(dev); + dev->config_change_pending = false; + } }
void virtio_config_changed(struct virtio_device *dev) @@ -143,20 +145,51 @@ void virtio_config_changed(struct virtio_device *dev) } EXPORT_SYMBOL_GPL(virtio_config_changed);
-static void virtio_config_disable(struct virtio_device *dev) +/** + * virtio_config_driver_disable - disable config change reporting by drivers + * @dev: the device to reset + * + * This is only allowed to be called by a driver and disabling can't + * be nested. + */ +void virtio_config_driver_disable(struct virtio_device *dev) { spin_lock_irq(&dev->config_lock); - dev->config_enabled = false; + dev->config_driver_disabled = true; spin_unlock_irq(&dev->config_lock); } +EXPORT_SYMBOL_GPL(virtio_config_driver_disable);
-static void virtio_config_enable(struct virtio_device *dev) +/** + * virtio_config_driver_enable - enable config change reporting by drivers + * @dev: the device to reset + * + * This is only allowed to be called by a driver and enabling can't + * be nested. + */ +void virtio_config_driver_enable(struct virtio_device *dev) { spin_lock_irq(&dev->config_lock); - dev->config_enabled = true; + dev->config_driver_disabled = false; + if (dev->config_change_pending) + __virtio_config_changed(dev); + spin_unlock_irq(&dev->config_lock); +} +EXPORT_SYMBOL_GPL(virtio_config_driver_enable); + +static void virtio_config_core_disable(struct virtio_device *dev) +{ + spin_lock_irq(&dev->config_lock); + dev->config_core_enabled = false; + spin_unlock_irq(&dev->config_lock); +} + +static void virtio_config_core_enable(struct virtio_device *dev) +{ + spin_lock_irq(&dev->config_lock); + dev->config_core_enabled = true; if (dev->config_change_pending) __virtio_config_changed(dev); - dev->config_change_pending = false; spin_unlock_irq(&dev->config_lock); }
@@ -316,7 +349,7 @@ static int virtio_dev_probe(struct device *_d) if (drv->scan) drv->scan(dev);
- virtio_config_enable(dev); + virtio_config_core_enable(dev);
return 0;
@@ -331,7 +364,7 @@ static void virtio_dev_remove(struct device *_d) struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- virtio_config_disable(dev); + virtio_config_core_disable(dev);
drv->remove(dev);
@@ -443,7 +476,7 @@ int register_virtio_device(struct virtio_device *dev) goto out_ida_remove;
spin_lock_init(&dev->config_lock); - dev->config_enabled = false; + dev->config_core_enabled = false; dev->config_change_pending = false;
INIT_LIST_HEAD(&dev->vqs); @@ -500,14 +533,14 @@ int virtio_device_freeze(struct virtio_device *dev) struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); int ret;
- virtio_config_disable(dev); + virtio_config_core_disable(dev);
dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
if (drv && drv->freeze) { ret = drv->freeze(dev); if (ret) { - virtio_config_enable(dev); + virtio_config_core_enable(dev); return ret; } } @@ -557,7 +590,7 @@ int virtio_device_restore(struct virtio_device *dev) if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK)) virtio_device_ready(dev);
- virtio_config_enable(dev); + virtio_config_core_enable(dev);
return 0;
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c index e51fe1b78518..d73076b686d8 100644 --- a/drivers/watchdog/imx_sc_wdt.c +++ b/drivers/watchdog/imx_sc_wdt.c @@ -216,29 +216,6 @@ static int imx_sc_wdt_probe(struct platform_device *pdev) return devm_watchdog_register_device(dev, wdog); }
-static int __maybe_unused imx_sc_wdt_suspend(struct device *dev) -{ - struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev); - - if (watchdog_active(&imx_sc_wdd->wdd)) - imx_sc_wdt_stop(&imx_sc_wdd->wdd); - - return 0; -} - -static int __maybe_unused imx_sc_wdt_resume(struct device *dev) -{ - struct imx_sc_wdt_device *imx_sc_wdd = dev_get_drvdata(dev); - - if (watchdog_active(&imx_sc_wdd->wdd)) - imx_sc_wdt_start(&imx_sc_wdd->wdd); - - return 0; -} - -static SIMPLE_DEV_PM_OPS(imx_sc_wdt_pm_ops, - imx_sc_wdt_suspend, imx_sc_wdt_resume); - static const struct of_device_id imx_sc_wdt_dt_ids[] = { { .compatible = "fsl,imx-sc-wdt", }, { /* sentinel */ } @@ -250,7 +227,6 @@ static struct platform_driver imx_sc_wdt_driver = { .driver = { .name = "imx-sc-wdt", .of_match_table = imx_sc_wdt_dt_ids, - .pm = &imx_sc_wdt_pm_ops, }, }; module_platform_driver(imx_sc_wdt_driver); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 35155258a7e2..a337edcf8faf 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -78,9 +78,15 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) { unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p); unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size); + phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
next_bfn = pfn_to_bfn(xen_pfn);
+ /* If buffer is physically aligned, ensure DMA alignment. */ + if (IS_ALIGNED(p, algn) && + !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn)) + return 1; + for (i = 1; i < nr_pages; i++) if (pfn_to_bfn(++xen_pfn) != ++next_bfn) return 1; @@ -141,7 +147,7 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size, void *ret;
/* Align the allocation to the Xen page size */ - size = 1UL << (order + XEN_PAGE_SHIFT); + size = ALIGN(size, XEN_PAGE_SIZE);
ret = (void *)__get_free_pages(flags, get_order(size)); if (!ret) @@ -173,7 +179,7 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, int order = get_order(size);
/* Convert the size to actually allocated. */ - size = 1UL << (order + XEN_PAGE_SHIFT); + size = ALIGN(size, XEN_PAGE_SIZE);
if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) || WARN_ON_ONCE(range_straddles_page_boundary(phys, size))) diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index cf792d4de4f1..64faa6c51f60 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c @@ -172,8 +172,7 @@ static int autofs_parse_fd(struct fs_context *fc, struct autofs_sb_info *sbi, ret = autofs_check_pipe(pipe); if (ret < 0) { errorf(fc, "Invalid/unusable pipe"); - if (param->type != fs_value_is_file) - fput(pipe); + fput(pipe); return -EBADF; }
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 3056c8aed8ef..2bc074035db4 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -152,6 +152,7 @@ struct btrfs_inode { * logged_trans), to access/update delalloc_bytes, new_delalloc_bytes, * defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to * update the VFS' inode number of bytes used. + * Also protects setting struct file::private_data. */ spinlock_t lock;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index c8568b1a61c4..18554f34f2d3 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -459,6 +459,8 @@ struct btrfs_file_private { void *filldir_buf; u64 last_index; struct extent_state *llseek_cached_state; + /* Task that allocated this structure. */ + struct task_struct *owner_task; };
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index feec49e6f9c8..a5966324607d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6551,13 +6551,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) continue;
                 ret = btrfs_trim_free_extents(device, &group_trimmed);
+
+                trimmed += group_trimmed;
                 if (ret) {
                         dev_failed++;
                         dev_ret = ret;
                         break;
                 }
-
-                trimmed += group_trimmed;
         }
         mutex_unlock(&fs_devices->device_list_mutex);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 2aeb8116549c..d386f643079b 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3485,7 +3485,7 @@ static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence, static loff_t find_desired_extent(struct file *file, loff_t offset, int whence) { struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host); - struct btrfs_file_private *private = file->private_data; + struct btrfs_file_private *private; struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_state *cached_state = NULL; struct extent_state **delalloc_cached_state; @@ -3513,7 +3513,19 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence) inode_get_bytes(&inode->vfs_inode) == i_size) return i_size;
- if (!private) { + spin_lock(&inode->lock); + private = file->private_data; + spin_unlock(&inode->lock); + + if (private && private->owner_task != current) { + /* + * Not allocated by us, don't use it as its cached state is used + * by the task that allocated it and we don't want neither to + * mess with it nor get incorrect results because it reflects an + * invalid state for the current task. + */ + private = NULL; + } else if (!private) { private = kzalloc(sizeof(*private), GFP_KERNEL); /* * No worries if memory allocation failed. @@ -3521,7 +3533,23 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence) * lseek SEEK_HOLE/DATA calls to a file when there's delalloc, * so everything will still be correct. */ - file->private_data = private; + if (private) { + bool free = false; + + private->owner_task = current; + + spin_lock(&inode->lock); + if (file->private_data) + free = true; + else + file->private_data = private; + spin_unlock(&inode->lock); + + if (free) { + kfree(private); + private = NULL; + } + } }
if (private) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index e0a664b8a46a..94d8f29b04c5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -543,13 +543,11 @@ static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info,
         range.minlen = max(range.minlen, minlen);
         ret = btrfs_trim_fs(fs_info, &range);
-        if (ret < 0)
-                return ret;

         if (copy_to_user(arg, &range, sizeof(range)))
                 return -EFAULT;

-        return 0;
+        return ret;
 }
int __pure btrfs_is_empty_uuid(const u8 *uuid) diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index 8ddd5fcbeb93..5941b3662709 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -900,8 +900,14 @@ void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct fol }
#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \ - bitmap_cut(dst, subpage->bitmaps, 0, \ - subpage_info->name##_offset, subpage_info->bitmap_nr_bits) +{ \ + const int bitmap_nr_bits = subpage_info->bitmap_nr_bits; \ + \ + ASSERT(bitmap_nr_bits < BITS_PER_LONG); \ + *dst = bitmap_read(subpage->bitmaps, \ + subpage_info->name##_offset, \ + bitmap_nr_bits); \ +}
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 634d69964fe4..7b50263723bc 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1517,7 +1517,7 @@ static int check_extent_item(struct extent_buffer *leaf, dref_objectid > BTRFS_LAST_FREE_OBJECTID)) { extent_err(leaf, slot, "invalid data ref objectid value %llu", - dref_root); + dref_objectid); return -EUCLEAN; } if (unlikely(!IS_ALIGNED(dref_offset, diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index 4dd8a993c60a..7c6f260a3be5 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c @@ -64,9 +64,15 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object) memcpy(buf->data, fscache_get_aux(object->cookie), len);
ret = cachefiles_inject_write_error(); - if (ret == 0) - ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, - buf, sizeof(struct cachefiles_xattr) + len, 0); + if (ret == 0) { + ret = mnt_want_write_file(file); + if (ret == 0) { + ret = vfs_setxattr(&nop_mnt_idmap, dentry, + cachefiles_xattr_cache, buf, + sizeof(struct cachefiles_xattr) + len, 0); + mnt_drop_write_file(file); + } + } if (ret < 0) { trace_cachefiles_vfs_error(object, file_inode(file), ret, cachefiles_trace_setxattr_error); @@ -151,8 +157,14 @@ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, int ret;
ret = cachefiles_inject_remove_error(); - if (ret == 0) - ret = vfs_removexattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache); + if (ret == 0) { + ret = mnt_want_write(cache->mnt); + if (ret == 0) { + ret = vfs_removexattr(&nop_mnt_idmap, dentry, + cachefiles_xattr_cache); + mnt_drop_write(cache->mnt); + } + } if (ret < 0) { trace_cachefiles_vfs_error(object, d_inode(dentry), ret, cachefiles_trace_remxattr_error); @@ -208,9 +220,15 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume) memcpy(buf->data, p, volume->vcookie->coherency_len);
ret = cachefiles_inject_write_error(); - if (ret == 0) - ret = vfs_setxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, - buf, len, 0); + if (ret == 0) { + ret = mnt_want_write(volume->cache->mnt); + if (ret == 0) { + ret = vfs_setxattr(&nop_mnt_idmap, dentry, + cachefiles_xattr_cache, + buf, len, 0); + mnt_drop_write(volume->cache->mnt); + } + } if (ret < 0) { trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret, cachefiles_trace_setxattr_error); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 91521576f500..66d9b3b4c588 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -89,12 +89,14 @@ enum { Opt_uid, Opt_gid, Opt_mode, + Opt_source, };
static const struct fs_parameter_spec debugfs_param_specs[] = { fsparam_gid ("gid", Opt_gid), fsparam_u32oct ("mode", Opt_mode), fsparam_uid ("uid", Opt_uid), + fsparam_string ("source", Opt_source), {} };
@@ -126,6 +128,12 @@ static int debugfs_parse_param(struct fs_context *fc, struct fs_parameter *param case Opt_mode: opts->mode = result.uint_32 & S_IALLUGO; break; + case Opt_source: + if (fc->source) + return invalfc(fc, "Multiple sources specified"); + fc->source = param->string; + param->string = NULL; + break; /* * We might like to report bad mount options here; * but traditionally debugfs has ignored all mount options diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index c2253b6a5416..eb318c7ddd80 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -539,7 +539,7 @@ int __init z_erofs_init_decompressor(void) for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) { err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0; if (err) { - while (--i) + while (i--) if (z_erofs_decomp[i]) z_erofs_decomp[i]->exit(); return err; diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 419432be3223..4e3ea6689cb4 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -178,12 +178,14 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, unsigned int m_pofs) { struct erofs_inode *vi = EROFS_I(inode); - unsigned int bsz = i_blocksize(inode); + loff_t off; char *lnk;
- /* if it cannot be handled with fast symlink scheme */ - if (vi->datalayout != EROFS_INODE_FLAT_INLINE || - inode->i_size >= bsz || inode->i_size < 0) { + m_pofs += vi->xattr_isize; + /* check if it cannot be handled with fast symlink scheme */ + if (vi->datalayout != EROFS_INODE_FLAT_INLINE || inode->i_size < 0 || + check_add_overflow(m_pofs, inode->i_size, &off) || + off > i_blocksize(inode)) { inode->i_op = &erofs_symlink_iops; return 0; } @@ -192,16 +194,6 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, if (!lnk) return -ENOMEM;
- m_pofs += vi->xattr_isize; - /* inline symlink data shouldn't cross block boundary */ - if (m_pofs + inode->i_size > bsz) { - kfree(lnk); - erofs_err(inode->i_sb, - "inline data cross block boundary @ nid %llu", - vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } memcpy(lnk, kaddr + m_pofs, inode->i_size); lnk[inode->i_size] = '\0';
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 424f656cd765..a0bae499c5ff 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1428,6 +1428,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, struct z_erofs_bvec zbv; struct address_space *mapping; struct folio *folio; + struct page *page; int bs = i_blocksize(f->inode);
/* Except for inplace folios, the entire folio can be used for I/Os */ @@ -1450,7 +1451,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, * file-backed folios will be used instead. */ if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { - folio->private = 0; tocache = true; goto out_tocache; } @@ -1468,7 +1468,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, }
folio_lock(folio); - if (folio->mapping == mc) { + if (likely(folio->mapping == mc)) { /* * The cached folio is still in managed cache but without * a valid `->private` pcluster hint. Let's reconnect them. @@ -1478,41 +1478,44 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, /* compressed_bvecs[] already takes a ref before */ folio_put(folio); } - - /* no need to submit if it is already up-to-date */ - if (folio_test_uptodate(folio)) { - folio_unlock(folio); - bvec->bv_page = NULL; + if (likely(folio->private == pcl)) { + /* don't submit cache I/Os again if already uptodate */ + if (folio_test_uptodate(folio)) { + folio_unlock(folio); + bvec->bv_page = NULL; + } + return; } - return; + /* + * Already linked with another pcluster, which only appears in + * crafted images by fuzzers for now. But handle this anyway. + */ + tocache = false; /* use temporary short-lived pages */ + } else { + DBG_BUGON(1); /* referenced managed folios can't be truncated */ + tocache = true; } - - /* - * It has been truncated, so it's unsafe to reuse this one. Let's - * allocate a new page for compressed data. - */ - DBG_BUGON(folio->mapping); - tocache = true; folio_unlock(folio); folio_put(folio); out_allocfolio: - zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); + page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); spin_lock(&pcl->obj.lockref.lock); - if (pcl->compressed_bvecs[nr].page) { - erofs_pagepool_add(&f->pagepool, zbv.page); + if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) { + erofs_pagepool_add(&f->pagepool, page); spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } - bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page; - folio = page_folio(zbv.page); - /* first mark it as a temporary shortlived folio (now 1 ref) */ - folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; + bvec->bv_page = pcl->compressed_bvecs[nr].page = page; + folio = page_folio(page); spin_unlock(&pcl->obj.lockref.lock); out_tocache: if (!tocache || bs != PAGE_SIZE || - filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) + filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) { + /* turn into a temporary shortlived folio (1 ref) */ + folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; return; + } folio_attach_private(folio, pcl); /* drop a refcount added by allocpage (then 2 refs in total here) */ folio_put(folio); @@ -1647,13 +1650,10 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, cur = mdev.m_pa; end = cur + pcl->pclustersize; do { - z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); - if (!bvec.bv_page) - continue; - + bvec.bv_page = NULL; if (bio && (cur != last_pa || bio->bi_bdev != mdev.m_bdev)) { -io_retry: +drain_io: if (!erofs_is_fscache_mode(sb)) submit_bio(bio); else @@ -1666,6 +1666,15 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, bio = NULL; }
+ if (!bvec.bv_page) { + z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); + if (!bvec.bv_page) + continue; + if (cur + bvec.bv_len > end) + bvec.bv_len = end - cur; + DBG_BUGON(bvec.bv_len < sb->s_blocksize); + } + if (unlikely(PageWorkingset(bvec.bv_page)) && !memstall) { psi_memstall_enter(&pflags); @@ -1685,13 +1694,9 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, ++nr_bios; }
- if (cur + bvec.bv_len > end) - bvec.bv_len = end - cur; - DBG_BUGON(bvec.bv_len < sb->s_blocksize); if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, bvec.bv_offset)) - goto io_retry; - + goto drain_io; last_pa = cur + bvec.bv_len; bypass = false; } while ((cur += bvec.bv_len) < end); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index f53ca4f7fced..6d0e2f547ae7 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -420,7 +420,7 @@ static bool busy_loop_ep_timeout(unsigned long start_time,
 static bool ep_busy_loop_on(struct eventpoll *ep)
 {
-        return !!ep->busy_poll_usecs || net_busy_loop_on();
+        return !!READ_ONCE(ep->busy_poll_usecs) || net_busy_loop_on();
 }
static bool ep_busy_loop_end(void *p, unsigned long start_time) diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c index afdf13c34ff5..1ac011088ce7 100644 --- a/fs/exfat/nls.c +++ b/fs/exfat/nls.c @@ -779,8 +779,11 @@ int exfat_create_upcase_table(struct super_block *sb) le32_to_cpu(ep->dentry.upcase.checksum));
brelse(bh); - if (ret && ret != -EIO) + if (ret && ret != -EIO) { + /* free memory from exfat_load_upcase_table call */ + exfat_free_upcase_table(sbi); goto load_default; + }
         /* load successfully */
         return ret;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9dfd768ed9f8..81641be38c0e 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -514,6 +514,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
                 if (min_inodes < 1)
                         min_inodes = 1;
                 min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
+                if (min_clusters < 0)
+                        min_clusters = 0;
         /*
          * Start looking in the flex group where we last allocated an
@@ -755,10 +757,10 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
         struct ext4_group_desc *gdp;
         ext4_group_t group;
         int bit;
-        int err = -EFSCORRUPTED;
+        int err;

         if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
-                goto out;
+                return -EFSCORRUPTED;

         group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
         bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
@@ -860,6 +862,7 @@ int ext4_mark_inode_used(struct super_block *sb, int ino)
         err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
         sync_dirty_buffer(group_desc_bh);
 out:
+        brelse(inode_bitmap_bh);
         return err;
 }
@@ -1053,12 +1056,13 @@ struct inode *__ext4_new_inode(struct mnt_idmap *idmap, brelse(inode_bitmap_bh); inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); /* Skip groups with suspicious inode tables */ - if (((!(sbi->s_mount_state & EXT4_FC_REPLAY)) - && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || - IS_ERR(inode_bitmap_bh)) { + if (IS_ERR(inode_bitmap_bh)) { inode_bitmap_bh = NULL; goto next_group; } + if (!(sbi->s_mount_state & EXT4_FC_REPLAY) && + EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) + goto next_group;
repeat_in_this_group: ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index e7a09a99837b..44a5f6df59ec 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1664,24 +1664,36 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir, struct ext4_dir_entry_2 **res_dir, int *has_inline_data) { + struct ext4_xattr_ibody_find is = { + .s = { .not_found = -ENODATA, }, + }; + struct ext4_xattr_info i = { + .name_index = EXT4_XATTR_INDEX_SYSTEM, + .name = EXT4_XATTR_SYSTEM_DATA, + }; int ret; - struct ext4_iloc iloc; void *inline_start; int inline_size;
- if (ext4_get_inode_loc(dir, &iloc)) - return NULL; + ret = ext4_get_inode_loc(dir, &is.iloc); + if (ret) + return ERR_PTR(ret);
down_read(&EXT4_I(dir)->xattr_sem); + + ret = ext4_xattr_ibody_find(dir, &i, &is); + if (ret) + goto out; + if (!ext4_has_inline_data(dir)) { *has_inline_data = 0; goto out; }
- inline_start = (void *)ext4_raw_inode(&iloc)->i_block + + inline_start = (void *)ext4_raw_inode(&is.iloc)->i_block + EXT4_INLINE_DOTDOT_SIZE; inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE; - ret = ext4_search_dir(iloc.bh, inline_start, inline_size, + ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size, dir, fname, 0, res_dir); if (ret == 1) goto out_find; @@ -1691,20 +1703,23 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir, if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE) goto out;
- inline_start = ext4_get_inline_xattr_pos(dir, &iloc); + inline_start = ext4_get_inline_xattr_pos(dir, &is.iloc); inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
- ret = ext4_search_dir(iloc.bh, inline_start, inline_size, + ret = ext4_search_dir(is.iloc.bh, inline_start, inline_size, dir, fname, 0, res_dir); if (ret == 1) goto out_find;
out: - brelse(iloc.bh); - iloc.bh = NULL; + brelse(is.iloc.bh); + if (ret < 0) + is.iloc.bh = ERR_PTR(ret); + else + is.iloc.bh = NULL; out_find: up_read(&EXT4_I(dir)->xattr_sem); - return iloc.bh; + return is.iloc.bh; }
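For readers less familiar with the convention the ext4_find_inline_entry() rewrite above leans on: returning ERR_PTR(ret) instead of a bare NULL lets the caller tell a hard failure (I/O error, corrupt xattr) apart from a simple "no entry found". A minimal userspace re-implementation of the ERR_PTR()/IS_ERR()/PTR_ERR() helpers (simplified from include/linux/err.h) showing the three-way result:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* A lookup that can fail hard (error pointer), miss (NULL) or hit (valid pointer). */
static void *find_entry(int key)
{
	static int entry = 42;

	if (key < 0)
		return ERR_PTR(-EIO);	/* hard error */
	if (key != 1)
		return NULL;		/* simply not found */
	return &entry;
}

int main(void)
{
	void *p = find_entry(-1);

	if (IS_ERR(p))
		printf("lookup failed: %ld\n", PTR_ERR(p));
	else if (!p)
		printf("not found\n");
	else
		printf("found\n");
	return 0;
}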
int ext4_delete_inline_entry(handle_t *handle, diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 9dda9cd68ab2..dfecd25cee4e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3887,11 +3887,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb, /* * Clear the trimmed flag for the group so that the next * ext4_trim_fs can trim it. - * If the volume is mounted with -o discard, online discard - * is supported and the free blocks will be trimmed online. */ - if (!test_opt(sb, DISCARD)) - EXT4_MB_GRP_CLEAR_TRIMMED(db); + EXT4_MB_GRP_CLEAR_TRIMMED(db);
if (!db->bb_free_root.rb_node) { /* No more items in the per group rb tree @@ -6515,8 +6512,9 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, " group:%u block:%d count:%lu failed" " with %d", block_group, bit, count, err); - } else - EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); + } + + EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
ext4_lock_group(sb, block_group); mb_free_blocks(inode, &e4b, bit, count_clusters); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e72145c4ae5a..7e73e13741d1 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5165,6 +5165,18 @@ static int ext4_block_group_meta_init(struct super_block *sb, int silent) return 0; }
+/* + * It's hard to get stripe aligned blocks if stripe is not aligned with + * cluster, just disable stripe and alert user to simplify code and avoid + * stripe aligned allocation which will rarely succeed. + */ +static bool ext4_is_stripe_incompatible(struct super_block *sb, unsigned long stripe) +{ + struct ext4_sb_info *sbi = EXT4_SB(sb); + return (stripe > 0 && sbi->s_cluster_ratio > 1 && + stripe % sbi->s_cluster_ratio != 0); +} + static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) { struct ext4_super_block *es = NULL; @@ -5272,13 +5284,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) goto failed_mount3;
sbi->s_stripe = ext4_get_stripe_size(sbi); - /* - * It's hard to get stripe aligned blocks if stripe is not aligned with - * cluster, just disable stripe and alert user to simpfy code and avoid - * stripe aligned allocation which will rarely successes. - */ - if (sbi->s_stripe > 0 && sbi->s_cluster_ratio > 1 && - sbi->s_stripe % sbi->s_cluster_ratio != 0) { + if (ext4_is_stripe_incompatible(sb, sbi->s_stripe)) { ext4_msg(sb, KERN_WARNING, "stripe (%lu) is not aligned with cluster size (%u), " "stripe is disabled", @@ -6441,6 +6447,15 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
}
+ if ((ctx->spec & EXT4_SPEC_s_stripe) && + ext4_is_stripe_incompatible(sb, ctx->s_stripe)) { + ext4_msg(sb, KERN_WARNING, + "stripe (%lu) is not aligned with cluster size (%u), " + "stripe is disabled", + ctx->s_stripe, sbi->s_cluster_ratio); + ctx->s_stripe = 0; + } + /* * Changing the DIOREAD_NOLOCK or DELALLOC mount options may cause * two calls to ext4_should_dioread_nolock() to return inconsistent diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 990b93689b46..f55d54bb12f4 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -945,7 +945,7 @@ static int __f2fs_get_cluster_blocks(struct inode *inode, unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; int count, i;
- for (i = 1, count = 1; i < cluster_size; i++) { + for (i = 0, count = 0; i < cluster_size; i++) { block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node + i);
@@ -956,8 +956,8 @@ static int __f2fs_get_cluster_blocks(struct inode *inode, return count; }
-static int __f2fs_cluster_blocks(struct inode *inode, - unsigned int cluster_idx, bool compr_blks) +static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx, + enum cluster_check_type type) { struct dnode_of_data dn; unsigned int start_idx = cluster_idx << @@ -978,10 +978,12 @@ static int __f2fs_cluster_blocks(struct inode *inode, }
if (dn.data_blkaddr == COMPRESS_ADDR) { - if (compr_blks) - ret = __f2fs_get_cluster_blocks(inode, &dn); - else + if (type == CLUSTER_COMPR_BLKS) + ret = 1 + __f2fs_get_cluster_blocks(inode, &dn); + else if (type == CLUSTER_IS_COMPR) ret = 1; + } else if (type == CLUSTER_RAW_BLKS) { + ret = __f2fs_get_cluster_blocks(inode, &dn); } fail: f2fs_put_dnode(&dn); @@ -991,7 +993,16 @@ static int __f2fs_cluster_blocks(struct inode *inode, /* return # of compressed blocks in compressed cluster */ static int f2fs_compressed_blocks(struct compress_ctx *cc) { - return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true); + return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, + CLUSTER_COMPR_BLKS); +} + +/* return # of raw blocks in non-compressed cluster */ +static int f2fs_decompressed_blocks(struct inode *inode, + unsigned int cluster_idx) +{ + return __f2fs_cluster_blocks(inode, cluster_idx, + CLUSTER_RAW_BLKS); }
/* return whether cluster is compressed one or not */ @@ -999,7 +1010,16 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) { return __f2fs_cluster_blocks(inode, index >> F2FS_I(inode)->i_log_cluster_size, - false); + CLUSTER_IS_COMPR); +} + +/* return whether cluster contains non raw blocks or not */ +bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index) +{ + unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size; + + return f2fs_decompressed_blocks(inode, cluster_idx) != + F2FS_I(inode)->i_cluster_size; }
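The compress.c refactor above turns the old bool argument of __f2fs_cluster_blocks() into a three-value enum, so one walker can answer "is this cluster compressed", "how many compressed blocks does it hold" and "how many raw blocks does it hold" (the last mode backing the new f2fs_is_sparse_cluster()). A toy sketch of why the enum reads better than a bare true/false at the call sites (the block counts below are made up):

#include <stdio.h>

/* With a bool, call sites read as cluster_blocks(..., true) and the reader
 * has to look up what "true" selects.  The enum names the intent and leaves
 * room for a third mode. */
enum cluster_check_type {
	CLUSTER_IS_COMPR,	/* only check whether the cluster is compressed */
	CLUSTER_COMPR_BLKS,	/* count compressed blocks in the cluster */
	CLUSTER_RAW_BLKS,	/* count raw (uncompressed) blocks in the cluster */
};

/* Toy stand-in for the walker: pretend the cluster has 3 compressed and 1 raw block. */
static int cluster_blocks(enum cluster_check_type type)
{
	const int compr = 3, raw = 1;

	switch (type) {
	case CLUSTER_IS_COMPR:   return compr ? 1 : 0;
	case CLUSTER_COMPR_BLKS: return compr;
	case CLUSTER_RAW_BLKS:   return raw;
	}
	return -1;
}

int main(void)
{
	printf("compressed? %d, compr blks %d, raw blks %d\n",
	       cluster_blocks(CLUSTER_IS_COMPR),
	       cluster_blocks(CLUSTER_COMPR_BLKS),
	       cluster_blocks(CLUSTER_RAW_BLKS));
	return 0;
}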
static bool cluster_may_compress(struct compress_ctx *cc) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 6457e5bca9c9..be66b3a0e793 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2650,10 +2650,13 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) struct dnode_of_data dn; struct node_info ni; bool ipu_force = false; + bool atomic_commit; int err = 0;
/* Use COW inode to make dnode_of_data for atomic write */ - if (f2fs_is_atomic_file(inode)) + atomic_commit = f2fs_is_atomic_file(inode) && + page_private_atomic(fio->page); + if (atomic_commit) set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0); else set_new_dnode(&dn, inode, NULL, NULL, 0); @@ -2752,6 +2755,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio) f2fs_outplace_write_data(&dn, fio); trace_f2fs_do_write_data_page(page_folio(page), OPU); set_inode_flag(inode, FI_APPEND_WRITE); + if (atomic_commit) + clear_page_private_atomic(page); out_writepage: f2fs_put_dnode(&dn); out: @@ -3721,6 +3726,9 @@ static int f2fs_write_end(struct file *file,
set_page_dirty(page);
+ if (f2fs_is_atomic_file(inode)) + set_page_private_atomic(page); + if (pos + copied > i_size_read(inode) && !f2fs_verity_in_progress(inode)) { f2fs_i_size_write(inode, pos + copied); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index cbd7a5e96a37..14900ca8a9ff 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -166,7 +166,8 @@ static unsigned long dir_block_index(unsigned int level, unsigned long bidx = 0;
for (i = 0; i < level; i++) - bidx += dir_buckets(i, dir_level) * bucket_blocks(i); + bidx += mul_u32_u32(dir_buckets(i, dir_level), + bucket_blocks(i)); bidx += idx * bucket_blocks(level); return bidx; } diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index fd1fc06359ee..62ac440d9416 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -366,7 +366,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi, static void __drop_largest_extent(struct extent_tree *et, pgoff_t fofs, unsigned int len) { - if (fofs < et->largest.fofs + et->largest.len && + if (fofs < (pgoff_t)et->largest.fofs + et->largest.len && fofs + len > et->largest.fofs) { et->largest.len = 0; et->largest_updated = true; @@ -456,7 +456,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
if (type == EX_READ && et->largest.fofs <= pgofs && - et->largest.fofs + et->largest.len > pgofs) { + (pgoff_t)et->largest.fofs + et->largest.len > pgofs) { *ei = et->largest; ret = true; stat_inc_largest_node_hit(sbi); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index ac19c61f0c3e..40e96d577982 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -285,6 +285,7 @@ enum { APPEND_INO, /* for append ino list */ UPDATE_INO, /* for update ino list */ TRANS_DIR_INO, /* for transactions dir ino list */ + XATTR_DIR_INO, /* for xattr updated dir ino list */ FLUSH_INO, /* for multiple device flushing */ MAX_INO_ENTRY, /* max. list */ }; @@ -784,7 +785,6 @@ enum { FI_NEED_IPU, /* used for ipu per file */ FI_ATOMIC_FILE, /* indicate atomic file */ FI_DATA_EXIST, /* indicate data exists */ - FI_INLINE_DOTS, /* indicate inline dot dentries */ FI_SKIP_WRITES, /* should skip data page writeback */ FI_OPU_WRITE, /* used for opu per file */ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ @@ -802,6 +802,7 @@ enum { FI_ALIGNED_WRITE, /* enable aligned write */ FI_COW_FILE, /* indicate COW file */ FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */ + FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */ FI_ATOMIC_REPLACE, /* indicate atomic replace */ FI_OPENED_FILE, /* indicate file has been opened */ FI_MAX, /* max flag, never be used */ @@ -1155,6 +1156,7 @@ enum cp_reason_type { CP_FASTBOOT_MODE, CP_SPEC_LOG_NUM, CP_RECOVER_DIR, + CP_XATTR_DIR, };
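The f2fs dir.c and extent_cache.c hunks above fix the same class of bug: dir_block_index() now multiplies through mul_u32_u32() and the extent comparisons cast et->largest.fofs to pgoff_t, so the arithmetic is widened to 64 bits before a 32-bit intermediate can wrap. A standalone sketch of the failure mode, with values invented purely to force the wrap:

#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;		/* widen before multiplying */
}

int main(void)
{
	uint32_t buckets = 1u << 20;		/* 1,048,576 */
	uint32_t blocks_per_bucket = 1u << 14;	/* 16,384 */
	uint64_t wrong, right;

	/* Both operands are 32-bit, so the product is reduced modulo 2^32
	 * and only then assigned to the 64-bit variable. */
	wrong = buckets * blocks_per_bucket;
	right = mul_u32_u32(buckets, blocks_per_bucket);

	printf("wrapped: %llu\n", (unsigned long long)wrong);	/* 0 */
	printf("widened: %llu\n", (unsigned long long)right);	/* 17179869184 */
	return 0;
}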
enum iostat_type { @@ -1418,7 +1420,8 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr); * bit 1 PAGE_PRIVATE_ONGOING_MIGRATION * bit 2 PAGE_PRIVATE_INLINE_INODE * bit 3 PAGE_PRIVATE_REF_RESOURCE - * bit 4- f2fs private data + * bit 4 PAGE_PRIVATE_ATOMIC_WRITE + * bit 5- f2fs private data * * Layout B: lowest bit should be 0 * page.private is a wrapped pointer. @@ -1428,6 +1431,7 @@ enum { PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */ PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */ PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */ + PAGE_PRIVATE_ATOMIC_WRITE, /* data page from atomic write path */ PAGE_PRIVATE_MAX };
@@ -2396,14 +2400,17 @@ static inline void clear_page_private_##name(struct page *page) \ PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER); PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION); +PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE); PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION); +PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE); PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); +PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
static inline unsigned long get_page_private_data(struct page *page) { @@ -2435,6 +2442,7 @@ static inline void clear_page_private_all(struct page *page) clear_page_private_reference(page); clear_page_private_gcing(page); clear_page_private_inline(page); + clear_page_private_atomic(page);
f2fs_bug_on(F2FS_P_SB(page), page_private(page)); } @@ -3038,10 +3046,8 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, return; fallthrough; case FI_DATA_EXIST: - case FI_INLINE_DOTS: case FI_PIN_FILE: case FI_COMPRESS_RELEASED: - case FI_ATOMIC_COMMITTED: f2fs_mark_inode_dirty_sync(inode, true); } } @@ -3163,8 +3169,6 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) set_bit(FI_INLINE_DENTRY, fi->flags); if (ri->i_inline & F2FS_DATA_EXIST) set_bit(FI_DATA_EXIST, fi->flags); - if (ri->i_inline & F2FS_INLINE_DOTS) - set_bit(FI_INLINE_DOTS, fi->flags); if (ri->i_inline & F2FS_EXTRA_ATTR) set_bit(FI_EXTRA_ATTR, fi->flags); if (ri->i_inline & F2FS_PIN_FILE) @@ -3185,8 +3189,6 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) ri->i_inline |= F2FS_INLINE_DENTRY; if (is_inode_flag_set(inode, FI_DATA_EXIST)) ri->i_inline |= F2FS_DATA_EXIST; - if (is_inode_flag_set(inode, FI_INLINE_DOTS)) - ri->i_inline |= F2FS_INLINE_DOTS; if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) ri->i_inline |= F2FS_EXTRA_ATTR; if (is_inode_flag_set(inode, FI_PIN_FILE)) @@ -3267,11 +3269,6 @@ static inline int f2fs_exist_data(struct inode *inode) return is_inode_flag_set(inode, FI_DATA_EXIST); }
-static inline int f2fs_has_inline_dots(struct inode *inode) -{ - return is_inode_flag_set(inode, FI_INLINE_DOTS); -} - static inline int f2fs_is_mmap_file(struct inode *inode) { return is_inode_flag_set(inode, FI_MMAP_FILE); @@ -3495,7 +3492,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, - bool readonly); + bool readonly, bool need_lock); int f2fs_precache_extents(struct inode *inode); int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); int f2fs_fileattr_set(struct mnt_idmap *idmap, @@ -4289,6 +4286,11 @@ static inline bool f2fs_meta_inode_gc_required(struct inode *inode) * compress.c */ #ifdef CONFIG_F2FS_FS_COMPRESSION +enum cluster_check_type { + CLUSTER_IS_COMPR, /* check only if compressed cluster */ + CLUSTER_COMPR_BLKS, /* return # of compressed blocks in a cluster */ + CLUSTER_RAW_BLKS /* return # of raw blocks in a cluster */ +}; bool f2fs_is_compressed_page(struct page *page); struct page *f2fs_compress_control_page(struct page *page); int f2fs_prepare_compress_overwrite(struct inode *inode, @@ -4315,6 +4317,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc, struct writeback_control *wbc, enum iostat_type io_type); int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); +bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index); void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, pgoff_t fofs, block_t blkaddr, unsigned int llen, unsigned int c_len); @@ -4401,6 +4404,12 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino) { } #define inc_compr_inode_stat(inode) do { } while (0) +static inline int f2fs_is_compressed_cluster( + struct inode *inode, + pgoff_t index) { return 0; } +static inline bool f2fs_is_sparse_cluster( + struct inode *inode, + pgoff_t index) { return true; } static inline void f2fs_update_read_extent_tree_range_compressed( struct inode *inode, pgoff_t fofs, block_t blkaddr, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 168f08507004..80ebc13ccb35 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -218,6 +218,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO)) cp_reason = CP_RECOVER_DIR; + else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, + XATTR_DIR_INO)) + cp_reason = CP_XATTR_DIR;
return cp_reason; } @@ -1052,6 +1055,13 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, return err; }
+ /* + * wait for inflight dio, blocks should be removed after + * IO completion. + */ + if (attr->ia_size < old_size) + inode_dio_wait(inode); + f2fs_down_write(&fi->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping);
@@ -1888,6 +1898,12 @@ static long f2fs_fallocate(struct file *file, int mode, if (ret) goto out;
+ /* + * wait for inflight dio, blocks should be removed after IO + * completion. + */ + inode_dio_wait(inode); + if (mode & FALLOC_FL_PUNCH_HOLE) { if (offset >= inode->i_size) goto out; @@ -2116,10 +2132,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) struct mnt_idmap *idmap = file_mnt_idmap(filp); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct inode *pinode; loff_t isize; int ret;
+ if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + if (!inode_owner_or_capable(idmap, inode)) return -EACCES;
@@ -2166,15 +2184,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) /* Check if the inode already has a COW inode */ if (fi->cow_inode == NULL) { /* Create a COW inode for atomic write */ - pinode = f2fs_iget(inode->i_sb, fi->i_pino); - if (IS_ERR(pinode)) { - f2fs_up_write(&fi->i_gc_rwsem[WRITE]); - ret = PTR_ERR(pinode); - goto out; - } + struct dentry *dentry = file_dentry(filp); + struct inode *dir = d_inode(dentry->d_parent);
- ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode); - iput(pinode); + ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode); if (ret) { f2fs_up_write(&fi->i_gc_rwsem[WRITE]); goto out; @@ -2187,6 +2200,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) F2FS_I(fi->cow_inode)->atomic_inode = inode; } else { /* Reuse the already created COW inode */ + f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode)); + + invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1); + ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true); if (ret) { f2fs_up_write(&fi->i_gc_rwsem[WRITE]); @@ -2228,6 +2245,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) struct mnt_idmap *idmap = file_mnt_idmap(filp); int ret;
+ if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + if (!inode_owner_or_capable(idmap, inode)) return -EACCES;
@@ -2260,6 +2280,9 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp) struct mnt_idmap *idmap = file_mnt_idmap(filp); int ret;
+ if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + if (!inode_owner_or_capable(idmap, inode)) return -EACCES;
@@ -2279,7 +2302,7 @@ static int f2fs_ioc_abort_atomic_write(struct file *filp) }
int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, - bool readonly) + bool readonly, bool need_lock) { struct super_block *sb = sbi->sb; int ret = 0; @@ -2326,12 +2349,19 @@ int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, if (readonly) goto out;
+ /* grab sb->s_umount to avoid racing w/ remount() */ + if (need_lock) + down_read(&sbi->sb->s_umount); + f2fs_stop_gc_thread(sbi); f2fs_stop_discard_thread(sbi);
f2fs_drop_discard_cmd(sbi); clear_opt(sbi, DISCARD);
+ if (need_lock) + up_read(&sbi->sb->s_umount); + f2fs_update_time(sbi, REQ_TIME); out:
@@ -2368,7 +2398,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) } }
- ret = f2fs_do_shutdown(sbi, in, readonly); + ret = f2fs_do_shutdown(sbi, in, readonly, true);
if (need_drop) mnt_drop_write_file(filp); @@ -2686,7 +2716,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, (range->start + range->len) >> PAGE_SHIFT, DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE));
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { + if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) || + f2fs_is_atomic_file(inode)) { err = -EINVAL; goto unlock_out; } @@ -2710,7 +2741,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, * block addresses are continuous. */ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) { - if (ei.fofs + ei.len >= pg_end) + if ((pgoff_t)ei.fofs + ei.len >= pg_end) goto out; }
@@ -2793,6 +2824,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, goto clear_out; }
+ f2fs_wait_on_page_writeback(page, DATA, true, true); + set_page_dirty(page); set_page_private_gcing(page); f2fs_put_page(page, 1); @@ -2917,6 +2950,11 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, goto out_unlock; }
+ if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) { + ret = -EINVAL; + goto out_unlock; + } + ret = -EINVAL; if (pos_in + len > src->i_size || pos_in + len < pos_in) goto out_unlock; @@ -3300,6 +3338,11 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
inode_lock(inode);
+ if (f2fs_is_atomic_file(inode)) { + ret = -EINVAL; + goto out; + } + if (!pin) { clear_inode_flag(inode, FI_PIN_FILE); f2fs_i_gc_failures_write(inode, 0); @@ -4193,6 +4236,8 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) /* It will never fail, when page has pinned above */ f2fs_bug_on(F2FS_I_SB(inode), !page);
+ f2fs_wait_on_page_writeback(page, DATA, true, true); + set_page_dirty(page); set_page_private_gcing(page); f2fs_put_page(page, 1); @@ -4207,9 +4252,8 @@ static int f2fs_ioc_decompress_file(struct file *filp) struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); - pgoff_t page_idx = 0, last_idx; - int cluster_size = fi->i_cluster_size; - int count, ret; + pgoff_t page_idx = 0, last_idx, cluster_idx; + int ret;
if (!f2fs_sb_has_compression(sbi) || F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) @@ -4244,10 +4288,15 @@ static int f2fs_ioc_decompress_file(struct file *filp) goto out;
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + last_idx >>= fi->i_log_cluster_size; + + for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) { + page_idx = cluster_idx << fi->i_log_cluster_size;
- count = last_idx - page_idx; - while (count && count >= cluster_size) { - ret = redirty_blocks(inode, page_idx, cluster_size); + if (!f2fs_is_compressed_cluster(inode, page_idx)) + continue; + + ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); if (ret < 0) break;
@@ -4257,9 +4306,6 @@ static int f2fs_ioc_decompress_file(struct file *filp) break; }
- count -= cluster_size; - page_idx += cluster_size; - cond_resched(); if (fatal_signal_pending(current)) { ret = -EINTR; @@ -4286,9 +4332,9 @@ static int f2fs_ioc_compress_file(struct file *filp) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - pgoff_t page_idx = 0, last_idx; - int cluster_size = F2FS_I(inode)->i_cluster_size; - int count, ret; + struct f2fs_inode_info *fi = F2FS_I(inode); + pgoff_t page_idx = 0, last_idx, cluster_idx; + int ret;
if (!f2fs_sb_has_compression(sbi) || F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) @@ -4322,10 +4368,15 @@ static int f2fs_ioc_compress_file(struct file *filp) set_inode_flag(inode, FI_ENABLE_COMPRESS);
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + last_idx >>= fi->i_log_cluster_size;
- count = last_idx - page_idx; - while (count && count >= cluster_size) { - ret = redirty_blocks(inode, page_idx, cluster_size); + for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) { + page_idx = cluster_idx << fi->i_log_cluster_size; + + if (f2fs_is_sparse_cluster(inode, page_idx)) + continue; + + ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); if (ret < 0) break;
@@ -4335,9 +4386,6 @@ static int f2fs_ioc_compress_file(struct file *filp) break; }
- count -= cluster_size; - page_idx += cluster_size; - cond_resched(); if (fatal_signal_pending(current)) { ret = -EINTR; @@ -4597,6 +4645,10 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos, iov_iter_count(to), READ);
+ /* In LFS mode, if there is inflight dio, wait for its completion */ + if (f2fs_lfs_mode(F2FS_I_SB(inode))) + inode_dio_wait(inode); + if (f2fs_should_use_dio(inode, iocb, to)) { ret = f2fs_dio_read_iter(iocb, to); } else { diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index aef57172014f..4729c49bf6d7 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -35,6 +35,11 @@ void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync) if (f2fs_inode_dirtied(inode, sync)) return;
+ if (f2fs_is_atomic_file(inode)) { + set_inode_flag(inode, FI_ATOMIC_DIRTIED); + return; + } + mark_inode_dirty_sync(inode); }
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 38b4750475db..57d46e1439de 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -457,62 +457,6 @@ struct dentry *f2fs_get_parent(struct dentry *child) return d_obtain_alias(f2fs_iget(child->d_sb, ino)); }
-static int __recover_dot_dentries(struct inode *dir, nid_t pino) -{ - struct f2fs_sb_info *sbi = F2FS_I_SB(dir); - struct qstr dot = QSTR_INIT(".", 1); - struct f2fs_dir_entry *de; - struct page *page; - int err = 0; - - if (f2fs_readonly(sbi->sb)) { - f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint", - dir->i_ino, pino); - return 0; - } - - if (!S_ISDIR(dir->i_mode)) { - f2fs_err(sbi, "inconsistent inode status, skip recovering inline_dots inode (ino:%lu, i_mode:%u, pino:%u)", - dir->i_ino, dir->i_mode, pino); - set_sbi_flag(sbi, SBI_NEED_FSCK); - return -ENOTDIR; - } - - err = f2fs_dquot_initialize(dir); - if (err) - return err; - - f2fs_balance_fs(sbi, true); - - f2fs_lock_op(sbi); - - de = f2fs_find_entry(dir, &dot, &page); - if (de) { - f2fs_put_page(page, 0); - } else if (IS_ERR(page)) { - err = PTR_ERR(page); - goto out; - } else { - err = f2fs_do_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR); - if (err) - goto out; - } - - de = f2fs_find_entry(dir, &dotdot_name, &page); - if (de) - f2fs_put_page(page, 0); - else if (IS_ERR(page)) - err = PTR_ERR(page); - else - err = f2fs_do_add_link(dir, &dotdot_name, NULL, pino, S_IFDIR); -out: - if (!err) - clear_inode_flag(dir, FI_INLINE_DOTS); - - f2fs_unlock_op(sbi); - return err; -} - static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { @@ -522,7 +466,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, struct dentry *new; nid_t ino = -1; int err = 0; - unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir)); struct f2fs_filename fname;
trace_f2fs_lookup_start(dir, dentry, flags); @@ -558,17 +501,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, goto out; }
- if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) { - err = __recover_dot_dentries(dir, root_ino); - if (err) - goto out_iput; - } - - if (f2fs_has_inline_dots(inode)) { - err = __recover_dot_dentries(inode, dir->i_ino); - if (err) - goto out_iput; - } if (IS_ENCRYPTED(dir) && (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && !fscrypt_has_permitted_context(dir, inode)) { diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 78c3198a6308..418f2e663f6a 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -199,6 +199,10 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean) clear_inode_flag(inode, FI_ATOMIC_COMMITTED); clear_inode_flag(inode, FI_ATOMIC_REPLACE); clear_inode_flag(inode, FI_ATOMIC_FILE); + if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { + clear_inode_flag(inode, FI_ATOMIC_DIRTIED); + f2fs_mark_inode_dirty_sync(inode, true); + } stat_dec_atomic_inode(inode);
F2FS_I(inode)->atomic_write_task = NULL; @@ -366,6 +370,10 @@ static int __f2fs_commit_atomic_write(struct inode *inode) } else { sbi->committed_atomic_block += fi->atomic_write_cnt; set_inode_flag(inode, FI_ATOMIC_COMMITTED); + if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { + clear_inode_flag(inode, FI_ATOMIC_DIRTIED); + f2fs_mark_inode_dirty_sync(inode, true); + } }
__complete_revoke_list(inode, &revoke_list, ret ? true : false); @@ -1282,6 +1290,13 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, wait_list, issued); return 0; } + + /* + * Issue discard for conventional zones only if the device + * supports discard. + */ + if (!bdev_max_discard_sectors(bdev)) + return -EOPNOTSUPP; } #endif
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 3959fd137cc9..29754dc50fa4 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2561,7 +2561,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
static void f2fs_shutdown(struct super_block *sb) { - f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false); + f2fs_do_shutdown(F2FS_SB(sb), F2FS_GOING_DOWN_NOSYNC, false, false); }
#ifdef CONFIG_QUOTA @@ -3356,9 +3356,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, u32 segment_count = le32_to_cpu(raw_super->segment_count); u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); u64 main_end_blkaddr = main_blkaddr + - (segment_count_main << log_blocks_per_seg); + ((u64)segment_count_main << log_blocks_per_seg); u64 seg_end_blkaddr = segment0_blkaddr + - (segment_count << log_blocks_per_seg); + ((u64)segment_count << log_blocks_per_seg);
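The sanity_check_area_boundary() hunk above is the shift-count flavour of the same overflow: segment_count is a u32, so the left shift is evaluated in 32 bits and the high bits are lost before the result ever reaches the u64 end address; casting first keeps them. A short illustration with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t segment_count = 3u << 20;	/* 3,145,728 segments */
	uint32_t log_blocks_per_seg = 12;	/* 4096 blocks per segment */
	uint64_t lost = segment_count << log_blocks_per_seg;
	uint64_t kept = (uint64_t)segment_count << log_blocks_per_seg;

	printf("truncated: %llu\n", (unsigned long long)lost);	/* 0 */
	printf("correct:   %llu\n", (unsigned long long)kept);	/* 12884901888 */
	return 0;
}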
if (segment0_blkaddr != cp_blkaddr) { f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", @@ -4173,12 +4173,14 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason, }
f2fs_warn(sbi, "Remounting filesystem read-only"); + /* - * Make sure updated value of ->s_mount_flags will be visible before - * ->s_flags update + * We have already set CP_ERROR_FLAG flag to stop all updates + * to filesystem, so it doesn't need to set SB_RDONLY flag here + * because the flag should be set covered w/ sb->s_umount semaphore + * via remount procedure, otherwise, it will confuse code like + * freeze_super() which will lead to deadlocks and other problems. */ - smp_wmb(); - sb->s_flags |= SB_RDONLY; }
static void f2fs_record_error_work(struct work_struct *work) diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index f290fe9327c4..3f3874943679 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -629,6 +629,7 @@ static int __f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, struct page *ipage, int flags) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_xattr_entry *here, *last; void *base_addr, *last_base_addr; int found, newsize; @@ -772,9 +773,18 @@ static int __f2fs_setxattr(struct inode *inode, int index, if (index == F2FS_XATTR_INDEX_ENCRYPTION && !strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT)) f2fs_set_encrypted_inode(inode); - if (S_ISDIR(inode->i_mode)) - set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
+ if (!S_ISDIR(inode->i_mode)) + goto same; + /* + * In restrict mode, fsync() always try to trigger checkpoint for all + * metadata consistency, in other mode, it triggers checkpoint when + * parent's xattr metadata was updated. + */ + if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) + set_sbi_flag(sbi, SBI_NEED_CP); + else + f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO); same: if (is_inode_flag_set(inode, FI_ACL_MODE)) { inode->i_mode = F2FS_I(inode)->i_acl_mode; diff --git a/fs/fcntl.c b/fs/fcntl.c index 300e5d9ad913..c28dc6c005f1 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -87,8 +87,8 @@ static int setfl(int fd, struct file * filp, unsigned int arg) return error; }
-static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, - int force) +void __f_setown(struct file *filp, struct pid *pid, enum pid_type type, + int force) { write_lock_irq(&filp->f_owner.lock); if (force || !filp->f_owner.pid) { @@ -98,19 +98,13 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
if (pid) { const struct cred *cred = current_cred(); + security_file_set_fowner(filp); filp->f_owner.uid = cred->uid; filp->f_owner.euid = cred->euid; } } write_unlock_irq(&filp->f_owner.lock); } - -void __f_setown(struct file *filp, struct pid *pid, enum pid_type type, - int force) -{ - security_file_set_fowner(filp); - f_modown(filp, pid, type, force); -} EXPORT_SYMBOL(__f_setown);
int f_setown(struct file *filp, int who, int force) @@ -146,7 +140,7 @@ EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp) { - f_modown(filp, NULL, PIDTYPE_TGID, 1); + __f_setown(filp, NULL, PIDTYPE_TGID, 1); }
pid_t f_getown(struct file *filp) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ed76121f73f2..08f7d538ca98 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1345,7 +1345,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
/* shared locks are not allowed with parallel page cache IO */ if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state)) - return false; + return true;
/* Parallel dio beyond EOF is not supported, at least for now. */ if (fuse_io_past_eof(iocb, from)) diff --git a/fs/inode.c b/fs/inode.c index 10c4619faeef..7125b73b5367 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -770,6 +770,10 @@ void evict_inodes(struct super_block *sb) continue;
spin_lock(&inode->i_lock); + if (atomic_read(&inode->i_count)) { + spin_unlock(&inode->i_lock); + continue; + } if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { spin_unlock(&inode->i_lock); continue; diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 5713994328cb..0625d1c0d064 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap) }
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); - if (!bmp->db_numag) { + if (!bmp->db_numag || bmp->db_numag >= MAXAG) { err = -EINVAL; goto err_release_metapage; } @@ -652,7 +652,7 @@ int dbNextAG(struct inode *ipbmap) * average free space. */ for (i = 0 ; i < bmp->db_numag; i++, agpref++) { - if (agpref == bmp->db_numag) + if (agpref >= bmp->db_numag) agpref = 0;
if (atomic_read(&bmp->db_active[agpref])) diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index 1407feccbc2d..a360b24ed320 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -1360,7 +1360,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip) /* get the ag number of this iag */ agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb)); dn_numag = JFS_SBI(pip->i_sb)->bmap->db_numag; - if (agno < 0 || agno > dn_numag) + if (agno < 0 || agno > dn_numag || agno >= MAXAG) return -EIO;
if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) { diff --git a/fs/namespace.c b/fs/namespace.c index 328087a4df8a..155c6abda71d 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2921,8 +2921,15 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * if (!__mnt_is_readonly(mnt) && (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) && (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { - char *buf = (char *)__get_free_page(GFP_KERNEL); - char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM); + char *buf, *mntpath; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (buf) + mntpath = d_path(mountpoint, buf, PAGE_SIZE); + else + mntpath = ERR_PTR(-ENOMEM); + if (IS_ERR(mntpath)) + mntpath = "(unknown)";
pr_warn("%s filesystem being %s at %s supports timestamps until %ptTd (0x%llx)\n", sb->s_type->name, @@ -2930,8 +2937,9 @@ static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount * mntpath, &sb->s_time_max, (unsigned long long)sb->s_time_max);
- free_page((unsigned long)buf); sb->s_iflags |= SB_I_TS_EXPIRY_WARNED; + if (buf) + free_page((unsigned long)buf); } }
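The namespace.c change above makes sure mnt_warn_timestamp_expiry() never hands an error pointer from d_path() (or a failed page allocation) to pr_warn(); any failure now falls back to the literal "(unknown)". A small userspace analog of that "format into a scratch buffer, print a placeholder on failure" shape (format_path() and the buffer size are invented for the sketch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-in for d_path(): may fail when the buffer is too small. */
static char *format_path(const char *component, char *buf, size_t len)
{
	if (!buf || strlen(component) + 2 > len)
		return NULL;
	snprintf(buf, len, "/%s", component);
	return buf;
}

static void warn_expiry(const char *component)
{
	char *buf = malloc(64);
	const char *path = buf ? format_path(component, buf, 64) : NULL;

	if (!path)
		path = "(unknown)";	/* never print a failed result */

	printf("filesystem at %s supports timestamps until ...\n", path);
	free(buf);			/* free(NULL) is a no-op */
}

int main(void)
{
	warn_expiry("mnt/data");
	return 0;
}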
diff --git a/fs/netfs/main.c b/fs/netfs/main.c index 5f0f438e5d21..9d6b49dc6694 100644 --- a/fs/netfs/main.c +++ b/fs/netfs/main.c @@ -142,7 +142,7 @@ static int __init netfs_init(void)
error_fscache: error_procfile: - remove_proc_entry("fs/netfs", NULL); + remove_proc_subtree("fs/netfs", NULL); error_proc: mempool_exit(&netfs_subrequest_pool); error_subreqpool: @@ -159,7 +159,7 @@ fs_initcall(netfs_init); static void __exit netfs_exit(void) { fscache_exit(); - remove_proc_entry("fs/netfs", NULL); + remove_proc_subtree("fs/netfs", NULL); mempool_exit(&netfs_subrequest_pool); kmem_cache_destroy(netfs_subrequest_slab); mempool_exit(&netfs_request_pool); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b8ffbe52ba15..cd2fbde2e6d7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3904,6 +3904,18 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL)
+#define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ + (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) +static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) +{ + u32 share_access_want = res->open_caps.oa_share_access_want[0]; + u32 attr_bitmask = res->attr_bitmask[2]; + + return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && + ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == + FATTR4_WORD2_NFS42_TIME_DELEG_MASK); +} + static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { u32 minorversion = server->nfs_client->cl_minorversion; @@ -3982,8 +3994,6 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f #endif if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS) server->caps |= NFS_CAP_FS_LOCATIONS; - if (res.attr_bitmask[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) - server->caps |= NFS_CAP_DELEGTIME; if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) @@ -4011,6 +4021,8 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f if (res.open_caps.oa_share_access_want[0] & NFS4_SHARE_WANT_OPEN_XOR_DELEGATION) server->caps |= NFS_CAP_OPEN_XOR; + if (nfs4_server_delegtime_capable(&res)) + server->caps |= NFS_CAP_DELEGTIME;
memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 877f682b45f2..30aba1dedaba 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1957,6 +1957,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov set_bit(ops->owner_flag_bit, &sp->so_flags); nfs4_put_state_owner(sp); status = nfs4_recovery_handle_error(clp, status); + nfs4_free_state_owners(&freeme); return (status != 0) ? status : -EAGAIN; }
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index f4704f5d4086..e2e248032bfd 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -1035,8 +1035,6 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, if (likely(ret == 0)) goto open_file;
- if (ret == -EEXIST) - goto retry; trace_nfsd_file_insert_err(rqstp, inode, may_flags, ret); status = nfserr_jukebox; goto construction_err; @@ -1051,6 +1049,7 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, status = nfserr_jukebox; goto construction_err; } + nfsd_file_put(nf); open_retry = false; fh_put(fhp); goto retry; diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index 7a806ac13e31..8cca1329f348 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -581,6 +581,7 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr, .id = id, .type = type, }; + __be32 status = nfs_ok; __be32 *p; int ret; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); @@ -593,12 +594,16 @@ static __be32 idmap_id_to_name(struct xdr_stream *xdr, return nfserrno(ret); ret = strlen(item->name); WARN_ON_ONCE(ret > IDMAP_NAMESZ); + p = xdr_reserve_space(xdr, ret + 4); - if (!p) - return nfserr_resource; - p = xdr_encode_opaque(p, item->name, ret); + if (unlikely(!p)) { + status = nfserr_resource; + goto out_put; + } + xdr_encode_opaque(p, item->name, ret); +out_put: cache_put(&item->h, nn->idtoname_cache); - return 0; + return status; }
static bool diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 67d8673a9391..69a3a84e159e 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -809,6 +809,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, ci = &cmsg->cm_u.cm_clntinfo; if (get_user(namelen, &ci->cc_name.cn_len)) return -EFAULT; + if (!namelen) { + dprintk("%s: namelen should not be zero", __func__); + return -EINVAL; + } name.data = memdup_user(&ci->cc_name.cn_id, namelen); if (IS_ERR(name.data)) return PTR_ERR(name.data); @@ -831,6 +835,10 @@ __cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, cnm = &cmsg->cm_u.cm_name; if (get_user(namelen, &cnm->cn_len)) return -EFAULT; + if (!namelen) { + dprintk("%s: namelen should not be zero", __func__); + return -EINVAL; + } name.data = memdup_user(&cnm->cn_id, namelen); if (IS_ERR(name.data)) return PTR_ERR(name.data); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index a366fb1c1b9b..fe06779ea527 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -5912,6 +5912,28 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) } }
+static bool +nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh, + struct kstat *stat) +{ + struct nfsd_file *nf = find_rw_file(dp->dl_stid.sc_file); + struct path path; + int rc; + + if (!nf) + return false; + + path.mnt = currentfh->fh_export->ex_path.mnt; + path.dentry = file_dentry(nf->nf_file); + + rc = vfs_getattr(&path, stat, + (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE), + AT_STATX_SYNC_AS_STAT); + + nfsd_file_put(nf); + return rc == 0; +} + /* * The Linux NFS server does not offer write delegations to NFSv4.0 * clients in order to avoid conflicts between write delegations and @@ -5947,7 +5969,6 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, int cb_up; int status = 0; struct kstat stat; - struct path path;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); open->op_recall = false; @@ -5983,20 +6004,16 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { - open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE; - trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); - path.mnt = currentfh->fh_export->ex_path.mnt; - path.dentry = currentfh->fh_dentry; - if (vfs_getattr(&path, &stat, - (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE), - AT_STATX_SYNC_AS_STAT)) { + if (!nfs4_delegation_stat(dp, currentfh, &stat)) { nfs4_put_stid(&dp->dl_stid); destroy_delegation(dp); goto out_no_deleg; } + open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE; dp->dl_cb_fattr.ncf_cur_fsize = stat.size; dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry)); + trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); } else { open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); @@ -8836,6 +8853,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry, __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct file_lock_context *ctx; + struct nfs4_delegation *dp = NULL; struct file_lease *fl; struct iattr attrs; struct nfs4_cb_fattr *ncf; @@ -8845,84 +8863,76 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry, ctx = locks_inode_context(inode); if (!ctx) return 0; + +#define NON_NFSD_LEASE ((void *)1) + spin_lock(&ctx->flc_lock); for_each_file_lock(fl, &ctx->flc_lease) { - unsigned char type = fl->c.flc_type; - if (fl->c.flc_flags == FL_LAYOUT) continue; - if (fl->fl_lmops != &nfsd_lease_mng_ops) { - /* - * non-nfs lease, if it's a lease with F_RDLCK then - * we are done; there isn't any write delegation - * on this inode - */ - if (type == F_RDLCK) - break; - - nfsd_stats_wdeleg_getattr_inc(nn); - spin_unlock(&ctx->flc_lock); - - status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); + if (fl->c.flc_type == F_WRLCK) { + if (fl->fl_lmops == &nfsd_lease_mng_ops) + dp = fl->c.flc_owner; + else + dp = NON_NFSD_LEASE; + } + break; + } + if (dp == NULL || dp == NON_NFSD_LEASE || + dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { + spin_unlock(&ctx->flc_lock); + if (dp == NON_NFSD_LEASE) { + status = nfserrno(nfsd_open_break_lease(inode, + NFSD_MAY_READ)); if (status != nfserr_jukebox || !nfsd_wait_for_delegreturn(rqstp, inode)) return status; - return 0; } - if (type == F_WRLCK) { - struct nfs4_delegation *dp = fl->c.flc_owner; + return 0; + }
- if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { - spin_unlock(&ctx->flc_lock); - return 0; - } - nfsd_stats_wdeleg_getattr_inc(nn); - dp = fl->c.flc_owner; - refcount_inc(&dp->dl_stid.sc_count); - ncf = &dp->dl_cb_fattr; - nfs4_cb_getattr(&dp->dl_cb_fattr); - spin_unlock(&ctx->flc_lock); - wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY, - TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT); - if (ncf->ncf_cb_status) { - /* Recall delegation only if client didn't respond */ - status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); - if (status != nfserr_jukebox || - !nfsd_wait_for_delegreturn(rqstp, inode)) { - nfs4_put_stid(&dp->dl_stid); - return status; - } - } - if (!ncf->ncf_file_modified && - (ncf->ncf_initial_cinfo != ncf->ncf_cb_change || - ncf->ncf_cur_fsize != ncf->ncf_cb_fsize)) - ncf->ncf_file_modified = true; - if (ncf->ncf_file_modified) { - int err; - - /* - * Per section 10.4.3 of RFC 8881, the server would - * not update the file's metadata with the client's - * modified size - */ - attrs.ia_mtime = attrs.ia_ctime = current_time(inode); - attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG; - inode_lock(inode); - err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL); - inode_unlock(inode); - if (err) { - nfs4_put_stid(&dp->dl_stid); - return nfserrno(err); - } - ncf->ncf_cur_fsize = ncf->ncf_cb_fsize; - *size = ncf->ncf_cur_fsize; - *modified = true; - } - nfs4_put_stid(&dp->dl_stid); - return 0; + nfsd_stats_wdeleg_getattr_inc(nn); + refcount_inc(&dp->dl_stid.sc_count); + ncf = &dp->dl_cb_fattr; + nfs4_cb_getattr(&dp->dl_cb_fattr); + spin_unlock(&ctx->flc_lock); + + wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY, + TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT); + if (ncf->ncf_cb_status) { + /* Recall delegation only if client didn't respond */ + status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); + if (status != nfserr_jukebox || + !nfsd_wait_for_delegreturn(rqstp, inode)) + goto out_status; + } + if (!ncf->ncf_file_modified && + (ncf->ncf_initial_cinfo != ncf->ncf_cb_change || + ncf->ncf_cur_fsize != ncf->ncf_cb_fsize)) + ncf->ncf_file_modified = true; + if (ncf->ncf_file_modified) { + int err; + + /* + * Per section 10.4.3 of RFC 8881, the server would + * not update the file's metadata with the client's + * modified size + */ + attrs.ia_mtime = attrs.ia_ctime = current_time(inode); + attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG; + inode_lock(inode); + err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL); + inode_unlock(inode); + if (err) { + status = nfserrno(err); + goto out_status; } - break; + ncf->ncf_cur_fsize = ncf->ncf_cb_fsize; + *size = ncf->ncf_cur_fsize; + *modified = true; } - spin_unlock(&ctx->flc_lock); - return 0; + status = 0; +out_status: + nfs4_put_stid(&dp->dl_stid); + return status; } diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 862bdf23120e..ef5061bb56da 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -350,7 +350,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || level >= NILFS_BTREE_LEVEL_MAX || (flags & NILFS_BTREE_NODE_ROOT) || - nchildren < 0 || + nchildren <= 0 || nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) { nilfs_crit(inode->i_sb, "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d", @@ -381,7 +381,8 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || level >= 
NILFS_BTREE_LEVEL_MAX || nchildren < 0 || - nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { + nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX || + (nchildren == 0 && level > NILFS_BTREE_LEVEL_NODE_MIN))) { nilfs_crit(inode->i_sb, "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d", inode->i_ino, level, flags, nchildren); @@ -1658,13 +1659,16 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) int nchildren, ret;
root = nilfs_btree_get_root(btree); + nchildren = nilfs_btree_node_get_nchildren(root); + if (unlikely(nchildren == 0)) + return 0; + switch (nilfs_btree_height(btree)) { case 2: bh = NULL; node = root; break; case 3: - nchildren = nilfs_btree_node_get_nchildren(root); if (nchildren > 1) return 0; ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, @@ -1673,12 +1677,12 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) if (ret < 0) return ret; node = (struct nilfs_btree_node *)bh->b_data; + nchildren = nilfs_btree_node_get_nchildren(node); break; default: return 0; }
- nchildren = nilfs_btree_node_get_nchildren(node); maxkey = nilfs_btree_node_get_key(node, nchildren - 1); nextmaxkey = (nchildren > 1) ? nilfs_btree_node_get_key(node, nchildren - 2) : 0; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 7ae885e6d5d7..d533b58e21c2 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2406,7 +2406,7 @@ static int vfs_setup_quota_inode(struct inode *inode, int type) int dquot_load_quota_sb(struct super_block *sb, int type, int format_id, unsigned int flags) { - struct quota_format_type *fmt = find_quota_format(format_id); + struct quota_format_type *fmt; struct quota_info *dqopt = sb_dqopt(sb); int error;
@@ -2416,6 +2416,7 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id, if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED)) return -EINVAL;
+ fmt = find_quota_format(format_id); if (!fmt) return -ESRCH; if (!sb->dq_op || !sb->s_qcop || diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 9e859ba010cf..7cbd580120d1 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -496,7 +496,7 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp, int err = 0;
if (work->conn->connection_type) { - if (!(fp->daccess & FILE_WRITE_DATA_LE)) { + if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) { pr_err("no right to write(%pD)\n", fp->filp); err = -EACCES; goto out; @@ -1115,9 +1115,10 @@ static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen, struct ksmbd_readdir_data *buf;
buf = container_of(ctx, struct ksmbd_readdir_data, ctx); - buf->dirent_count++; + if (!is_dot_dotdot(name, namlen)) + buf->dirent_count++;
- return buf->dirent_count <= 2; + return !buf->dirent_count; }
/** @@ -1137,7 +1138,7 @@ int ksmbd_vfs_empty_dir(struct ksmbd_file *fp) readdir_data.dirent_count = 0;
err = iterate_dir(fp->filp, &readdir_data.ctx); - if (readdir_data.dirent_count > 2) + if (readdir_data.dirent_count) err = -ENOTEMPTY; else err = 0; @@ -1166,7 +1167,7 @@ static bool __caseless_lookup(struct dir_context *ctx, const char *name, if (cmp < 0) cmp = strncasecmp((char *)buf->private, name, namlen); if (!cmp) { - memcpy((char *)buf->private, name, namlen); + memcpy((char *)buf->private, name, buf->used); buf->dirent_count = 1; return false; } @@ -1234,10 +1235,7 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, char *filepath; size_t path_len, remain_len;
- filepath = kstrdup(name, GFP_KERNEL); - if (!filepath) - return -ENOMEM; - + filepath = name; path_len = strlen(filepath); remain_len = path_len;
@@ -1280,10 +1278,9 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, err = -EINVAL; out2: path_put(parent_path); -out1: - kfree(filepath); }
+out1: if (!err) { err = mnt_want_write(parent_path->mnt); if (err) { diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index b1571dd96310..5e0346142f98 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -193,6 +193,7 @@ */ #ifndef ACPI_NO_ERROR_MESSAGES #define AE_INFO _acpi_module_name, __LINE__ +#define ACPI_ONCE(_fn, _plist) { static char _done; if (!_done) { _done = 1; _fn _plist; } }
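ACPI_ONCE() introduced above arms a per-call-site static flag so the wrapped report function fires at most once; ACPI_WARNING_ONCE() and ACPI_ERROR_ONCE() just below are thin wrappers over it. The same print-once idiom in plain C, with printf standing in for the ACPICA report functions and an invented message:

#include <stdio.h>

/* Same shape as ACPI_ONCE(): the static flag is private to each macro
 * expansion, so every call site throttles itself independently. */
#define PR_ONCE(_fn, _plist) { static char _done; if (!_done) { _done = 1; _fn _plist; } }

#define WARN_ONCE_FMT(...) PR_ONCE(printf, (__VA_ARGS__))

int main(void)
{
	for (int i = 0; i < 3; i++)
		WARN_ONCE_FMT("register out of range (iteration %d)\n", i);
	/* prints only once, with i == 0 */
	return 0;
}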
/* * Error reporting. Callers module and line number are inserted by AE_INFO, @@ -201,8 +202,10 @@ */ #define ACPI_INFO(plist) acpi_info plist #define ACPI_WARNING(plist) acpi_warning plist +#define ACPI_WARNING_ONCE(plist) ACPI_ONCE(acpi_warning, plist) #define ACPI_EXCEPTION(plist) acpi_exception plist #define ACPI_ERROR(plist) acpi_error plist +#define ACPI_ERROR_ONCE(plist) ACPI_ONCE(acpi_error, plist) #define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist #define ACPI_BIOS_EXCEPTION(plist) acpi_bios_exception plist #define ACPI_BIOS_ERROR(plist) acpi_bios_error plist @@ -214,8 +217,10 @@
#define ACPI_INFO(plist) #define ACPI_WARNING(plist) +#define ACPI_WARNING_ONCE(plist) #define ACPI_EXCEPTION(plist) #define ACPI_ERROR(plist) +#define ACPI_ERROR_ONCE(plist) #define ACPI_BIOS_WARNING(plist) #define ACPI_BIOS_EXCEPTION(plist) #define ACPI_BIOS_ERROR(plist) diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 930b6afba6f4..e1720d930666 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -64,6 +64,8 @@ struct cpc_desc { int cpu_id; int write_cmd_status; int write_cmd_id; + /* Lock used for RMW operations in cpc_write() */ + spinlock_t rmw_lock; struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT]; struct acpi_psd_package domain_info; struct kobject kobj; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3b94ec161e8c..70fa4ffc3879 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -694,6 +694,11 @@ enum bpf_type_flag { /* DYNPTR points to xdp_buff */ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+ /* Memory must be aligned on some architectures, used in combination with + * MEM_FIXED_SIZE. + */ + MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; @@ -731,8 +736,6 @@ enum bpf_arg_type { ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ - ARG_PTR_TO_INT, /* pointer to int */ - ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */ @@ -919,6 +922,7 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); */ struct bpf_insn_access_aux { enum bpf_reg_type reg_type; + bool is_ldsx; union { int ctx_field_size; struct { @@ -927,6 +931,7 @@ struct bpf_insn_access_aux { }; }; struct bpf_verifier_log *log; /* for verbose logs */ + bool is_retval; /* is accessing function return value ? */ };
static inline void diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h index 1de7ece5d36d..aefcd6564251 100644 --- a/include/linux/bpf_lsm.h +++ b/include/linux/bpf_lsm.h @@ -9,6 +9,7 @@
#include <linux/sched.h> #include <linux/bpf.h> +#include <linux/bpf_verifier.h> #include <linux/lsm_hooks.h>
#ifdef CONFIG_BPF_LSM @@ -45,6 +46,8 @@ void bpf_inode_storage_free(struct inode *inode);
void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *range); #else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id) @@ -78,6 +81,11 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, { }
+static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *range) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 2df665fa2964..1b7f02fd7599 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -133,7 +133,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define annotate_unreachable() __annotate_unreachable(__COUNTER__)
/* Annotate a C jump table to allow objtool to follow the code flow */ -#define __annotate_jump_table __section(".rodata..c_jump_table") +#define __annotate_jump_table __section(".rodata..c_jump_table,"a",@progbits #")
#else /* !CONFIG_OBJTOOL */ #define annotate_reachable() diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 01bee2b289c2..c68e37201a12 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -278,7 +278,7 @@ struct node_footer { #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ -#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ +#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ #define F2FS_PIN_FILE 0x40 /* file should not be gced */ #define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 855db460e08b..19c333fafe11 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -114,6 +114,7 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask, unsigned int obj_type) LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode) LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode) +LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security) LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode, struct inode *dir, const struct qstr *qstr, struct xattr *xattrs, int *xattr_count) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index a2ade0ffe9e7..efd4a0655159 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -73,6 +73,7 @@ struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_inode; + int lbs_sock; int lbs_superblock; int lbs_ipc; int lbs_msg_msg; diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index c09cdcc99471..189140bf11fc 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -40,7 +40,7 @@ struct sbitmap_word { /** * @swap_lock: serializes simultaneous updates of ->word and ->cleared */ - spinlock_t swap_lock; + raw_spinlock_t swap_lock; } ____cacheline_aligned_in_smp;
/** diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h index 0f038a1a0330..c3bca9c0bf2c 100644 --- a/include/linux/soc/qcom/geni-se.h +++ b/include/linux/soc/qcom/geni-se.h @@ -88,11 +88,15 @@ struct geni_se { #define SE_GENI_M_IRQ_STATUS 0x610 #define SE_GENI_M_IRQ_EN 0x614 #define SE_GENI_M_IRQ_CLEAR 0x618 +#define SE_GENI_M_IRQ_EN_SET 0x61c +#define SE_GENI_M_IRQ_EN_CLEAR 0x620 #define SE_GENI_S_CMD0 0x630 #define SE_GENI_S_CMD_CTRL_REG 0x634 #define SE_GENI_S_IRQ_STATUS 0x640 #define SE_GENI_S_IRQ_EN 0x644 #define SE_GENI_S_IRQ_CLEAR 0x648 +#define SE_GENI_S_IRQ_EN_SET 0x64c +#define SE_GENI_S_IRQ_EN_CLEAR 0x650 #define SE_GENI_TX_FIFOn 0x700 #define SE_GENI_RX_FIFOn 0x780 #define SE_GENI_TX_FIFO_STATUS 0x800 @@ -101,6 +105,8 @@ struct geni_se { #define SE_GENI_RX_WATERMARK_REG 0x810 #define SE_GENI_RX_RFR_WATERMARK_REG 0x814 #define SE_GENI_IOS 0x908 +#define SE_GENI_M_GP_LENGTH 0x910 +#define SE_GENI_S_GP_LENGTH 0x914 #define SE_DMA_TX_IRQ_STAT 0xc40 #define SE_DMA_TX_IRQ_CLR 0xc44 #define SE_DMA_TX_FSM_RST 0xc58 @@ -234,6 +240,9 @@ struct geni_se { #define IO2_DATA_IN BIT(1) #define RX_DATA_IN BIT(0)
+/* SE_GENI_M_GP_LENGTH and SE_GENI_S_GP_LENGTH fields */ +#define GP_LENGTH GENMASK(31, 0) + /* SE_DMA_TX_IRQ_STAT Register fields */ #define TX_DMA_DONE BIT(0) #define TX_EOT BIT(1) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 9f08a584d707..0b9f1e598e3a 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -76,8 +76,23 @@ struct usbnet { # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 # define EVENT_NO_IP_ALIGN 13 +/* This one is special, as it indicates that the device is going away + * there are cyclic dependencies between tasklet, timer and bh + * that must be broken + */ +# define EVENT_UNPLUG 31 };
+static inline bool usbnet_going_away(struct usbnet *ubn) +{ + return test_bit(EVENT_UNPLUG, &ubn->flags); +} + +static inline void usbnet_mark_going_away(struct usbnet *ubn) +{ + set_bit(EVENT_UNPLUG, &ubn->flags); +} + static inline struct usb_driver *driver_of(struct usb_interface *intf) { return to_usb_driver(intf->dev.driver); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 4b16844c6bc2..306137a15d07 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -118,7 +118,9 @@ struct virtio_admin_cmd { * struct virtio_device - representation of a device using virtio * @index: unique position on the virtio bus * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore) - * @config_enabled: configuration change reporting enabled + * @config_core_enabled: configuration change reporting enabled by core + * @config_driver_disabled: configuration change reporting disabled by + * a driver * @config_change_pending: configuration change reported while disabled * @config_lock: protects configuration change reporting * @vqs_list_lock: protects @vqs. @@ -135,7 +137,8 @@ struct virtio_admin_cmd { struct virtio_device { int index; bool failed; - bool config_enabled; + bool config_core_enabled; + bool config_driver_disabled; bool config_change_pending; spinlock_t config_lock; spinlock_t vqs_list_lock; @@ -166,6 +169,10 @@ void __virtqueue_break(struct virtqueue *_vq); void __virtqueue_unbreak(struct virtqueue *_vq);
void virtio_config_changed(struct virtio_device *dev); + +void virtio_config_driver_disable(struct virtio_device *dev); +void virtio_config_driver_enable(struct virtio_device *dev); + #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 1a32e602630e..88265d37aa72 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -2257,8 +2257,8 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, bool mgmt_connected); void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); -void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status); +void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, + u8 status); void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); diff --git a/include/net/ip.h b/include/net/ip.h index c5606cadb1a5..82248813619e 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -795,6 +795,8 @@ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) }
bool icmp_global_allow(void); +void icmp_global_consume(void); + extern int sysctl_icmp_msgs_per_sec; extern int sysctl_icmp_msgs_burst;
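The include/net/ip.h change above splits ICMP global rate limiting into a check (icmp_global_allow()) and a separate commit step (icmp_global_consume()), so a credit is only spent once a reply is actually sent rather than at the point of the check. A minimal userspace sketch of that check-then-consume shape, using a toy token bucket and made-up names rather than the kernel's implementation:

  #include <stdbool.h>
  #include <time.h>

  /* Toy token bucket: allow() only peeks, consume() spends the credit. */
  static struct {
          double tokens, rate, burst;
          struct timespec last;
  } tb = { .tokens = 10.0, .rate = 10.0, .burst = 10.0 };

  static void tb_refill(void)
  {
          struct timespec now;

          clock_gettime(CLOCK_MONOTONIC, &now);
          tb.tokens += tb.rate * ((now.tv_sec - tb.last.tv_sec) +
                                  (now.tv_nsec - tb.last.tv_nsec) / 1e9);
          if (tb.tokens > tb.burst)
                  tb.tokens = tb.burst;
          tb.last = now;
  }

  /* Check only, mirrors the role of icmp_global_allow(). */
  static bool ratelimit_allow(void)
  {
          tb_refill();
          return tb.tokens >= 1.0;
  }

  /* Commit, called only once the packet was really transmitted. */
  static void ratelimit_consume(void)
  {
          if (tb.tokens >= 1.0)
                  tb.tokens -= 1.0;
  }

The point of the split is simply that a reply dropped between the allow() check and the actual transmit no longer burns a token.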
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 0a04eaf5343c..fae37598c110 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -994,8 +994,9 @@ enum mac80211_tx_info_flags { * of their QoS TID or other priority field values. * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally * for sequence number assignment - * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted - * due to scanning, not in normal operation on the interface. + * @IEEE80211_TX_CTRL_DONT_USE_RATE_MASK: Don't use rate mask for this frame + * which is transmitted due to scanning or offchannel TX, not in normal + * operation on the interface. * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this * frame should be transmitted on the specific link. This really is * only relevant for frames that do not have data present, and is @@ -1016,7 +1017,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_NO_SEQNO = BIT(7), IEEE80211_TX_CTRL_DONT_REORDER = BIT(8), IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9), - IEEE80211_TX_CTRL_SCAN_TX = BIT(10), + IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = BIT(10), IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000, };
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 1bfdd16890fa..2be4738eae1c 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1674,6 +1674,7 @@ struct nft_trans_rule {
struct nft_trans_set { struct nft_trans_binding nft_trans_binding; + struct list_head list_trans_newset; struct nft_set *set; u32 set_id; u32 gc_int; @@ -1875,6 +1876,7 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) { re struct nftables_pernet { struct list_head tables; struct list_head commit_list; + struct list_head commit_set_list; struct list_head binding_list; struct list_head module_list; struct list_head notify_list; diff --git a/include/net/tcp.h b/include/net/tcp.h index 2aac11e7e1cc..196c148fce8a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2434,9 +2434,26 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk) { const struct sk_buff *skb = tcp_rtx_queue_head(sk); u32 rto = inet_csk(sk)->icsk_rto; - u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; + if (likely(skb)) { + u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto); + + return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; + } else { + WARN_ONCE(1, + "rtx queue empty: " + "out:%u sacked:%u lost:%u retrans:%u " + "tlp_high_seq:%u sk_state:%u ca_state:%u " + "advmss:%u mss_cache:%u pmtu:%u\n", + tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out, + tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out, + tcp_sk(sk)->tlp_high_seq, sk->sk_state, + inet_csk(sk)->icsk_ca_state, + tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache, + inet_csk(sk)->icsk_pmtu_cookie); + return jiffies_to_usecs(rto); + } + }
/* diff --git a/include/sound/tas2563-tlv.h b/include/sound/tas2563-tlv.h new file mode 100644 index 000000000000..faa3e194f73b --- /dev/null +++ b/include/sound/tas2563-tlv.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// +// ALSA SoC Texas Instruments TAS2563 Audio Smart Amplifier +// +// Copyright (C) 2022 - 2024 Texas Instruments Incorporated +// https://www.ti.com +// +// The TAS2563 driver implements a flexible and configurable +// algo coefficient setting for one, two, or even multiple +// TAS2563 chips. +// +// Author: Shenghao Ding shenghao-ding@ti.com +// + +#ifndef __TAS2563_TLV_H__ +#define __TAS2563_TLV_H__ + +static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1); + +/* pow(10, db/20) * pow(2,30) */ +static const unsigned char tas2563_dvc_table[][4] = { + { 0X00, 0X00, 0X00, 0X00 }, /* -121.5db */ + { 0X00, 0X00, 0X03, 0XBC }, /* -121.0db */ + { 0X00, 0X00, 0X03, 0XF5 }, /* -120.5db */ + { 0X00, 0X00, 0X04, 0X31 }, /* -120.0db */ + { 0X00, 0X00, 0X04, 0X71 }, /* -119.5db */ + { 0X00, 0X00, 0X04, 0XB4 }, /* -119.0db */ + { 0X00, 0X00, 0X04, 0XFC }, /* -118.5db */ + { 0X00, 0X00, 0X05, 0X47 }, /* -118.0db */ + { 0X00, 0X00, 0X05, 0X97 }, /* -117.5db */ + { 0X00, 0X00, 0X05, 0XEC }, /* -117.0db */ + { 0X00, 0X00, 0X06, 0X46 }, /* -116.5db */ + { 0X00, 0X00, 0X06, 0XA5 }, /* -116.0db */ + { 0X00, 0X00, 0X07, 0X0A }, /* -115.5db */ + { 0X00, 0X00, 0X07, 0X75 }, /* -115.0db */ + { 0X00, 0X00, 0X07, 0XE6 }, /* -114.5db */ + { 0X00, 0X00, 0X08, 0X5E }, /* -114.0db */ + { 0X00, 0X00, 0X08, 0XDD }, /* -113.5db */ + { 0X00, 0X00, 0X09, 0X63 }, /* -113.0db */ + { 0X00, 0X00, 0X09, 0XF2 }, /* -112.5db */ + { 0X00, 0X00, 0X0A, 0X89 }, /* -112.0db */ + { 0X00, 0X00, 0X0B, 0X28 }, /* -111.5db */ + { 0X00, 0X00, 0X0B, 0XD2 }, /* -111.0db */ + { 0X00, 0X00, 0X0C, 0X85 }, /* -110.5db */ + { 0X00, 0X00, 0X0D, 0X43 }, /* -110.0db */ + { 0X00, 0X00, 0X0E, 0X0C }, /* -109.5db */ + { 0X00, 0X00, 0X0E, 0XE1 }, /* -109.0db */ + { 0X00, 0X00, 0X0F, 0XC3 }, /* -108.5db */ + { 0X00, 0X00, 0X10, 0XB2 }, /* -108.0db */ + { 0X00, 0X00, 0X11, 0XAF }, /* -107.5db */ + { 0X00, 0X00, 0X12, 0XBC }, /* -107.0db */ + { 0X00, 0X00, 0X13, 0XD8 }, /* -106.5db */ + { 0X00, 0X00, 0X15, 0X05 }, /* -106.0db */ + { 0X00, 0X00, 0X16, 0X44 }, /* -105.5db */ + { 0X00, 0X00, 0X17, 0X96 }, /* -105.0db */ + { 0X00, 0X00, 0X18, 0XFB }, /* -104.5db */ + { 0X00, 0X00, 0X1A, 0X76 }, /* -104.0db */ + { 0X00, 0X00, 0X1C, 0X08 }, /* -103.5db */ + { 0X00, 0X00, 0X1D, 0XB1 }, /* -103.0db */ + { 0X00, 0X00, 0X1F, 0X73 }, /* -102.5db */ + { 0X00, 0X00, 0X21, 0X51 }, /* -102.0db */ + { 0X00, 0X00, 0X23, 0X4A }, /* -101.5db */ + { 0X00, 0X00, 0X25, 0X61 }, /* -101.0db */ + { 0X00, 0X00, 0X27, 0X98 }, /* -100.5db */ + { 0X00, 0X00, 0X29, 0XF1 }, /* -100.0db */ + { 0X00, 0X00, 0X2C, 0X6D }, /* -99.5db */ + { 0X00, 0X00, 0X2F, 0X0F }, /* -99.0db */ + { 0X00, 0X00, 0X31, 0XD9 }, /* -98.5db */ + { 0X00, 0X00, 0X34, 0XCD }, /* -98.0db */ + { 0X00, 0X00, 0X37, 0XEE }, /* -97.5db */ + { 0X00, 0X00, 0X3B, 0X3F }, /* -97.0db */ + { 0X00, 0X00, 0X3E, 0XC1 }, /* -96.5db */ + { 0X00, 0X00, 0X42, 0X79 }, /* -96.0db */ + { 0X00, 0X00, 0X46, 0X6A }, /* -95.5db */ + { 0X00, 0X00, 0X4A, 0X96 }, /* -95.0db */ + { 0X00, 0X00, 0X4F, 0X01 }, /* -94.5db */ + { 0X00, 0X00, 0X53, 0XAF }, /* -94.0db */ + { 0X00, 0X00, 0X58, 0XA5 }, /* -93.5db */ + { 0X00, 0X00, 0X5D, 0XE6 }, /* -93.0db */ + { 0X00, 0X00, 0X63, 0X76 }, /* -92.5db */ + { 0X00, 0X00, 0X69, 0X5B }, /* -92.0db */ + { 0X00, 0X00, 0X6F, 0X99 }, /* -91.5db */ + { 
0X00, 0X00, 0X76, 0X36 }, /* -91.0db */ + { 0X00, 0X00, 0X7D, 0X37 }, /* -90.5db */ + { 0X00, 0X00, 0X84, 0XA2 }, /* -90.0db */ + { 0X00, 0X00, 0X8C, 0X7E }, /* -89.5db */ + { 0X00, 0X00, 0X94, 0XD1 }, /* -89.0db */ + { 0X00, 0X00, 0X9D, 0XA3 }, /* -88.5db */ + { 0X00, 0X00, 0XA6, 0XFA }, /* -88.0db */ + { 0X00, 0X00, 0XB0, 0XDF }, /* -87.5db */ + { 0X00, 0X00, 0XBB, 0X5A }, /* -87.0db */ + { 0X00, 0X00, 0XC6, 0X74 }, /* -86.5db */ + { 0X00, 0X00, 0XD2, 0X36 }, /* -86.0db */ + { 0X00, 0X00, 0XDE, 0XAB }, /* -85.5db */ + { 0X00, 0X00, 0XEB, 0XDC }, /* -85.0db */ + { 0X00, 0X00, 0XF9, 0XD6 }, /* -84.5db */ + { 0X00, 0X01, 0X08, 0XA4 }, /* -84.0db */ + { 0X00, 0X01, 0X18, 0X52 }, /* -83.5db */ + { 0X00, 0X01, 0X28, 0XEF }, /* -83.0db */ + { 0X00, 0X01, 0X3A, 0X87 }, /* -82.5db */ + { 0X00, 0X01, 0X4D, 0X2A }, /* -82.0db */ + { 0X00, 0X01, 0X60, 0XE8 }, /* -81.5db */ + { 0X00, 0X01, 0X75, 0XD1 }, /* -81.0db */ + { 0X00, 0X01, 0X8B, 0XF7 }, /* -80.5db */ + { 0X00, 0X01, 0XA3, 0X6E }, /* -80.0db */ + { 0X00, 0X01, 0XBC, 0X48 }, /* -79.5db */ + { 0X00, 0X01, 0XD6, 0X9B }, /* -79.0db */ + { 0X00, 0X01, 0XF2, 0X7E }, /* -78.5db */ + { 0X00, 0X02, 0X10, 0X08 }, /* -78.0db */ + { 0X00, 0X02, 0X2F, 0X51 }, /* -77.5db */ + { 0X00, 0X02, 0X50, 0X76 }, /* -77.0db */ + { 0X00, 0X02, 0X73, 0X91 }, /* -76.5db */ + { 0X00, 0X02, 0X98, 0XC0 }, /* -76.0db */ + { 0X00, 0X02, 0XC0, 0X24 }, /* -75.5db */ + { 0X00, 0X02, 0XE9, 0XDD }, /* -75.0db */ + { 0X00, 0X03, 0X16, 0X0F }, /* -74.5db */ + { 0X00, 0X03, 0X44, 0XDF }, /* -74.0db */ + { 0X00, 0X03, 0X76, 0X76 }, /* -73.5db */ + { 0X00, 0X03, 0XAA, 0XFC }, /* -73.0db */ + { 0X00, 0X03, 0XE2, 0XA0 }, /* -72.5db */ + { 0X00, 0X04, 0X1D, 0X8F }, /* -72.0db */ + { 0X00, 0X04, 0X5B, 0XFD }, /* -71.5db */ + { 0X00, 0X04, 0X9E, 0X1D }, /* -71.0db */ + { 0X00, 0X04, 0XE4, 0X29 }, /* -70.5db */ + { 0X00, 0X05, 0X2E, 0X5A }, /* -70.0db */ + { 0X00, 0X05, 0X7C, 0XF2 }, /* -69.5db */ + { 0X00, 0X05, 0XD0, 0X31 }, /* -69.0db */ + { 0X00, 0X06, 0X28, 0X60 }, /* -68.5db */ + { 0X00, 0X06, 0X85, 0XC8 }, /* -68.0db */ + { 0X00, 0X06, 0XE8, 0XB9 }, /* -67.5db */ + { 0X00, 0X07, 0X51, 0X86 }, /* -67.0db */ + { 0X00, 0X07, 0XC0, 0X8A }, /* -66.5db */ + { 0X00, 0X08, 0X36, 0X21 }, /* -66.0db */ + { 0X00, 0X08, 0XB2, 0XB0 }, /* -65.5db */ + { 0X00, 0X09, 0X36, 0XA1 }, /* -65.0db */ + { 0X00, 0X09, 0XC2, 0X63 }, /* -64.5db */ + { 0X00, 0X0A, 0X56, 0X6D }, /* -64.0db */ + { 0X00, 0X0A, 0XF3, 0X3C }, /* -63.5db */ + { 0X00, 0X0B, 0X99, 0X56 }, /* -63.0db */ + { 0X00, 0X0C, 0X49, 0X48 }, /* -62.5db */ + { 0X00, 0X0D, 0X03, 0XA7 }, /* -62.0db */ + { 0X00, 0X0D, 0XC9, 0X11 }, /* -61.5db */ + { 0X00, 0X0E, 0X9A, 0X2D }, /* -61.0db */ + { 0X00, 0X0F, 0X77, 0XAD }, /* -60.5db */ + { 0X00, 0X10, 0X62, 0X4D }, /* -60.0db */ + { 0X00, 0X11, 0X5A, 0XD5 }, /* -59.5db */ + { 0X00, 0X12, 0X62, 0X16 }, /* -59.0db */ + { 0X00, 0X13, 0X78, 0XF0 }, /* -58.5db */ + { 0X00, 0X14, 0XA0, 0X50 }, /* -58.0db */ + { 0X00, 0X15, 0XD9, 0X31 }, /* -57.5db */ + { 0X00, 0X17, 0X24, 0X9C }, /* -57.0db */ + { 0X00, 0X18, 0X83, 0XAA }, /* -56.5db */ + { 0X00, 0X19, 0XF7, 0X86 }, /* -56.0db */ + { 0X00, 0X1B, 0X81, 0X6A }, /* -55.5db */ + { 0X00, 0X1D, 0X22, 0XA4 }, /* -55.0db */ + { 0X00, 0X1E, 0XDC, 0X98 }, /* -54.5db */ + { 0X00, 0X20, 0XB0, 0XBC }, /* -54.0db */ + { 0X00, 0X22, 0XA0, 0X9D }, /* -53.5db */ + { 0X00, 0X24, 0XAD, 0XE0 }, /* -53.0db */ + { 0X00, 0X26, 0XDA, 0X43 }, /* -52.5db */ + { 0X00, 0X29, 0X27, 0X9D }, /* -52.0db */ + { 0X00, 0X2B, 0X97, 0XE3 }, /* -51.5db */ + { 0X00, 0X2E, 0X2D, 0X27 }, /* 
-51.0db */ + { 0X00, 0X30, 0XE9, 0X9A }, /* -50.5db */ + { 0X00, 0X33, 0XCF, 0X8D }, /* -50.0db */ + { 0X00, 0X36, 0XE1, 0X78 }, /* -49.5db */ + { 0X00, 0X3A, 0X21, 0XF3 }, /* -49.0db */ + { 0X00, 0X3D, 0X93, 0XC3 }, /* -48.5db */ + { 0X00, 0X41, 0X39, 0XD3 }, /* -48.0db */ + { 0X00, 0X45, 0X17, 0X3B }, /* -47.5db */ + { 0X00, 0X49, 0X2F, 0X44 }, /* -47.0db */ + { 0X00, 0X4D, 0X85, 0X66 }, /* -46.5db */ + { 0X00, 0X52, 0X1D, 0X50 }, /* -46.0db */ + { 0X00, 0X56, 0XFA, 0XE8 }, /* -45.5db */ + { 0X00, 0X5C, 0X22, 0X4E }, /* -45.0db */ + { 0X00, 0X61, 0X97, 0XE1 }, /* -44.5db */ + { 0X00, 0X67, 0X60, 0X44 }, /* -44.0db */ + { 0X00, 0X6D, 0X80, 0X60 }, /* -43.5db */ + { 0X00, 0X73, 0XFD, 0X65 }, /* -43.0db */ + { 0X00, 0X7A, 0XDC, 0XD7 }, /* -42.5db */ + { 0X00, 0X82, 0X24, 0X8A }, /* -42.0db */ + { 0X00, 0X89, 0XDA, 0XAB }, /* -41.5db */ + { 0X00, 0X92, 0X05, 0XC6 }, /* -41.0db */ + { 0X00, 0X9A, 0XAC, 0XC8 }, /* -40.5db */ + { 0X00, 0XA3, 0XD7, 0X0A }, /* -40.0db */ + { 0X00, 0XAD, 0X8C, 0X52 }, /* -39.5db */ + { 0X00, 0XB7, 0XD4, 0XDD }, /* -39.0db */ + { 0X00, 0XC2, 0XB9, 0X65 }, /* -38.5db */ + { 0X00, 0XCE, 0X43, 0X28 }, /* -38.0db */ + { 0X00, 0XDA, 0X7B, 0XF1 }, /* -37.5db */ + { 0X00, 0XE7, 0X6E, 0X1E }, /* -37.0db */ + { 0X00, 0XF5, 0X24, 0XAC }, /* -36.5db */ + { 0X01, 0X03, 0XAB, 0X3D }, /* -36.0db */ + { 0X01, 0X13, 0X0E, 0X24 }, /* -35.5db */ + { 0X01, 0X23, 0X5A, 0X71 }, /* -35.0db */ + { 0X01, 0X34, 0X9D, 0XF8 }, /* -34.5db */ + { 0X01, 0X46, 0XE7, 0X5D }, /* -34.0db */ + { 0X01, 0X5A, 0X46, 0X27 }, /* -33.5db */ + { 0X01, 0X6E, 0XCA, 0XC5 }, /* -33.0db */ + { 0X01, 0X84, 0X86, 0X9F }, /* -32.5db */ + { 0X01, 0X9B, 0X8C, 0X27 }, /* -32.0db */ + { 0X01, 0XB3, 0XEE, 0XE5 }, /* -31.5db */ + { 0X01, 0XCD, 0XC3, 0X8C }, /* -31.0db */ + { 0X01, 0XE9, 0X20, 0X05 }, /* -30.5db */ + { 0X02, 0X06, 0X1B, 0X89 }, /* -30.0db */ + { 0X02, 0X24, 0XCE, 0XB0 }, /* -29.5db */ + { 0X02, 0X45, 0X53, 0X85 }, /* -29.0db */ + { 0X02, 0X67, 0XC5, 0XA2 }, /* -28.5db */ + { 0X02, 0X8C, 0X42, 0X3F }, /* -28.0db */ + { 0X02, 0XB2, 0XE8, 0X55 }, /* -27.5db */ + { 0X02, 0XDB, 0XD8, 0XAD }, /* -27.0db */ + { 0X03, 0X07, 0X36, 0X05 }, /* -26.5db */ + { 0X03, 0X35, 0X25, 0X29 }, /* -26.0db */ + { 0X03, 0X65, 0XCD, 0X13 }, /* -25.5db */ + { 0X03, 0X99, 0X57, 0X0C }, /* -25.0db */ + { 0X03, 0XCF, 0XEE, 0XCF }, /* -24.5db */ + { 0X04, 0X09, 0XC2, 0XB0 }, /* -24.0db */ + { 0X04, 0X47, 0X03, 0XC1 }, /* -23.5db */ + { 0X04, 0X87, 0XE5, 0XFB }, /* -23.0db */ + { 0X04, 0XCC, 0XA0, 0X6D }, /* -22.5db */ + { 0X05, 0X15, 0X6D, 0X68 }, /* -22.0db */ + { 0X05, 0X62, 0X8A, 0XB3 }, /* -21.5db */ + { 0X05, 0XB4, 0X39, 0XBC }, /* -21.0db */ + { 0X06, 0X0A, 0XBF, 0XD4 }, /* -20.5db */ + { 0X06, 0X66, 0X66, 0X66 }, /* -20.0db */ + { 0X06, 0XC7, 0X7B, 0X36 }, /* -19.5db */ + { 0X07, 0X2E, 0X50, 0XA6 }, /* -19.0db */ + { 0X07, 0X9B, 0X3D, 0XF6 }, /* -18.5db */ + { 0X08, 0X0E, 0X9F, 0X96 }, /* -18.0db */ + { 0X08, 0X88, 0XD7, 0X6D }, /* -17.5db */ + { 0X09, 0X0A, 0X4D, 0X2F }, /* -17.0db */ + { 0X09, 0X93, 0X6E, 0XB8 }, /* -16.5db */ + { 0X0A, 0X24, 0XB0, 0X62 }, /* -16.0db */ + { 0X0A, 0XBE, 0X8D, 0X70 }, /* -15.5db */ + { 0X0B, 0X61, 0X88, 0X71 }, /* -15.0db */ + { 0X0C, 0X0E, 0X2B, 0XB0 }, /* -14.5db */ + { 0X0C, 0XC5, 0X09, 0XAB }, /* -14.0db */ + { 0X0D, 0X86, 0XBD, 0X8D }, /* -13.5db */ + { 0X0E, 0X53, 0XEB, 0XB3 }, /* -13.0db */ + { 0X0F, 0X2D, 0X42, 0X38 }, /* -12.5db */ + { 0X10, 0X13, 0X79, 0X87 }, /* -12.0db */ + { 0X11, 0X07, 0X54, 0XF9 }, /* -11.5db */ + { 0X12, 0X09, 0XA3, 0X7A }, /* -11.0db */ + { 0X13, 0X1B, 0X40, 
0X39 }, /* -10.5db */ + { 0X14, 0X3D, 0X13, 0X62 }, /* -10.0db */ + { 0X15, 0X70, 0X12, 0XE1 }, /* -9.5db */ + { 0X16, 0XB5, 0X43, 0X37 }, /* -9.0db */ + { 0X18, 0X0D, 0XB8, 0X54 }, /* -8.5db */ + { 0X19, 0X7A, 0X96, 0X7F }, /* -8.0db */ + { 0X1A, 0XFD, 0X13, 0X54 }, /* -7.5db */ + { 0X1C, 0X96, 0X76, 0XC6 }, /* -7.0db */ + { 0X1E, 0X48, 0X1C, 0X37 }, /* -6.5db */ + { 0X20, 0X13, 0X73, 0X9E }, /* -6.0db */ + { 0X21, 0XFA, 0X02, 0XBF }, /* -5.5db */ + { 0X23, 0XFD, 0X66, 0X78 }, /* -5.0db */ + { 0X26, 0X1F, 0X54, 0X1C }, /* -4.5db */ + { 0X28, 0X61, 0X9A, 0XE9 }, /* -4.0db */ + { 0X2A, 0XC6, 0X25, 0X91 }, /* -3.5db */ + { 0X2D, 0X4E, 0XFB, 0XD5 }, /* -3.0db */ + { 0X2F, 0XFE, 0X44, 0X48 }, /* -2.5db */ + { 0X32, 0XD6, 0X46, 0X17 }, /* -2.0db */ + { 0X35, 0XD9, 0X6B, 0X02 }, /* -1.5db */ + { 0X39, 0X0A, 0X41, 0X5F }, /* -1.0db */ + { 0X3C, 0X6B, 0X7E, 0X4F }, /* -0.5db */ + { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */ + { 0X43, 0XCA, 0XD0, 0X22 }, /* 0.5db */ + { 0X47, 0XCF, 0X26, 0X7D }, /* 1.0db */ + { 0X4C, 0X10, 0X6B, 0XA5 }, /* 1.5db */ + { 0X50, 0X92, 0X3B, 0XE3 }, /* 2.0db */ + { 0X55, 0X58, 0X6A, 0X46 }, /* 2.5db */ + { 0X5A, 0X67, 0X03, 0XDF }, /* 3.0db */ + { 0X5F, 0XC2, 0X53, 0X32 }, /* 3.5db */ + { 0X65, 0X6E, 0XE3, 0XDB }, /* 4.0db */ + { 0X6B, 0X71, 0X86, 0X68 }, /* 4.5db */ + { 0X71, 0XCF, 0X54, 0X71 }, /* 5.0db */ + { 0X78, 0X8D, 0XB4, 0XE9 }, /* 5.5db */ + { 0X7F, 0XFF, 0XFF, 0XFF }, /* 6.0db */ +}; +#endif diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h index 00fd4d449ff3..d87263e43fdb 100644 --- a/include/sound/tas2781-tlv.h +++ b/include/sound/tas2781-tlv.h @@ -17,265 +17,5 @@
static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0); static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0); -static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1);
-/* pow(10, db/20) * pow(2,30) */ -static const __maybe_unused unsigned char tas2563_dvc_table[][4] = { - { 0X00, 0X00, 0X00, 0X00 }, /* -121.5db */ - { 0X00, 0X00, 0X03, 0XBC }, /* -121.0db */ - { 0X00, 0X00, 0X03, 0XF5 }, /* -120.5db */ - { 0X00, 0X00, 0X04, 0X31 }, /* -120.0db */ - { 0X00, 0X00, 0X04, 0X71 }, /* -119.5db */ - { 0X00, 0X00, 0X04, 0XB4 }, /* -119.0db */ - { 0X00, 0X00, 0X04, 0XFC }, /* -118.5db */ - { 0X00, 0X00, 0X05, 0X47 }, /* -118.0db */ - { 0X00, 0X00, 0X05, 0X97 }, /* -117.5db */ - { 0X00, 0X00, 0X05, 0XEC }, /* -117.0db */ - { 0X00, 0X00, 0X06, 0X46 }, /* -116.5db */ - { 0X00, 0X00, 0X06, 0XA5 }, /* -116.0db */ - { 0X00, 0X00, 0X07, 0X0A }, /* -115.5db */ - { 0X00, 0X00, 0X07, 0X75 }, /* -115.0db */ - { 0X00, 0X00, 0X07, 0XE6 }, /* -114.5db */ - { 0X00, 0X00, 0X08, 0X5E }, /* -114.0db */ - { 0X00, 0X00, 0X08, 0XDD }, /* -113.5db */ - { 0X00, 0X00, 0X09, 0X63 }, /* -113.0db */ - { 0X00, 0X00, 0X09, 0XF2 }, /* -112.5db */ - { 0X00, 0X00, 0X0A, 0X89 }, /* -112.0db */ - { 0X00, 0X00, 0X0B, 0X28 }, /* -111.5db */ - { 0X00, 0X00, 0X0B, 0XD2 }, /* -111.0db */ - { 0X00, 0X00, 0X0C, 0X85 }, /* -110.5db */ - { 0X00, 0X00, 0X0D, 0X43 }, /* -110.0db */ - { 0X00, 0X00, 0X0E, 0X0C }, /* -109.5db */ - { 0X00, 0X00, 0X0E, 0XE1 }, /* -109.0db */ - { 0X00, 0X00, 0X0F, 0XC3 }, /* -108.5db */ - { 0X00, 0X00, 0X10, 0XB2 }, /* -108.0db */ - { 0X00, 0X00, 0X11, 0XAF }, /* -107.5db */ - { 0X00, 0X00, 0X12, 0XBC }, /* -107.0db */ - { 0X00, 0X00, 0X13, 0XD8 }, /* -106.5db */ - { 0X00, 0X00, 0X15, 0X05 }, /* -106.0db */ - { 0X00, 0X00, 0X16, 0X44 }, /* -105.5db */ - { 0X00, 0X00, 0X17, 0X96 }, /* -105.0db */ - { 0X00, 0X00, 0X18, 0XFB }, /* -104.5db */ - { 0X00, 0X00, 0X1A, 0X76 }, /* -104.0db */ - { 0X00, 0X00, 0X1C, 0X08 }, /* -103.5db */ - { 0X00, 0X00, 0X1D, 0XB1 }, /* -103.0db */ - { 0X00, 0X00, 0X1F, 0X73 }, /* -102.5db */ - { 0X00, 0X00, 0X21, 0X51 }, /* -102.0db */ - { 0X00, 0X00, 0X23, 0X4A }, /* -101.5db */ - { 0X00, 0X00, 0X25, 0X61 }, /* -101.0db */ - { 0X00, 0X00, 0X27, 0X98 }, /* -100.5db */ - { 0X00, 0X00, 0X29, 0XF1 }, /* -100.0db */ - { 0X00, 0X00, 0X2C, 0X6D }, /* -99.5db */ - { 0X00, 0X00, 0X2F, 0X0F }, /* -99.0db */ - { 0X00, 0X00, 0X31, 0XD9 }, /* -98.5db */ - { 0X00, 0X00, 0X34, 0XCD }, /* -98.0db */ - { 0X00, 0X00, 0X37, 0XEE }, /* -97.5db */ - { 0X00, 0X00, 0X3B, 0X3F }, /* -97.0db */ - { 0X00, 0X00, 0X3E, 0XC1 }, /* -96.5db */ - { 0X00, 0X00, 0X42, 0X79 }, /* -96.0db */ - { 0X00, 0X00, 0X46, 0X6A }, /* -95.5db */ - { 0X00, 0X00, 0X4A, 0X96 }, /* -95.0db */ - { 0X00, 0X00, 0X4F, 0X01 }, /* -94.5db */ - { 0X00, 0X00, 0X53, 0XAF }, /* -94.0db */ - { 0X00, 0X00, 0X58, 0XA5 }, /* -93.5db */ - { 0X00, 0X00, 0X5D, 0XE6 }, /* -93.0db */ - { 0X00, 0X00, 0X63, 0X76 }, /* -92.5db */ - { 0X00, 0X00, 0X69, 0X5B }, /* -92.0db */ - { 0X00, 0X00, 0X6F, 0X99 }, /* -91.5db */ - { 0X00, 0X00, 0X76, 0X36 }, /* -91.0db */ - { 0X00, 0X00, 0X7D, 0X37 }, /* -90.5db */ - { 0X00, 0X00, 0X84, 0XA2 }, /* -90.0db */ - { 0X00, 0X00, 0X8C, 0X7E }, /* -89.5db */ - { 0X00, 0X00, 0X94, 0XD1 }, /* -89.0db */ - { 0X00, 0X00, 0X9D, 0XA3 }, /* -88.5db */ - { 0X00, 0X00, 0XA6, 0XFA }, /* -88.0db */ - { 0X00, 0X00, 0XB0, 0XDF }, /* -87.5db */ - { 0X00, 0X00, 0XBB, 0X5A }, /* -87.0db */ - { 0X00, 0X00, 0XC6, 0X74 }, /* -86.5db */ - { 0X00, 0X00, 0XD2, 0X36 }, /* -86.0db */ - { 0X00, 0X00, 0XDE, 0XAB }, /* -85.5db */ - { 0X00, 0X00, 0XEB, 0XDC }, /* -85.0db */ - { 0X00, 0X00, 0XF9, 0XD6 }, /* -84.5db */ - { 0X00, 0X01, 0X08, 0XA4 }, /* -84.0db */ - { 0X00, 0X01, 0X18, 0X52 }, /* -83.5db */ - { 0X00, 0X01, 
0X28, 0XEF }, /* -83.0db */ - { 0X00, 0X01, 0X3A, 0X87 }, /* -82.5db */ - { 0X00, 0X01, 0X4D, 0X2A }, /* -82.0db */ - { 0X00, 0X01, 0X60, 0XE8 }, /* -81.5db */ - { 0X00, 0X01, 0X75, 0XD1 }, /* -81.0db */ - { 0X00, 0X01, 0X8B, 0XF7 }, /* -80.5db */ - { 0X00, 0X01, 0XA3, 0X6E }, /* -80.0db */ - { 0X00, 0X01, 0XBC, 0X48 }, /* -79.5db */ - { 0X00, 0X01, 0XD6, 0X9B }, /* -79.0db */ - { 0X00, 0X01, 0XF2, 0X7E }, /* -78.5db */ - { 0X00, 0X02, 0X10, 0X08 }, /* -78.0db */ - { 0X00, 0X02, 0X2F, 0X51 }, /* -77.5db */ - { 0X00, 0X02, 0X50, 0X76 }, /* -77.0db */ - { 0X00, 0X02, 0X73, 0X91 }, /* -76.5db */ - { 0X00, 0X02, 0X98, 0XC0 }, /* -76.0db */ - { 0X00, 0X02, 0XC0, 0X24 }, /* -75.5db */ - { 0X00, 0X02, 0XE9, 0XDD }, /* -75.0db */ - { 0X00, 0X03, 0X16, 0X0F }, /* -74.5db */ - { 0X00, 0X03, 0X44, 0XDF }, /* -74.0db */ - { 0X00, 0X03, 0X76, 0X76 }, /* -73.5db */ - { 0X00, 0X03, 0XAA, 0XFC }, /* -73.0db */ - { 0X00, 0X03, 0XE2, 0XA0 }, /* -72.5db */ - { 0X00, 0X04, 0X1D, 0X8F }, /* -72.0db */ - { 0X00, 0X04, 0X5B, 0XFD }, /* -71.5db */ - { 0X00, 0X04, 0X9E, 0X1D }, /* -71.0db */ - { 0X00, 0X04, 0XE4, 0X29 }, /* -70.5db */ - { 0X00, 0X05, 0X2E, 0X5A }, /* -70.0db */ - { 0X00, 0X05, 0X7C, 0XF2 }, /* -69.5db */ - { 0X00, 0X05, 0XD0, 0X31 }, /* -69.0db */ - { 0X00, 0X06, 0X28, 0X60 }, /* -68.5db */ - { 0X00, 0X06, 0X85, 0XC8 }, /* -68.0db */ - { 0X00, 0X06, 0XE8, 0XB9 }, /* -67.5db */ - { 0X00, 0X07, 0X51, 0X86 }, /* -67.0db */ - { 0X00, 0X07, 0XC0, 0X8A }, /* -66.5db */ - { 0X00, 0X08, 0X36, 0X21 }, /* -66.0db */ - { 0X00, 0X08, 0XB2, 0XB0 }, /* -65.5db */ - { 0X00, 0X09, 0X36, 0XA1 }, /* -65.0db */ - { 0X00, 0X09, 0XC2, 0X63 }, /* -64.5db */ - { 0X00, 0X0A, 0X56, 0X6D }, /* -64.0db */ - { 0X00, 0X0A, 0XF3, 0X3C }, /* -63.5db */ - { 0X00, 0X0B, 0X99, 0X56 }, /* -63.0db */ - { 0X00, 0X0C, 0X49, 0X48 }, /* -62.5db */ - { 0X00, 0X0D, 0X03, 0XA7 }, /* -62.0db */ - { 0X00, 0X0D, 0XC9, 0X11 }, /* -61.5db */ - { 0X00, 0X0E, 0X9A, 0X2D }, /* -61.0db */ - { 0X00, 0X0F, 0X77, 0XAD }, /* -60.5db */ - { 0X00, 0X10, 0X62, 0X4D }, /* -60.0db */ - { 0X00, 0X11, 0X5A, 0XD5 }, /* -59.5db */ - { 0X00, 0X12, 0X62, 0X16 }, /* -59.0db */ - { 0X00, 0X13, 0X78, 0XF0 }, /* -58.5db */ - { 0X00, 0X14, 0XA0, 0X50 }, /* -58.0db */ - { 0X00, 0X15, 0XD9, 0X31 }, /* -57.5db */ - { 0X00, 0X17, 0X24, 0X9C }, /* -57.0db */ - { 0X00, 0X18, 0X83, 0XAA }, /* -56.5db */ - { 0X00, 0X19, 0XF7, 0X86 }, /* -56.0db */ - { 0X00, 0X1B, 0X81, 0X6A }, /* -55.5db */ - { 0X00, 0X1D, 0X22, 0XA4 }, /* -55.0db */ - { 0X00, 0X1E, 0XDC, 0X98 }, /* -54.5db */ - { 0X00, 0X20, 0XB0, 0XBC }, /* -54.0db */ - { 0X00, 0X22, 0XA0, 0X9D }, /* -53.5db */ - { 0X00, 0X24, 0XAD, 0XE0 }, /* -53.0db */ - { 0X00, 0X26, 0XDA, 0X43 }, /* -52.5db */ - { 0X00, 0X29, 0X27, 0X9D }, /* -52.0db */ - { 0X00, 0X2B, 0X97, 0XE3 }, /* -51.5db */ - { 0X00, 0X2E, 0X2D, 0X27 }, /* -51.0db */ - { 0X00, 0X30, 0XE9, 0X9A }, /* -50.5db */ - { 0X00, 0X33, 0XCF, 0X8D }, /* -50.0db */ - { 0X00, 0X36, 0XE1, 0X78 }, /* -49.5db */ - { 0X00, 0X3A, 0X21, 0XF3 }, /* -49.0db */ - { 0X00, 0X3D, 0X93, 0XC3 }, /* -48.5db */ - { 0X00, 0X41, 0X39, 0XD3 }, /* -48.0db */ - { 0X00, 0X45, 0X17, 0X3B }, /* -47.5db */ - { 0X00, 0X49, 0X2F, 0X44 }, /* -47.0db */ - { 0X00, 0X4D, 0X85, 0X66 }, /* -46.5db */ - { 0X00, 0X52, 0X1D, 0X50 }, /* -46.0db */ - { 0X00, 0X56, 0XFA, 0XE8 }, /* -45.5db */ - { 0X00, 0X5C, 0X22, 0X4E }, /* -45.0db */ - { 0X00, 0X61, 0X97, 0XE1 }, /* -44.5db */ - { 0X00, 0X67, 0X60, 0X44 }, /* -44.0db */ - { 0X00, 0X6D, 0X80, 0X60 }, /* -43.5db */ - { 0X00, 0X73, 0XFD, 0X65 }, /* -43.0db */ - { 
0X00, 0X7A, 0XDC, 0XD7 }, /* -42.5db */ - { 0X00, 0X82, 0X24, 0X8A }, /* -42.0db */ - { 0X00, 0X89, 0XDA, 0XAB }, /* -41.5db */ - { 0X00, 0X92, 0X05, 0XC6 }, /* -41.0db */ - { 0X00, 0X9A, 0XAC, 0XC8 }, /* -40.5db */ - { 0X00, 0XA3, 0XD7, 0X0A }, /* -40.0db */ - { 0X00, 0XAD, 0X8C, 0X52 }, /* -39.5db */ - { 0X00, 0XB7, 0XD4, 0XDD }, /* -39.0db */ - { 0X00, 0XC2, 0XB9, 0X65 }, /* -38.5db */ - { 0X00, 0XCE, 0X43, 0X28 }, /* -38.0db */ - { 0X00, 0XDA, 0X7B, 0XF1 }, /* -37.5db */ - { 0X00, 0XE7, 0X6E, 0X1E }, /* -37.0db */ - { 0X00, 0XF5, 0X24, 0XAC }, /* -36.5db */ - { 0X01, 0X03, 0XAB, 0X3D }, /* -36.0db */ - { 0X01, 0X13, 0X0E, 0X24 }, /* -35.5db */ - { 0X01, 0X23, 0X5A, 0X71 }, /* -35.0db */ - { 0X01, 0X34, 0X9D, 0XF8 }, /* -34.5db */ - { 0X01, 0X46, 0XE7, 0X5D }, /* -34.0db */ - { 0X01, 0X5A, 0X46, 0X27 }, /* -33.5db */ - { 0X01, 0X6E, 0XCA, 0XC5 }, /* -33.0db */ - { 0X01, 0X84, 0X86, 0X9F }, /* -32.5db */ - { 0X01, 0X9B, 0X8C, 0X27 }, /* -32.0db */ - { 0X01, 0XB3, 0XEE, 0XE5 }, /* -31.5db */ - { 0X01, 0XCD, 0XC3, 0X8C }, /* -31.0db */ - { 0X01, 0XE9, 0X20, 0X05 }, /* -30.5db */ - { 0X02, 0X06, 0X1B, 0X89 }, /* -30.0db */ - { 0X02, 0X24, 0XCE, 0XB0 }, /* -29.5db */ - { 0X02, 0X45, 0X53, 0X85 }, /* -29.0db */ - { 0X02, 0X67, 0XC5, 0XA2 }, /* -28.5db */ - { 0X02, 0X8C, 0X42, 0X3F }, /* -28.0db */ - { 0X02, 0XB2, 0XE8, 0X55 }, /* -27.5db */ - { 0X02, 0XDB, 0XD8, 0XAD }, /* -27.0db */ - { 0X03, 0X07, 0X36, 0X05 }, /* -26.5db */ - { 0X03, 0X35, 0X25, 0X29 }, /* -26.0db */ - { 0X03, 0X65, 0XCD, 0X13 }, /* -25.5db */ - { 0X03, 0X99, 0X57, 0X0C }, /* -25.0db */ - { 0X03, 0XCF, 0XEE, 0XCF }, /* -24.5db */ - { 0X04, 0X09, 0XC2, 0XB0 }, /* -24.0db */ - { 0X04, 0X47, 0X03, 0XC1 }, /* -23.5db */ - { 0X04, 0X87, 0XE5, 0XFB }, /* -23.0db */ - { 0X04, 0XCC, 0XA0, 0X6D }, /* -22.5db */ - { 0X05, 0X15, 0X6D, 0X68 }, /* -22.0db */ - { 0X05, 0X62, 0X8A, 0XB3 }, /* -21.5db */ - { 0X05, 0XB4, 0X39, 0XBC }, /* -21.0db */ - { 0X06, 0X0A, 0XBF, 0XD4 }, /* -20.5db */ - { 0X06, 0X66, 0X66, 0X66 }, /* -20.0db */ - { 0X06, 0XC7, 0X7B, 0X36 }, /* -19.5db */ - { 0X07, 0X2E, 0X50, 0XA6 }, /* -19.0db */ - { 0X07, 0X9B, 0X3D, 0XF6 }, /* -18.5db */ - { 0X08, 0X0E, 0X9F, 0X96 }, /* -18.0db */ - { 0X08, 0X88, 0XD7, 0X6D }, /* -17.5db */ - { 0X09, 0X0A, 0X4D, 0X2F }, /* -17.0db */ - { 0X09, 0X93, 0X6E, 0XB8 }, /* -16.5db */ - { 0X0A, 0X24, 0XB0, 0X62 }, /* -16.0db */ - { 0X0A, 0XBE, 0X8D, 0X70 }, /* -15.5db */ - { 0X0B, 0X61, 0X88, 0X71 }, /* -15.0db */ - { 0X0C, 0X0E, 0X2B, 0XB0 }, /* -14.5db */ - { 0X0C, 0XC5, 0X09, 0XAB }, /* -14.0db */ - { 0X0D, 0X86, 0XBD, 0X8D }, /* -13.5db */ - { 0X0E, 0X53, 0XEB, 0XB3 }, /* -13.0db */ - { 0X0F, 0X2D, 0X42, 0X38 }, /* -12.5db */ - { 0X10, 0X13, 0X79, 0X87 }, /* -12.0db */ - { 0X11, 0X07, 0X54, 0XF9 }, /* -11.5db */ - { 0X12, 0X09, 0XA3, 0X7A }, /* -11.0db */ - { 0X13, 0X1B, 0X40, 0X39 }, /* -10.5db */ - { 0X14, 0X3D, 0X13, 0X62 }, /* -10.0db */ - { 0X15, 0X70, 0X12, 0XE1 }, /* -9.5db */ - { 0X16, 0XB5, 0X43, 0X37 }, /* -9.0db */ - { 0X18, 0X0D, 0XB8, 0X54 }, /* -8.5db */ - { 0X19, 0X7A, 0X96, 0X7F }, /* -8.0db */ - { 0X1A, 0XFD, 0X13, 0X54 }, /* -7.5db */ - { 0X1C, 0X96, 0X76, 0XC6 }, /* -7.0db */ - { 0X1E, 0X48, 0X1C, 0X37 }, /* -6.5db */ - { 0X20, 0X13, 0X73, 0X9E }, /* -6.0db */ - { 0X21, 0XFA, 0X02, 0XBF }, /* -5.5db */ - { 0X23, 0XFD, 0X66, 0X78 }, /* -5.0db */ - { 0X26, 0X1F, 0X54, 0X1C }, /* -4.5db */ - { 0X28, 0X61, 0X9A, 0XE9 }, /* -4.0db */ - { 0X2A, 0XC6, 0X25, 0X91 }, /* -3.5db */ - { 0X2D, 0X4E, 0XFB, 0XD5 }, /* -3.0db */ - { 0X2F, 0XFE, 0X44, 0X48 }, /* -2.5db */ - { 0X32, 
0XD6, 0X46, 0X17 }, /* -2.0db */ - { 0X35, 0XD9, 0X6B, 0X02 }, /* -1.5db */ - { 0X39, 0X0A, 0X41, 0X5F }, /* -1.0db */ - { 0X3C, 0X6B, 0X7E, 0X4F }, /* -0.5db */ - { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */ - { 0X43, 0XCA, 0XD0, 0X22 }, /* 0.5db */ - { 0X47, 0XCF, 0X26, 0X7D }, /* 1.0db */ - { 0X4C, 0X10, 0X6B, 0XA5 }, /* 1.5db */ - { 0X50, 0X92, 0X3B, 0XE3 }, /* 2.0db */ - { 0X55, 0X58, 0X6A, 0X46 }, /* 2.5db */ - { 0X5A, 0X67, 0X03, 0XDF }, /* 3.0db */ - { 0X5F, 0XC2, 0X53, 0X32 }, /* 3.5db */ - { 0X65, 0X6E, 0XE3, 0XDB }, /* 4.0db */ - { 0X6B, 0X71, 0X86, 0X68 }, /* 4.5db */ - { 0X71, 0XCF, 0X54, 0X71 }, /* 5.0db */ - { 0X78, 0X8D, 0XB4, 0XE9 }, /* 5.5db */ - { 0XFF, 0XFF, 0XFF, 0XFF }, /* 6.0db */ -}; #endif diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h index 18161d02a96f..dbda552398b5 100644 --- a/include/sound/tas2781.h +++ b/include/sound/tas2781.h @@ -81,11 +81,6 @@ struct tasdevice { bool is_loaderr; };
-struct tasdevice_irqinfo { - int irq_gpio; - int irq; -}; - struct calidata { unsigned char *data; unsigned long total_sz; @@ -93,7 +88,6 @@ struct calidata {
struct tasdevice_priv { struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS]; - struct tasdevice_irqinfo irq_info; struct tasdevice_rca rcabin; struct calidata cali_data; struct tasdevice_fw *fmw; @@ -115,6 +109,7 @@ struct tasdevice_priv { unsigned int chip_id; unsigned int sysclk;
+ int irq; int cur_prog; int cur_conf; int fw_state; diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index ed794b5fefbe..2851c823095b 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE); { CP_NODE_NEED_CP, "node needs cp" }, \ { CP_FASTBOOT_MODE, "fastboot mode" }, \ { CP_SPEC_LOG_NUM, "log type is 2" }, \ - { CP_RECOVER_DIR, "dir needs recovery" }) + { CP_RECOVER_DIR, "dir needs recovery" }, \ + { CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \ __print_symbolic(type, \ diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index f1e7c670add8..a38f36b68060 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/rculist_nulls.h> #include <linux/cpu.h> +#include <linux/cpuset.h> #include <linux/task_work.h> #include <linux/audit.h> #include <linux/mmu_context.h> @@ -1167,7 +1168,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL)) goto err; - cpumask_copy(wq->cpu_mask, cpu_possible_mask); + cpuset_cpus_allowed(data->task, wq->cpu_mask); wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; wq->acct[IO_WQ_ACCT_UNBOUND].max_workers = task_rlimit(current, RLIMIT_NPROC); @@ -1322,17 +1323,29 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask) { + cpumask_var_t allowed_mask; + int ret = 0; + if (!tctx || !tctx->io_wq) return -EINVAL;
+ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL)) + return -ENOMEM; + rcu_read_lock(); - if (mask) - cpumask_copy(tctx->io_wq->cpu_mask, mask); - else - cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask); + cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask); + if (mask) { + if (cpumask_subset(mask, allowed_mask)) + cpumask_copy(tctx->io_wq->cpu_mask, mask); + else + ret = -EINVAL; + } else { + cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask); + } rcu_read_unlock();
- return 0; + free_cpumask_var(allowed_mask); + return ret; }
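The io-wq and sqpoll hunks above stop copying cpu_possible_mask blindly and instead validate any requested CPU mask against the task's cpuset-allowed CPUs (cpumask_subset() against cpuset_cpus_allowed()). A rough userspace analogue of the same subset check, with sched_getaffinity() standing in for the allowed mask; this is illustrative only, not the io_uring code:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <errno.h>
  #include <sys/types.h>

  /* Accept 'requested' only if every CPU in it is also in the allowed set. */
  static int apply_affinity_if_allowed(pid_t pid, const cpu_set_t *requested)
  {
          cpu_set_t allowed;
          int cpu;

          if (sched_getaffinity(pid, sizeof(allowed), &allowed) != 0)
                  return -errno;

          for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
                  if (CPU_ISSET(cpu, requested) && !CPU_ISSET(cpu, &allowed))
                          return -EINVAL;   /* request escapes the allowed set */

          return sched_setaffinity(pid, sizeof(cpu_set_t), requested) ? -errno : 0;
  }

As in the patch, an absent request falls back to the allowed mask itself rather than to every possible CPU.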
/* diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 3942db160f18..25112cf78e2b 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2360,7 +2360,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, return 1; if (unlikely(!llist_empty(&ctx->work_llist))) return 1; - if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) + if (unlikely(task_work_pending(current))) return 1; if (unlikely(task_sigpending(current))) return -EINTR; @@ -2463,9 +2463,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, * If we got woken because of task_work being processed, run it * now rather than let the caller do another wait loop. */ - io_run_task_work(); if (!llist_empty(&ctx->work_llist)) io_run_local_work(ctx, nr_wait); + io_run_task_work();
/* * Non-local task_work will be run on exit to userspace, but diff --git a/io_uring/rw.c b/io_uring/rw.c index c004d21e2f12..d85e2d41a992 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -855,6 +855,14 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
ret = io_iter_do_read(rw, &io->iter);
+ /* + * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT + * issue, even though they should be returning -EAGAIN. To be safe, + * retry from blocking context for either. + */ + if (ret == -EOPNOTSUPP && force_nonblock) + ret = -EAGAIN; + if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { req->flags &= ~REQ_F_REISSUE; /* If we can poll, just do that. */ diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 3b50dc9586d1..fd6c29f90615 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/audit.h> #include <linux/security.h> +#include <linux/cpuset.h> #include <linux/io_uring.h>
#include <uapi/linux/io_uring.h> @@ -460,11 +461,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, return 0;
if (p->flags & IORING_SETUP_SQ_AFF) { + cpumask_var_t allowed_mask; int cpu = p->sq_thread_cpu;
ret = -EINVAL; if (cpu >= nr_cpu_ids || !cpu_online(cpu)) goto err_sqpoll; + ret = -ENOMEM; + if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL)) + goto err_sqpoll; + ret = -EINVAL; + cpuset_cpus_allowed(current, allowed_mask); + if (!cpumask_test_cpu(cpu, allowed_mask)) { + free_cpumask_var(allowed_mask); + goto err_sqpoll; + } + free_cpumask_var(allowed_mask); sqd->sq_cpu = cpu; } else { sqd->sq_cpu = -1; diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 08a338e1f231..701092736826 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -11,7 +11,6 @@ #include <linux/lsm_hooks.h> #include <linux/bpf_lsm.h> #include <linux/kallsyms.h> -#include <linux/bpf_verifier.h> #include <net/bpf_sk_storage.h> #include <linux/bpf_local_storage.h> #include <linux/btf_ids.h> @@ -390,3 +389,36 @@ const struct bpf_verifier_ops lsm_verifier_ops = { .get_func_proto = bpf_lsm_func_proto, .is_valid_access = btf_ctx_access, }; + +/* hooks return 0 or 1 */ +BTF_SET_START(bool_lsm_hooks) +#ifdef CONFIG_SECURITY_NETWORK_XFRM +BTF_ID(func, bpf_lsm_xfrm_state_pol_flow_match) +#endif +#ifdef CONFIG_AUDIT +BTF_ID(func, bpf_lsm_audit_rule_known) +#endif +BTF_ID(func, bpf_lsm_inode_xattr_skipcap) +BTF_SET_END(bool_lsm_hooks) + +int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *retval_range) +{ + /* no return value range for void hooks */ + if (!prog->aux->attach_func_proto->type) + return -EINVAL; + + if (btf_id_set_contains(&bool_lsm_hooks, prog->aux->attach_btf_id)) { + retval_range->minval = 0; + retval_range->maxval = 1; + } else { + /* All other available LSM hooks, except task_prctl, return 0 + * on success and negative error code on failure. + * To keep things simple, we only allow bpf progs to return 0 + * or negative errno for task_prctl too. + */ + retval_range->minval = -MAX_ERRNO; + retval_range->maxval = 0; + } + return 0; +} diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a4e4f8d43ecf..7783b16b87cf 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6418,8 +6418,11 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (arg == nr_args) { switch (prog->expected_attach_type) { - case BPF_LSM_CGROUP: case BPF_LSM_MAC: + /* mark we are accessing the return value */ + info->is_retval = true; + fallthrough; + case BPF_LSM_CGROUP: case BPF_TRACE_FEXIT: /* When LSM programs are attached to void LSM hooks * they use FEXIT trampolines and when attached to @@ -8888,6 +8891,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, struct bpf_core_cand_list cands = {}; struct bpf_core_relo_res targ_res; struct bpf_core_spec *specs; + const struct btf_type *type; int err;
/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5" @@ -8897,6 +8901,13 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, if (!specs) return -ENOMEM;
+ type = btf_type_by_id(ctx->btf, relo->type_id); + if (!type) { + bpf_log(ctx->log, "relo #%u: bad type id %u\n", + relo_idx, relo->type_id); + return -EINVAL; + } + if (need_cands) { struct bpf_cand_cache *cc; int i; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index b5f0adae8293..c9e235807cac 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -517,11 +517,12 @@ static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, }
BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, - long *, res) + s64 *, res) { long long _res; int err;
+ *res = 0; err = __bpf_strtoll(buf, buf_len, flags, &_res); if (err < 0) return err; @@ -538,16 +539,18 @@ const struct bpf_func_proto bpf_strtol_proto = { .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(s64), };
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, - unsigned long *, res) + u64 *, res) { unsigned long long _res; bool is_negative; int err;
+ *res = 0; err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); if (err < 0) return err; @@ -566,7 +569,8 @@ const struct bpf_func_proto bpf_strtoul_proto = { .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(u64), };
BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index bf6c5f685ea2..d9cae8e25969 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -5932,6 +5932,7 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) { + *res = 0; if (flags) return -EINVAL;
@@ -5952,7 +5953,8 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { .arg1_type = ARG_PTR_TO_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg4_size = sizeof(u64), };
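The helper changes above (bpf_strtol, bpf_strtoul, bpf_kallsyms_lookup_name) both retype the result pointer as fixed-size uninitialized memory and add '*res = 0;' before any early return, so a failed call can never leave the caller reading stale data. The same defensive shape in plain C, using a hypothetical parse_port() helper rather than kernel code:

  #include <errno.h>
  #include <stdlib.h>

  /* Leave *out in a defined state even when parsing fails. */
  static int parse_port(const char *s, long *out)
  {
          char *end;
          long val;

          *out = 0;               /* initialize before any early return */
          if (!s || !*s)
                  return -EINVAL;

          errno = 0;
          val = strtol(s, &end, 10);
          if (errno || *end != '\0' || val < 0 || val > 65535)
                  return -ERANGE;

          *out = val;
          return 0;
  }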
static const struct bpf_func_proto * diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d8520095ca03..8c07efa3905b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2334,6 +2334,25 @@ static void mark_reg_unknown(struct bpf_verifier_env *env, __mark_reg_unknown(env, regs + regno); }
+static int __mark_reg_s32_range(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, + u32 regno, + s32 s32_min, + s32 s32_max) +{ + struct bpf_reg_state *reg = regs + regno; + + reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min); + reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max); + + reg->smin_value = max_t(s64, reg->smin_value, s32_min); + reg->smax_value = min_t(s64, reg->smax_value, s32_max); + + reg_bounds_sync(reg); + + return reg_bounds_sanity_check(env, reg, "s32_range"); +} + static void __mark_reg_not_init(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) { @@ -5587,11 +5606,13 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type, - struct btf **btf, u32 *btf_id) + struct btf **btf, u32 *btf_id, bool *is_retval, bool is_ldsx) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, .log = &env->log, + .is_retval = false, + .is_ldsx = is_ldsx, };
if (env->ops->is_valid_access && @@ -5604,6 +5625,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, * type of narrower access. */ *reg_type = info.reg_type; + *is_retval = info.is_retval;
if (base_type(*reg_type) == PTR_TO_BTF_ID) { *btf = info.btf; @@ -6772,6 +6794,17 @@ static int check_stack_access_within_bounds( return grow_stack_state(env, state, -min_off /* size */); }
+static bool get_func_retval_range(struct bpf_prog *prog, + struct bpf_retval_range *range) +{ + if (prog->type == BPF_PROG_TYPE_LSM && + prog->expected_attach_type == BPF_LSM_MAC && + !bpf_lsm_get_retval_range(prog, range)) { + return true; + } + return false; +} + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -6876,6 +6909,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem)) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { + bool is_retval = false; + struct bpf_retval_range range; enum bpf_reg_type reg_type = SCALAR_VALUE; struct btf *btf = NULL; u32 btf_id = 0; @@ -6891,7 +6926,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn return err;
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, - &btf_id); + &btf_id, &is_retval, is_ldsx); if (err) verbose_linfo(env, insn_idx, "; "); if (!err && t == BPF_READ && value_regno >= 0) { @@ -6900,7 +6935,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) { - mark_reg_unknown(env, regs, value_regno); + if (is_retval && get_func_retval_range(env->prog, &range)) { + err = __mark_reg_s32_range(env, regs, value_regno, + range.minval, range.maxval); + if (err) + return err; + } else { + mark_reg_unknown(env, regs, value_regno); + } } else { mark_reg_known_zero(env, regs, value_regno); @@ -8128,6 +8170,12 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type) type == ARG_CONST_SIZE_OR_ZERO; }
+static bool arg_type_is_raw_mem(enum bpf_arg_type type) +{ + return base_type(type) == ARG_PTR_TO_MEM && + type & MEM_UNINIT; +} + static bool arg_type_is_release(enum bpf_arg_type type) { return type & OBJ_RELEASE; @@ -8138,16 +8186,6 @@ static bool arg_type_is_dynptr(enum bpf_arg_type type) return base_type(type) == ARG_PTR_TO_DYNPTR; }
-static int int_ptr_type_to_size(enum bpf_arg_type type) -{ - if (type == ARG_PTR_TO_INT) - return sizeof(u32); - else if (type == ARG_PTR_TO_LONG) - return sizeof(u64); - - return -EINVAL; -} - static int resolve_map_arg_type(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_arg_type *arg_type) @@ -8220,16 +8258,6 @@ static const struct bpf_reg_types mem_types = { }, };
-static const struct bpf_reg_types int_ptr_types = { - .types = { - PTR_TO_STACK, - PTR_TO_PACKET, - PTR_TO_PACKET_META, - PTR_TO_MAP_KEY, - PTR_TO_MAP_VALUE, - }, -}; - static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE, @@ -8285,8 +8313,6 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, [ARG_PTR_TO_MEM] = &mem_types, [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types, - [ARG_PTR_TO_INT] = &int_ptr_types, - [ARG_PTR_TO_LONG] = &int_ptr_types, [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, [ARG_PTR_TO_FUNC] = &func_ptr_types, [ARG_PTR_TO_STACK] = &stack_ptr_types, @@ -8847,9 +8873,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, */ meta->raw_mode = arg_type & MEM_UNINIT; if (arg_type & MEM_FIXED_SIZE) { - err = check_helper_mem_access(env, regno, - fn->arg_size[arg], false, - meta); + err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta); + if (err) + return err; + if (arg_type & MEM_ALIGNED) + err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); } break; case ARG_CONST_SIZE: @@ -8874,17 +8902,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, if (err) return err; break; - case ARG_PTR_TO_INT: - case ARG_PTR_TO_LONG: - { - int size = int_ptr_type_to_size(arg_type); - - err = check_helper_mem_access(env, regno, size, false, meta); - if (err) - return err; - err = check_ptr_alignment(env, reg, 0, size, true); - break; - } case ARG_PTR_TO_CONST_STR: { err = check_reg_const_str(env, reg, regno); @@ -9201,15 +9218,15 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn) { int count = 0;
- if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg1_type)) count++; - if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg2_type)) count++; - if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg3_type)) count++; - if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg4_type)) count++; - if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) + if (arg_type_is_raw_mem(fn->arg5_type)) count++;
/* We only support one arg being in raw mode at the moment, @@ -9923,9 +9940,13 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) return is_rbtree_lock_required_kfunc(kfunc_btf_id); }
-static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg) +static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg, + bool return_32bit) { - return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; + if (return_32bit) + return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval; + else + return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; }
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) @@ -9962,8 +9983,8 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) if (err) return err;
- /* enforce R0 return value range */ - if (!retval_range_within(callee->callback_ret_range, r0)) { + /* enforce R0 return value range, and bpf_callback_t returns 64bit */ + if (!retval_range_within(callee->callback_ret_range, r0, false)) { verbose_invalid_scalar(env, r0, callee->callback_ret_range, "At callback return", "R0"); return -EINVAL; @@ -15569,6 +15590,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char int err; struct bpf_func_state *frame = env->cur_state->frame[0]; const bool is_subprog = frame->subprogno; + bool return_32bit = false;
/* LSM and struct_ops func-ptr's return type could be "void" */ if (!is_subprog || frame->in_exception_callback_fn) { @@ -15674,12 +15696,14 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
case BPF_PROG_TYPE_LSM: if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { - /* Regular BPF_PROG_TYPE_LSM programs can return - * any value. - */ - return 0; - } - if (!env->prog->aux->attach_func_proto->type) { + /* no range found, any return value is allowed */ + if (!get_func_retval_range(env->prog, &range)) + return 0; + /* no restricted range, any return value is allowed */ + if (range.minval == S32_MIN && range.maxval == S32_MAX) + return 0; + return_32bit = true; + } else if (!env->prog->aux->attach_func_proto->type) { /* Make sure programs that attach to void * hooks don't try to modify return value. */ @@ -15709,7 +15733,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char if (err) return err;
- if (!retval_range_within(range, reg)) { + if (!retval_range_within(range, reg, return_32bit)) { verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name); if (!is_subprog && prog->expected_attach_type == BPF_LSM_CGROUP && diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c index f5cb0ec45b9d..34aa63d7c9c6 100644 --- a/kernel/cgroup/pids.c +++ b/kernel/cgroup/pids.c @@ -244,7 +244,6 @@ static void pids_event(struct pids_cgroup *pids_forking, struct pids_cgroup *pids_over_limit) { struct pids_cgroup *p = pids_forking; - bool limit = false;
/* Only log the first time limit is hit. */ if (atomic64_inc_return(&p->events_local[PIDCG_FORKFAIL]) == 1) { @@ -252,20 +251,17 @@ static void pids_event(struct pids_cgroup *pids_forking, pr_cont_cgroup_path(p->css.cgroup); pr_cont("\n"); } - cgroup_file_notify(&p->events_local_file); if (!cgroup_subsys_on_dfl(pids_cgrp_subsys) || - cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS) + cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS) { + cgroup_file_notify(&p->events_local_file); return; + }
- for (; parent_pids(p); p = parent_pids(p)) { - if (p == pids_over_limit) { - limit = true; - atomic64_inc(&p->events_local[PIDCG_MAX]); - cgroup_file_notify(&p->events_local_file); - } - if (limit) - atomic64_inc(&p->events[PIDCG_MAX]); + atomic64_inc(&pids_over_limit->events_local[PIDCG_MAX]); + cgroup_file_notify(&pids_over_limit->events_local_file);
+ for (p = pids_over_limit; parent_pids(p); p = parent_pids(p)) { + atomic64_inc(&p->events[PIDCG_MAX]); cgroup_file_notify(&p->events_file); } } diff --git a/kernel/kthread.c b/kernel/kthread.c index f7be976ff88a..db4ceb0f503c 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -845,8 +845,16 @@ int kthread_worker_fn(void *worker_ptr) * event only cares about the address. */ trace_sched_kthread_work_execute_end(work, func); - } else if (!freezing(current)) + } else if (!freezing(current)) { schedule(); + } else { + /* + * Handle the case where the current remains + * TASK_INTERRUPTIBLE. try_to_freeze() expects + * the current to be TASK_RUNNING. + */ + __set_current_state(TASK_RUNNING); + }
try_to_freeze(); cond_resched(); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 0349f957e672..77e008239c6a 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -6196,25 +6196,27 @@ static struct pending_free *get_pending_free(void) static void free_zapped_rcu(struct rcu_head *cb);
/* - * Schedule an RCU callback if no RCU callback is pending. Must be called with - * the graph lock held. - */ -static void call_rcu_zapped(struct pending_free *pf) +* See if we need to queue an RCU callback, must be called with +* the lockdep lock held, returns false if either we don't have +* any pending free or the callback is already scheduled. +* Otherwise, a call_rcu() must follow this function call. +*/ +static bool prepare_call_rcu_zapped(struct pending_free *pf) { WARN_ON_ONCE(inside_selftest());
if (list_empty(&pf->zapped)) - return; + return false;
if (delayed_free.scheduled) - return; + return false;
delayed_free.scheduled = true;
WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf); delayed_free.index ^= 1;
- call_rcu(&delayed_free.rcu_head, free_zapped_rcu); + return true; }
/* The caller must hold the graph lock. May be called from RCU context. */ @@ -6240,6 +6242,7 @@ static void free_zapped_rcu(struct rcu_head *ch) { struct pending_free *pf; unsigned long flags; + bool need_callback;
if (WARN_ON_ONCE(ch != &delayed_free.rcu_head)) return; @@ -6251,14 +6254,18 @@ static void free_zapped_rcu(struct rcu_head *ch) pf = delayed_free.pf + (delayed_free.index ^ 1); __free_zapped_classes(pf); delayed_free.scheduled = false; + need_callback = + prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index); + lockdep_unlock(); + raw_local_irq_restore(flags);
/* - * If there's anything on the open list, close and start a new callback. - */ - call_rcu_zapped(delayed_free.pf + delayed_free.index); + * If there's pending free and its callback has not been scheduled, + * queue an RCU callback. + */ + if (need_callback) + call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
- lockdep_unlock(); - raw_local_irq_restore(flags); }
/* @@ -6298,6 +6305,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size) { struct pending_free *pf; unsigned long flags; + bool need_callback;
init_data_structures_once();
@@ -6305,10 +6313,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size) lockdep_lock(); pf = get_pending_free(); __lockdep_free_key_range(pf, start, size); - call_rcu_zapped(pf); + need_callback = prepare_call_rcu_zapped(pf); lockdep_unlock(); raw_local_irq_restore(flags); - + if (need_callback) + call_rcu(&delayed_free.rcu_head, free_zapped_rcu); /* * Wait for any possible iterators from look_up_lock_class() to pass * before continuing to free the memory they refer to. @@ -6402,6 +6411,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock) struct pending_free *pf; unsigned long flags; int locked; + bool need_callback = false;
raw_local_irq_save(flags); locked = graph_lock(); @@ -6410,11 +6420,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
pf = get_pending_free(); __lockdep_reset_lock(pf, lock); - call_rcu_zapped(pf); + need_callback = prepare_call_rcu_zapped(pf);
graph_unlock(); out_irq: raw_local_irq_restore(flags); + if (need_callback) + call_rcu(&delayed_free.rcu_head, free_zapped_rcu); }
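The lockdep rework above is an instance of a common locking pattern: under the graph lock, prepare_call_rcu_zapped() only records that a callback is needed and returns a flag, and the actual call_rcu() is issued after the lock has been dropped, so the callback path can never run into the lock that is still held. A small pthread sketch of that prepare-under-lock, fire-after-unlock shape (names are illustrative, not the lockdep code):

  #include <pthread.h>
  #include <stdbool.h>

  static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
  static bool work_pending;
  static bool callback_scheduled;

  /* Stand-in for call_rcu(); in real code this path may take state_lock. */
  static void fire_callback(void)
  {
  }

  static void request_work(void)
  {
          bool need_callback = false;

          pthread_mutex_lock(&state_lock);
          work_pending = true;
          if (!callback_scheduled) {      /* prepare: bookkeeping only */
                  callback_scheduled = true;
                  need_callback = true;
          }
          pthread_mutex_unlock(&state_lock);

          if (need_callback)              /* fire outside the lock */
                  fire_callback();
  }

Every call site in the patch follows the same two-step sequence: need_callback = prepare_call_rcu_zapped(pf) inside the critical section, then call_rcu() after graph_unlock()/lockdep_unlock().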
/* @@ -6458,6 +6470,7 @@ void lockdep_unregister_key(struct lock_class_key *key) struct pending_free *pf; unsigned long flags; bool found = false; + bool need_callback = false;
might_sleep();
@@ -6478,11 +6491,14 @@ void lockdep_unregister_key(struct lock_class_key *key) if (found) { pf = get_pending_free(); __lockdep_free_key_range(pf, key, 1); - call_rcu_zapped(pf); + need_callback = prepare_call_rcu_zapped(pf); } lockdep_unlock(); raw_local_irq_restore(flags);
+ if (need_callback) + call_rcu(&delayed_free.rcu_head, free_zapped_rcu); + /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ synchronize_rcu(); } diff --git a/kernel/module/Makefile b/kernel/module/Makefile index a10b2b9a6fdf..50ffcc413b54 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -5,7 +5,7 @@
# These are called from save_stack_trace() on slub debug path, # and produce insane amounts of uninteresting coverage. -KCOV_INSTRUMENT_module.o := n +KCOV_INSTRUMENT_main.o := n
obj-y += main.o obj-y += strict_rwx.o diff --git a/kernel/padata.c b/kernel/padata.c index 0fa6c2895460..d899f34558af 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -404,7 +404,8 @@ void padata_do_serial(struct padata_priv *padata) /* Sort in ascending order of sequence number. */ list_for_each_prev(pos, &reorder->list) { cur = list_entry(pos, struct padata_priv, list); - if (cur->seq_nr < padata->seq_nr) + /* Compare by difference to consider integer wrap around */ + if ((signed int)(cur->seq_nr - padata->seq_nr) < 0) break; } list_add(&padata->list, pos); @@ -512,9 +513,12 @@ void __init padata_do_multithreaded(struct padata_mt_job *job) * thread function. Load balance large jobs between threads by * increasing the number of chunks, guarantee at least the minimum * chunk size from the caller, and honor the caller's alignment. + * Ensure chunk_size is at least 1 to prevent divide-by-0 + * panic in padata_mt_helper(). */ ps.chunk_size = job->size / (ps.nworks * load_balance_factor); ps.chunk_size = max(ps.chunk_size, job->min_chunk); + ps.chunk_size = max(ps.chunk_size, 1ul); ps.chunk_size = roundup(ps.chunk_size, job->align);
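The padata ordering fix compares sequence numbers by signed difference, so the sort stays correct when the u32 counter wraps; the second hunk additionally clamps chunk_size to at least 1 to avoid a divide-by-zero in padata_mt_helper(). The wraparound idiom on its own, as a small standalone sketch (it relies on the usual modulo-2^32 behaviour of unsigned subtraction):

#include <stdint.h>
#include <stdio.h>

/* Returns nonzero if 'a' is logically before 'b', even across wraparound. */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t near_wrap = UINT32_MAX - 1;	/* 0xfffffffe */
	uint32_t wrapped   = 2;			/* counter already wrapped past 0 */

	/* Naive '<' gets this wrong: near_wrap < wrapped is false. */
	printf("naive:  %d\n", near_wrap < wrapped);
	/* Signed difference correctly sees near_wrap as older than wrapped. */
	printf("signed: %d\n", seq_before(near_wrap, wrapped));
	return 0;
}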
/* diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 3ce30841119a..2686ba122fa0 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -220,7 +220,10 @@ static bool __wake_nocb_gp(struct rcu_data *rdp_gp, raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); if (needwake) { trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); - wake_up_process(rdp_gp->nocb_gp_kthread); + if (cpu_is_offline(raw_smp_processor_id())) + swake_up_one_online(&rdp_gp->nocb_gp_wq); + else + wake_up_process(rdp_gp->nocb_gp_kthread); }
return needwake; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index f59e5c19d944..c5a3691ba6cc 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1599,46 +1599,40 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b) return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline); }
-static inline struct sched_statistics * +static __always_inline struct sched_statistics * __schedstats_from_dl_se(struct sched_dl_entity *dl_se) { + if (!schedstat_enabled()) + return NULL; + + if (dl_server(dl_se)) + return NULL; + return &dl_task_of(dl_se)->stats; }
static inline void update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); }
static inline void update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); }
static inline void update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); }
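The deadline-class cleanup folds the schedstat_enabled() and dl_server() checks into __schedstats_from_dl_se(), which now returns NULL when there is nothing to account, so each update_stats_*_dl() helper collapses to a single NULL test. The same accessor-owns-the-preconditions shape in a tiny standalone sketch (names invented):

#include <stddef.h>
#include <stdio.h>

struct stats { unsigned long waits; };
struct entity { int stats_enabled; struct stats st; };

/* The accessor owns all preconditions; callers only check for NULL. */
static struct stats *stats_of(struct entity *e)
{
	if (!e->stats_enabled)
		return NULL;
	return &e->st;
}

static void account_wait(struct entity *e)
{
	struct stats *st = stats_of(e);

	if (st)
		st->waits++;
}

int main(void)
{
	struct entity on = { .stats_enabled = 1 }, off = { 0 };

	account_wait(&on);
	account_wait(&off);
	printf("%lu %lu\n", on.st.waits, off.st.waits);	/* prints: 1 0 */
	return 0;
}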
static inline void diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 9057584ec06d..1d2cbdb162a6 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -511,7 +511,7 @@ static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
static int se_is_idle(struct sched_entity *se) { - return 0; + return task_has_idle_policy(task_of(se)); }
#endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -3188,6 +3188,15 @@ static bool vma_is_accessed(struct mm_struct *mm, struct vm_area_struct *vma) return true; }
+ /* + * This vma has not been accessed for a while, and if the number + * of threads in the same process is low, which means no other + * threads can help scan this vma, force a vma scan. + */ + if (READ_ONCE(mm->numa_scan_seq) > + (vma->numab_state->prev_scan_seq + get_nr_threads(current))) + return true; + return false; }
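The new condition above forces a VMA scan once the per-mm scan sequence has advanced by more than the thread count since this VMA was last scanned, i.e. once it is clear no other thread is going to cover it. Just that arithmetic, restated as a hedged standalone helper (the function name is invented):

#include <stdbool.h>
#include <stdio.h>

/*
 * Force a scan when the process-wide scan sequence has moved more than
 * nr_threads steps past the sequence at which this VMA was last scanned.
 */
static bool should_force_scan(unsigned long scan_seq,
			      unsigned long prev_scan_seq,
			      unsigned long nr_threads)
{
	return scan_seq > prev_scan_seq + nr_threads;
}

int main(void)
{
	/* 4 threads, VMA last scanned at seq 10: forced only after seq 14. */
	printf("%d %d\n",
	       should_force_scan(14, 10, 4),	/* 0: still within budget */
	       should_force_scan(15, 10, 4));	/* 1: nobody picked it up */
	return 0;
}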
@@ -8381,16 +8390,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int if (test_tsk_need_resched(curr)) return;
- /* Idle tasks are by definition preempted by non-idle tasks. */ - if (unlikely(task_has_idle_policy(curr)) && - likely(!task_has_idle_policy(p))) - goto preempt; - - /* - * Batch and idle tasks do not preempt non-idle tasks (their preemption - * is driven by the tick): - */ - if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) + if (!sched_feat(WAKEUP_PREEMPTION)) return;
find_matching_se(&se, &pse); @@ -8400,7 +8400,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int pse_is_idle = se_is_idle(pse);
/* - * Preempt an idle group in favor of a non-idle group (and don't preempt + * Preempt an idle entity in favor of a non-idle entity (and don't preempt * in the inverse case). */ if (cse_is_idle && !pse_is_idle) @@ -8408,9 +8408,14 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int if (cse_is_idle != pse_is_idle) return;
+ /* + * BATCH and IDLE tasks do not preempt others. + */ + if (unlikely(p->policy != SCHED_NORMAL)) + return; + cfs_rq = cfs_rq_of(se); update_curr(cfs_rq); - /* * XXX pick_eevdf(cfs_rq) != se ? */ @@ -9360,9 +9365,10 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
+ /* hw_pressure doesn't care about invariance */ decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | - update_hw_load_avg(now, rq, hw_pressure) | + update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) | update_irq_load_avg(rq, 0);
if (others_have_blocked(rq)) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index cd098846e251..add26dc27d7e 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1226,7 +1226,8 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_LONG, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u64), };
BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) @@ -1242,7 +1243,8 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = { .func = get_func_ret, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_LONG, + .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg2_size = sizeof(u64), };
BPF_CALL_1(get_func_arg_cnt, void *, ctx) @@ -3485,17 +3487,20 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr uprobes[i].ref_ctr_offset, &uprobes[i].consumer); if (err) { - bpf_uprobe_unregister(&path, uprobes, i); - goto error_free; + link->cnt = i; + goto error_unregister; } }
err = bpf_link_prime(&link->link, &link_primer); if (err) - goto error_free; + goto error_unregister;
return bpf_link_settle(&link_primer);
+error_unregister: + bpf_uprobe_unregister(&path, uprobes, link->cnt); + error_free: kvfree(uprobes); kfree(link); diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 7cea91e193a8..1ea8af72849c 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -142,13 +142,14 @@ static void fill_pool(void) * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical * sections. */ - while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { + while (READ_ONCE(obj_nr_tofree) && + READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { raw_spin_lock_irqsave(&pool_lock, flags); /* * Recheck with the lock held as the worker thread might have * won the race and freed the global free list already. */ - while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { + while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) { obj = hlist_entry(obj_to_free.first, typeof(*obj), node); hlist_del(&obj->node); WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 5e2e93307f0d..d3412984170c 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -65,7 +65,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map, { unsigned long mask, word_mask;
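Back in the bpf_uprobe_multi_link_attach() hunk above, the fix records how many consumers were actually registered (link->cnt = i) before jumping to a single error_unregister label, so exactly those are torn down. The register-partially, roll-back-exactly-that-many pattern in a standalone sketch (names invented):

#include <stdio.h>

#define NR_HOOKS 5

static int register_hook(int i)
{
	return (i == 3) ? -1 : 0;	/* simulate failure on the 4th hook */
}

static void unregister_hook(int i)
{
	printf("unregister %d\n", i);
}

/* Register all hooks, or none: on failure, roll back the ones done so far. */
static int register_all(void)
{
	int i, err = 0;

	for (i = 0; i < NR_HOOKS; i++) {
		err = register_hook(i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		unregister_hook(i);	/* unregisters 2, 1, 0 */
	return err;
}

int main(void)
{
	return register_all() ? 1 : 0;
}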
- guard(spinlock_irqsave)(&map->swap_lock); + guard(raw_spinlock_irqsave)(&map->swap_lock);
if (!map->cleared) { if (depth == 0) @@ -136,7 +136,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, }
for (i = 0; i < sb->map_nr; i++) - spin_lock_init(&sb->map[i].swap_lock); + raw_spin_lock_init(&sb->map[i].swap_lock);
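The sbitmap fix switches swap_lock to a raw spinlock while keeping the scope-based guard() helper, which releases the lock automatically when the guarded scope ends. Outside the kernel the same mechanism can be approximated with the GCC/Clang cleanup attribute; the following is an illustrative userspace analogue, not the kernel implementation:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Poor man's guard(): the mutex is dropped when 'g' leaves scope. */
#define GUARD(mutexp) \
	pthread_mutex_t *g __attribute__((cleanup(unlock_cleanup))) = (mutexp); \
	pthread_mutex_lock(g)

static void bump(void)
{
	GUARD(&lock);
	counter++;
}	/* unlock happens here, on every return path */

int main(void)
{
	bump();
	bump();
	printf("%d\n", counter);	/* prints: 2 */
	return 0;
}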
return 0; } diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c index 88a2c35e1b59..5627b00fca29 100644 --- a/lib/xz/xz_crc32.c +++ b/lib/xz/xz_crc32.c @@ -29,7 +29,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
XZ_EXTERN void xz_crc32_init(void) { - const uint32_t poly = CRC32_POLY_LE; + const uint32_t poly = 0xEDB88320;
uint32_t i; uint32_t j; diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h index bf1e94ec7873..d9fd49b45fd7 100644 --- a/lib/xz/xz_private.h +++ b/lib/xz/xz_private.h @@ -105,10 +105,6 @@ # endif #endif
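The xz change stops depending on CRC32_POLY_LE from the kernel headers and writes the reflected CRC-32 polynomial 0xEDB88320 directly into the decompressor. For reference, the usual table-driven construction with that polynomial looks like the compact standalone version below (not the kernel's code; 0xCBF43926 is the well-known check value for "123456789"):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_table[256];

static void crc32_init(void)
{
	const uint32_t poly = 0xEDB88320;	/* reflected CRC-32 polynomial */
	uint32_t i, j, r;

	for (i = 0; i < 256; i++) {
		r = i;
		for (j = 0; j < 8; j++)
			r = (r >> 1) ^ (poly & ~((r & 1) - 1));
		crc32_table[i] = r;
	}
}

static uint32_t crc32(const uint8_t *buf, size_t len)
{
	uint32_t crc = ~0u;

	while (len--)
		crc = crc32_table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
	return ~crc;
}

int main(void)
{
	const char *s = "123456789";

	crc32_init();
	printf("%08x\n", crc32((const uint8_t *)s, strlen(s)));	/* cbf43926 */
	return 0;
}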
-#ifndef CRC32_POLY_LE -#define CRC32_POLY_LE 0xedb88320 -#endif - /* * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used * before calling xz_dec_lzma2_run(). diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 58829baf8b5d..a0036dc78a3b 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -126,6 +126,7 @@ static int __damon_va_three_regions(struct mm_struct *mm, * If this is too slow, it can be optimised to examine the maple * tree gaps. */ + rcu_read_lock(); for_each_vma(vmi, vma) { unsigned long gap;
@@ -146,6 +147,7 @@ static int __damon_va_three_regions(struct mm_struct *mm, next: prev = vma; } + rcu_read_unlock();
if (!sz_range(&second_gap) || !sz_range(&first_gap)) return -EINVAL; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 67c86a5d64a6..99b146d16a18 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -220,6 +220,8 @@ static bool get_huge_zero_page(void) count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); return false; } + /* Ensure zero folio won't have large_rmappable flag set. */ + folio_clear_large_rmappable(zero_folio); preempt_disable(); if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) { preempt_enable(); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index aaf508be0a2b..c4176574ed9e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3921,100 +3921,124 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, return 0; }
-static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) +static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, + struct list_head *src_list) { - int i, nid = folio_nid(folio); - struct hstate *target_hstate; - struct page *subpage; - struct folio *inner_folio; - int rc = 0; - - target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); + long rc; + struct folio *folio, *next; + LIST_HEAD(dst_list); + LIST_HEAD(ret_list);
- remove_hugetlb_folio(h, folio, false); - spin_unlock_irq(&hugetlb_lock); - - /* - * If vmemmap already existed for folio, the remove routine above would - * have cleared the hugetlb folio flag. Hence the folio is technically - * no longer a hugetlb folio. hugetlb_vmemmap_restore_folio can only be - * passed hugetlb folios and will BUG otherwise. - */ - if (folio_test_hugetlb(folio)) { - rc = hugetlb_vmemmap_restore_folio(h, folio); - if (rc) { - /* Allocation of vmemmmap failed, we can not demote folio */ - spin_lock_irq(&hugetlb_lock); - add_hugetlb_folio(h, folio, false); - return rc; - } - } - - /* - * Use destroy_compound_hugetlb_folio_for_demote for all huge page - * sizes as it will not ref count folios. - */ - destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h)); + rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list); + list_splice_init(&ret_list, src_list);
/* * Taking target hstate mutex synchronizes with set_max_huge_pages. * Without the mutex, pages added to target hstate could be marked * as surplus. * - * Note that we already hold h->resize_lock. To prevent deadlock, + * Note that we already hold src->resize_lock. To prevent deadlock, * use the convention of always taking larger size hstate mutex first. */ - mutex_lock(&target_hstate->resize_lock); - for (i = 0; i < pages_per_huge_page(h); - i += pages_per_huge_page(target_hstate)) { - subpage = folio_page(folio, i); - inner_folio = page_folio(subpage); - if (hstate_is_gigantic(target_hstate)) - prep_compound_gigantic_folio_for_demote(inner_folio, - target_hstate->order); - else - prep_compound_page(subpage, target_hstate->order); - folio_change_private(inner_folio, NULL); - prep_new_hugetlb_folio(target_hstate, inner_folio, nid); - free_huge_folio(inner_folio); + mutex_lock(&dst->resize_lock); + + list_for_each_entry_safe(folio, next, src_list, lru) { + int i; + + if (folio_test_hugetlb_vmemmap_optimized(folio)) + continue; + + list_del(&folio->lru); + /* + * Use destroy_compound_hugetlb_folio_for_demote for all huge page + * sizes as it will not ref count folios. + */ + destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(src)); + + for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { + struct page *page = folio_page(folio, i); + + if (hstate_is_gigantic(dst)) + prep_compound_gigantic_folio_for_demote(page_folio(page), + dst->order); + else + prep_compound_page(page, dst->order); + set_page_private(page, 0); + + init_new_hugetlb_folio(dst, page_folio(page)); + list_add(&page->lru, &dst_list); + } } - mutex_unlock(&target_hstate->resize_lock);
- spin_lock_irq(&hugetlb_lock); + prep_and_add_allocated_folios(dst, &dst_list);
- /* - * Not absolutely necessary, but for consistency update max_huge_pages - * based on pool changes for the demoted page. - */ - h->max_huge_pages--; - target_hstate->max_huge_pages += - pages_per_huge_page(h) / pages_per_huge_page(target_hstate); + mutex_unlock(&dst->resize_lock);
return rc; }
-static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) +static long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed, + unsigned long nr_to_demote) __must_hold(&hugetlb_lock) { int nr_nodes, node; - struct folio *folio; + struct hstate *dst; + long rc = 0; + long nr_demoted = 0;
lockdep_assert_held(&hugetlb_lock);
/* We should never get here if no demote order */ - if (!h->demote_order) { + if (!src->demote_order) { pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); return -EINVAL; /* internal error */ } + dst = size_to_hstate(PAGE_SIZE << src->demote_order);
- for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { - list_for_each_entry(folio, &h->hugepage_freelists[node], lru) { + for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) { + LIST_HEAD(list); + struct folio *folio, *next; + + list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) { if (folio_test_hwpoison(folio)) continue; - return demote_free_hugetlb_folio(h, folio); + + remove_hugetlb_folio(src, folio, false); + list_add(&folio->lru, &list); + + if (++nr_demoted == nr_to_demote) + break; } + + spin_unlock_irq(&hugetlb_lock); + + rc = demote_free_hugetlb_folios(src, dst, &list); + + spin_lock_irq(&hugetlb_lock); + + list_for_each_entry_safe(folio, next, &list, lru) { + list_del(&folio->lru); + add_hugetlb_folio(src, folio, false); + + nr_demoted--; + } + + if (rc < 0 || nr_demoted == nr_to_demote) + break; }
+ /* + * Not absolutely necessary, but for consistency update max_huge_pages + * based on pool changes for the demoted page. + */ + src->max_huge_pages -= nr_demoted; + dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst)); + + if (rc < 0) + return rc; + + if (nr_demoted) + return nr_demoted; /* * Only way to get here is if all pages on free lists are poisoned. * Return -EBUSY so that caller will not retry. @@ -4249,6 +4273,8 @@ static ssize_t demote_store(struct kobject *kobj, spin_lock_irq(&hugetlb_lock);
while (nr_demote) { + long rc; + /* * Check for available pages to demote each time thorough the * loop as demote_pool_huge_page will drop hugetlb_lock. @@ -4261,11 +4287,13 @@ static ssize_t demote_store(struct kobject *kobj, if (!nr_available) break;
- err = demote_pool_huge_page(h, n_mask); - if (err) + rc = demote_pool_huge_page(h, n_mask, nr_demote); + if (rc < 0) { + err = rc; break; + }
- nr_demote--; + nr_demote -= rc; }
spin_unlock_irq(&hugetlb_lock); @@ -6048,7 +6076,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, * When the original hugepage is shared one, it does not have * anon_vma prepared. */ - ret = vmf_anon_prepare(vmf); + ret = __vmf_anon_prepare(vmf); if (unlikely(ret)) goto out_release_all;
@@ -6247,7 +6275,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping, }
if (!(vma->vm_flags & VM_MAYSHARE)) { - ret = vmf_anon_prepare(vmf); + ret = __vmf_anon_prepare(vmf); if (unlikely(ret)) goto out; } @@ -6378,6 +6406,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping, folio_unlock(folio); out: hugetlb_vma_unlock_read(vma); + + /* + * We must check to release the per-VMA lock. __vmf_anon_prepare() is + * the only way ret can be set to VM_FAULT_RETRY. + */ + if (unlikely(ret & VM_FAULT_RETRY)) + vma_end_read(vma); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); return ret;
@@ -6599,6 +6635,14 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, } out_mutex: hugetlb_vma_unlock_read(vma); + + /* + * We must check to release the per-VMA lock. __vmf_anon_prepare() in + * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY. + */ + if (unlikely(ret & VM_FAULT_RETRY)) + vma_end_read(vma); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); /* * Generally it's safe to hold refcount during waiting page lock. But diff --git a/mm/internal.h b/mm/internal.h index b4d86436565b..a963f67d3452 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -310,7 +310,16 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat) wake_up(wqh); }
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf); +vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf); +static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf) +{ + vm_fault_t ret = __vmf_anon_prepare(vmf); + + if (unlikely(ret & VM_FAULT_RETRY)) + vma_end_read(vmf->vma); + return ret; +} + vm_fault_t do_swap_page(struct vm_fault *vmf); void folio_rotate_reclaimable(struct folio *folio); bool __folio_end_writeback(struct folio *folio); diff --git a/mm/memory.c b/mm/memory.c index ebfc9768f801..cda2c12c500b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3276,7 +3276,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) }
/** - * vmf_anon_prepare - Prepare to handle an anonymous fault. + * __vmf_anon_prepare - Prepare to handle an anonymous fault. * @vmf: The vm_fault descriptor passed from the fault handler. * * When preparing to insert an anonymous page into a VMA from a @@ -3290,7 +3290,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) * Return: 0 if fault handling can proceed. Any other value should be * returned to the caller. */ -vm_fault_t vmf_anon_prepare(struct vm_fault *vmf) +vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; vm_fault_t ret = 0; @@ -3298,10 +3298,8 @@ vm_fault_t vmf_anon_prepare(struct vm_fault *vmf) if (likely(vma->anon_vma)) return 0; if (vmf->flags & FAULT_FLAG_VMA_LOCK) { - if (!mmap_read_trylock(vma->vm_mm)) { - vma_end_read(vma); + if (!mmap_read_trylock(vma->vm_mm)) return VM_FAULT_RETRY; - } } if (__anon_vma_prepare(vma)) ret = VM_FAULT_OOM; diff --git a/mm/migrate.c b/mm/migrate.c index 923ea80ba744..368ab3878fa6 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1118,7 +1118,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio, int rc = -EAGAIN; int old_page_state = 0; struct anon_vma *anon_vma = NULL; - bool is_lru = !__folio_test_movable(src); + bool is_lru = data_race(!__folio_test_movable(src)); bool locked = false; bool dst_locked = false;
diff --git a/mm/mmap.c b/mm/mmap.c index d0dfc85b209b..18fddcce03b8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3198,8 +3198,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, flags |= MAP_LOCKED;
file = get_file(vma->vm_file); + ret = security_mmap_file(vma->vm_file, prot, flags); + if (ret) + goto out_fput; ret = do_mmap(vma->vm_file, start, size, prot, flags, 0, pgoff, &populate, NULL); +out_fput: fput(file); out: mmap_write_unlock(mm); diff --git a/mm/util.c b/mm/util.c index bd283e2132e0..baca6cafc9f1 100644 --- a/mm/util.c +++ b/mm/util.c @@ -463,7 +463,7 @@ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) if (gap + pad > gap) gap += pad;
- if (gap < MIN_GAP) + if (gap < MIN_GAP && MIN_GAP < MAX_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index c82502e213a8..58b528df1a86 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -106,8 +106,7 @@ void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status) * where a timeout + cancel does indicate an actual failure. */ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, status); + mgmt_connect_failed(hdev, conn, status);
/* The connection attempt was doing scan for new RPA, and is * in scan phase. If params are not associated with any other @@ -1250,8 +1249,7 @@ void hci_conn_failed(struct hci_conn *conn, u8 status) hci_le_conn_failed(conn, status); break; case ACL_LINK: - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, status); + mgmt_connect_failed(hdev, conn, status); break; }
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 5533e6f561b3..40ccdef168d7 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -5380,7 +5380,10 @@ int hci_stop_discovery_sync(struct hci_dev *hdev) if (!e) return 0;
- return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); + /* Ignore cancel errors since it shouldn't interfere with stopping + * of the discovery. + */ + hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); }
return 0; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 279902e8bd8a..e4f564d6f6fb 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -9779,13 +9779,18 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, mgmt_pending_remove(cmd); }
-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u8 status) +void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status) { struct mgmt_ev_connect_failed ev;
- bacpy(&ev.addr.bdaddr, bdaddr); - ev.addr.type = link_to_bdaddr(link_type, addr_type); + if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { + mgmt_device_disconnected(hdev, &conn->dst, conn->type, + conn->dst_type, status, true); + return; + } + + bacpy(&ev.addr.bdaddr, &conn->dst); + ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type); ev.status = mgmt_status(status);
mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); diff --git a/net/can/bcm.c b/net/can/bcm.c index 46d3ec3aa44b..217049fa496e 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1471,8 +1471,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg, /* remove device reference, if this is our bound device */ if (bo->bound && bo->ifindex == dev->ifindex) { #if IS_ENABLED(CONFIG_PROC_FS) - if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) + if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) { remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir); + bo->bcm_proc_read = NULL; + } #endif bo->bound = 0; bo->ifindex = 0; diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 4be73de5033c..319f47df3330 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -1179,10 +1179,10 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer) break; case -ENETDOWN: /* In this case we should get a netdev_event(), all active - * sessions will be cleared by - * j1939_cancel_all_active_sessions(). So handle this as an - * error, but let j1939_cancel_all_active_sessions() do the - * cleanup including propagation of the error to user space. + * sessions will be cleared by j1939_cancel_active_session(). + * So handle this as an error, but let + * j1939_cancel_active_session() do the cleanup including + * propagation of the error to user space. */ break; case -EOVERFLOW: diff --git a/net/core/filter.c b/net/core/filter.c index f3c72cf86099..0e719c7c43bb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6262,20 +6262,25 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, int ret = BPF_MTU_CHK_RET_FRAG_NEEDED; struct net_device *dev = skb->dev; int skb_len, dev_len; - int mtu; + int mtu = 0;
- if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) - return -EINVAL; + if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) { + ret = -EINVAL; + goto out; + }
- if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) - return -EINVAL; + if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) { + ret = -EINVAL; + goto out; + }
dev = __dev_via_ifindex(dev, ifindex); - if (unlikely(!dev)) - return -ENODEV; + if (unlikely(!dev)) { + ret = -ENODEV; + goto out; + }
mtu = READ_ONCE(dev->mtu); - dev_len = mtu + dev->hard_header_len;
/* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ @@ -6293,15 +6298,12 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb, */ if (skb_is_gso(skb)) { ret = BPF_MTU_CHK_RET_SUCCESS; - if (flags & BPF_MTU_CHK_SEGS && !skb_gso_validate_network_len(skb, mtu)) ret = BPF_MTU_CHK_RET_SEGS_TOOBIG; } out: - /* BPF verifier guarantees valid pointer */ *mtu_len = mtu; - return ret; }
@@ -6311,19 +6313,21 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, struct net_device *dev = xdp->rxq->dev; int xdp_len = xdp->data_end - xdp->data; int ret = BPF_MTU_CHK_RET_SUCCESS; - int mtu, dev_len; + int mtu = 0, dev_len;
/* XDP variant doesn't support multi-buffer segment check (yet) */ - if (unlikely(flags)) - return -EINVAL; + if (unlikely(flags)) { + ret = -EINVAL; + goto out; + }
dev = __dev_via_ifindex(dev, ifindex); - if (unlikely(!dev)) - return -ENODEV; + if (unlikely(!dev)) { + ret = -ENODEV; + goto out; + }
mtu = READ_ONCE(dev->mtu); - - /* Add L2-header as dev MTU is L3 size */ dev_len = mtu + dev->hard_header_len;
/* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ @@ -6333,10 +6337,8 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp, xdp_len += len_diff; /* minus result pass check */ if (xdp_len > dev_len) ret = BPF_MTU_CHK_RET_FRAG_NEEDED; - - /* BPF verifier guarantees valid pointer */ +out: *mtu_len = mtu; - return ret; }
@@ -6346,7 +6348,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_INT, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u32), .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6357,7 +6360,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_INT, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, + .arg3_size = sizeof(u32), .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -8579,13 +8583,16 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type if (off + size > offsetofend(struct __sk_buff, cb[4])) return false; break; + case bpf_ctx_range(struct __sk_buff, data): + case bpf_ctx_range(struct __sk_buff, data_meta): + case bpf_ctx_range(struct __sk_buff, data_end): + if (info->is_ldsx || size != size_default) + return false; + break; case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): - case bpf_ctx_range(struct __sk_buff, data): - case bpf_ctx_range(struct __sk_buff, data_meta): - case bpf_ctx_range(struct __sk_buff, data_end): if (size != size_default) return false; break; @@ -9029,6 +9036,14 @@ static bool xdp_is_valid_access(int off, int size, } } return false; + } else { + switch (off) { + case offsetof(struct xdp_md, data_meta): + case offsetof(struct xdp_md, data): + case offsetof(struct xdp_md, data_end): + if (info->is_ldsx) + return false; + } }
switch (off) { @@ -9354,12 +9369,12 @@ static bool flow_dissector_is_valid_access(int off, int size,
switch (off) { case bpf_ctx_range(struct __sk_buff, data): - if (size != size_default) + if (info->is_ldsx || size != size_default) return false; info->reg_type = PTR_TO_PACKET; return true; case bpf_ctx_range(struct __sk_buff, data_end): - if (size != size_default) + if (info->is_ldsx || size != size_default) return false; info->reg_type = PTR_TO_PACKET_END; return true; diff --git a/net/core/sock_map.c b/net/core/sock_map.c index d3dbb92153f2..724b6856fcc3 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -1183,6 +1183,7 @@ static void sock_hash_free(struct bpf_map *map) sock_put(elem->sk); sock_hash_free_elem(htab, elem); } + cond_resched(); }
/* wait for psock readers accessing its map link */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index ab6d0d98dbc3..336518e623b2 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -224,57 +224,59 @@ int sysctl_icmp_msgs_per_sec __read_mostly = 1000; int sysctl_icmp_msgs_burst __read_mostly = 50;
static struct { - spinlock_t lock; - u32 credit; + atomic_t credit; u32 stamp; -} icmp_global = { - .lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock), -}; +} icmp_global;
/** * icmp_global_allow - Are we allowed to send one more ICMP message ? * * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec. * Returns false if we reached the limit and can not send another packet. - * Note: called with BH disabled + * Works in tandem with icmp_global_consume(). */ bool icmp_global_allow(void) { - u32 credit, delta, incr = 0, now = (u32)jiffies; - bool rc = false; + u32 delta, now, oldstamp; + int incr, new, old;
- /* Check if token bucket is empty and cannot be refilled - * without taking the spinlock. The READ_ONCE() are paired - * with the following WRITE_ONCE() in this same function. + /* Note: many cpus could find this condition true. + * Then later icmp_global_consume() could consume more credits, + * this is an acceptable race. */ - if (!READ_ONCE(icmp_global.credit)) { - delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ); - if (delta < HZ / 50) - return false; - } + if (atomic_read(&icmp_global.credit) > 0) + return true;
- spin_lock(&icmp_global.lock); - delta = min_t(u32, now - icmp_global.stamp, HZ); - if (delta >= HZ / 50) { - incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ; - if (incr) - WRITE_ONCE(icmp_global.stamp, now); - } - credit = min_t(u32, icmp_global.credit + incr, - READ_ONCE(sysctl_icmp_msgs_burst)); - if (credit) { - /* We want to use a credit of one in average, but need to randomize - * it for security reasons. - */ - credit = max_t(int, credit - get_random_u32_below(3), 0); - rc = true; + now = jiffies; + oldstamp = READ_ONCE(icmp_global.stamp); + delta = min_t(u32, now - oldstamp, HZ); + if (delta < HZ / 50) + return false; + + incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ; + if (!incr) + return false; + + if (cmpxchg(&icmp_global.stamp, oldstamp, now) == oldstamp) { + old = atomic_read(&icmp_global.credit); + do { + new = min(old + incr, READ_ONCE(sysctl_icmp_msgs_burst)); + } while (!atomic_try_cmpxchg(&icmp_global.credit, &old, new)); } - WRITE_ONCE(icmp_global.credit, credit); - spin_unlock(&icmp_global.lock); - return rc; + return true; } EXPORT_SYMBOL(icmp_global_allow);
+void icmp_global_consume(void) +{ + int credits = get_random_u32_below(3); + + /* Note: this might make icmp_global.credit negative. */ + if (credits) + atomic_sub(credits, &icmp_global.credit); +} +EXPORT_SYMBOL(icmp_global_consume); + static bool icmpv4_mask_allow(struct net *net, int type, int code) { if (type > NR_ICMP_TYPES) @@ -291,14 +293,16 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code) return false; }
-static bool icmpv4_global_allow(struct net *net, int type, int code) +static bool icmpv4_global_allow(struct net *net, int type, int code, + bool *apply_ratelimit) { if (icmpv4_mask_allow(net, type, code)) return true;
- if (icmp_global_allow()) + if (icmp_global_allow()) { + *apply_ratelimit = true; return true; - + } __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL); return false; } @@ -308,15 +312,16 @@ static bool icmpv4_global_allow(struct net *net, int type, int code) */
static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, - struct flowi4 *fl4, int type, int code) + struct flowi4 *fl4, int type, int code, + bool apply_ratelimit) { struct dst_entry *dst = &rt->dst; struct inet_peer *peer; bool rc = true; int vif;
- if (icmpv4_mask_allow(net, type, code)) - goto out; + if (!apply_ratelimit) + return true;
/* No rate limit on loopback */ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) @@ -331,6 +336,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, out: if (!rc) __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST); + else + icmp_global_consume(); return rc; }
@@ -402,6 +409,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) struct ipcm_cookie ipc; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); + bool apply_ratelimit = false; struct flowi4 fl4; struct sock *sk; struct inet_sock *inet; @@ -413,11 +421,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb)) return;
- /* Needed by both icmp_global_allow and icmp_xmit_lock */ + /* Needed by both icmpv4_global_allow and icmp_xmit_lock */ local_bh_disable();
- /* global icmp_msgs_per_sec */ - if (!icmpv4_global_allow(net, type, code)) + /* is global icmp_msgs_per_sec exhausted ? */ + if (!icmpv4_global_allow(net, type, code, &apply_ratelimit)) goto out_bh_enable;
sk = icmp_xmit_lock(net); @@ -450,7 +458,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) goto out_unlock; - if (icmpv4_xrlim_allow(net, rt, &fl4, type, code)) + if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit)) icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt); ip_rt_put(rt); out_unlock: @@ -596,6 +604,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); + bool apply_ratelimit = false; struct ipcm_cookie ipc; struct flowi4 fl4; __be32 saddr; @@ -677,7 +686,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, } }
- /* Needed by both icmp_global_allow and icmp_xmit_lock */ + /* Needed by both icmpv4_global_allow and icmp_xmit_lock */ local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless @@ -685,7 +694,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow) */ if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) && - !icmpv4_global_allow(net, type, code)) + !icmpv4_global_allow(net, type, code, &apply_ratelimit)) goto out_bh_enable;
sk = icmp_xmit_lock(net); @@ -744,7 +753,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, goto out_unlock;
/* peer icmp_ratelimit */ - if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code)) + if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit)) goto ende;
/* RFC says return as much as we can without exceeding 576 bytes. */ diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 08d4b7132d4c..1c9c686d9522 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -323,6 +323,7 @@ config IPV6_RPL_LWTUNNEL bool "IPv6: RPL Source Routing Header support" depends on IPV6 select LWTUNNEL + select DST_CACHE help Support for RFC6554 RPL Source Routing Header using the lightweight tunnels mechanism. diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 7b31674644ef..46f70e4a8351 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -175,14 +175,16 @@ static bool icmpv6_mask_allow(struct net *net, int type) return false; }
-static bool icmpv6_global_allow(struct net *net, int type) +static bool icmpv6_global_allow(struct net *net, int type, + bool *apply_ratelimit) { if (icmpv6_mask_allow(net, type)) return true;
- if (icmp_global_allow()) + if (icmp_global_allow()) { + *apply_ratelimit = true; return true; - + } __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL); return false; } @@ -191,13 +193,13 @@ static bool icmpv6_global_allow(struct net *net, int type) * Check the ICMP output rate limit */ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, - struct flowi6 *fl6) + struct flowi6 *fl6, bool apply_ratelimit) { struct net *net = sock_net(sk); struct dst_entry *dst; bool res = false;
- if (icmpv6_mask_allow(net, type)) + if (!apply_ratelimit) return true;
/* @@ -228,6 +230,8 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, if (!res) __ICMP6_INC_STATS(net, ip6_dst_idev(dst), ICMP6_MIB_RATELIMITHOST); + else + icmp_global_consume(); dst_release(dst); return res; } @@ -452,6 +456,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, struct net *net; struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; + bool apply_ratelimit = false; struct dst_entry *dst; struct icmp6hdr tmp_hdr; struct flowi6 fl6; @@ -533,11 +538,12 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, return; }
- /* Needed by both icmp_global_allow and icmpv6_xmit_lock */ + /* Needed by both icmpv6_global_allow and icmpv6_xmit_lock */ local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit */ - if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type)) + if (!(skb->dev->flags & IFF_LOOPBACK) && + !icmpv6_global_allow(net, type, &apply_ratelimit)) goto out_bh_enable;
mip6_addr_swap(skb, parm); @@ -575,7 +581,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
np = inet6_sk(sk);
- if (!icmpv6_xrlim_allow(sk, type, &fl6)) + if (!icmpv6_xrlim_allow(sk, type, &fl6, apply_ratelimit)) goto out;
tmp_hdr.icmp6_type = type; @@ -717,6 +723,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb) struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; struct icmp6hdr *icmph = icmp6_hdr(skb); + bool apply_ratelimit = false; struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; @@ -781,8 +788,9 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb) goto out;
/* Check the ratelimit */ - if ((!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY)) || - !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6)) + if ((!(skb->dev->flags & IFF_LOOPBACK) && + !icmpv6_global_allow(net, ICMPV6_ECHO_REPLY, &apply_ratelimit)) || + !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6, apply_ratelimit)) goto out_dst_release;
idev = __in6_dev_get(skb->dev); diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index dedee264b8f6..b9457473c176 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -223,33 +223,23 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, const struct tcphdr *oth, unsigned int otcplen) { struct tcphdr *tcph; - int needs_ack;
skb_reset_transport_header(nskb); - tcph = skb_put(nskb, sizeof(struct tcphdr)); + tcph = skb_put_zero(nskb, sizeof(struct tcphdr)); /* Truncate to length (no data) */ tcph->doff = sizeof(struct tcphdr)/4; tcph->source = oth->dest; tcph->dest = oth->source;
if (oth->ack) { - needs_ack = 0; tcph->seq = oth->ack_seq; - tcph->ack_seq = 0; } else { - needs_ack = 1; tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + otcplen - (oth->doff<<2)); - tcph->seq = 0; + tcph->ack = 1; }
- /* Reset flags */ - ((u_int8_t *)tcph)[13] = 0; tcph->rst = 1; - tcph->ack = needs_ack; - tcph->window = 0; - tcph->urg_ptr = 0; - tcph->check = 0;
/* Adjust TCP checksum */ tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 219701caba1e..b4dcd8f3e7ba 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -174,7 +174,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev) struct net_device *rt_dev = rt->dst.dev; bool handled = false;
- if (rt_idev->dev == dev) { + if (rt_idev && rt_idev->dev == dev) { rt->rt6i_idev = in6_dev_get(blackhole_netdev); in6_dev_put(rt_idev); handled = true; diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index 2c83b7586422..db3c19a42e1c 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -263,10 +263,8 @@ static int rpl_input(struct sk_buff *skb) rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
err = rpl_do_srh(skb, rlwt); - if (unlikely(err)) { - kfree_skb(skb); - return err; - } + if (unlikely(err)) + goto drop;
local_bh_disable(); dst = dst_cache_get(&rlwt->cache); @@ -286,9 +284,13 @@ static int rpl_input(struct sk_buff *skb)
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); if (unlikely(err)) - return err; + goto drop;
return dst_input(skb); + +drop: + kfree_skb(skb); + return err; }
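The rpl_input() fix funnels every failure branch through one drop label so the skb is freed exactly once, while the success path hands ownership on to dst_input(). The same ownership rule in a small standalone sketch (names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int validate(const char *buf)
{
	return strlen(buf) > 3 ? 0 : -1;
}

static void consume(char *buf)
{
	printf("consumed: %s\n", buf);	/* ownership transferred here */
	free(buf);
}

/* Either hand the buffer on, or free it exactly once via the drop label. */
static int process(const char *src)
{
	char *buf = strdup(src);
	int err;

	if (!buf)
		return -1;

	err = validate(buf);
	if (err)
		goto drop;

	consume(buf);		/* success: consumer now owns buf */
	return 0;

drop:
	free(buf);		/* every failure path funnels through here */
	return err;
}

int main(void)
{
	process("ok-packet");
	process("no");		/* fails validation, freed via drop */
	return 0;
}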
static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b4ad66af3af3..f735e41560a3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -462,6 +462,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do { struct ieee80211_local *local = sdata->local; unsigned long flags; + struct sk_buff_head freeq; struct sk_buff *skb, *tmp; u32 hw_reconf_flags = 0; int i, flushed; @@ -637,18 +638,32 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do skb_queue_purge(&sdata->status_queue); }
+ /* + * Since ieee80211_free_txskb() may issue __dev_queue_xmit() + * which should be called with interrupts enabled, reclamation + * is done in two phases: + */ + __skb_queue_head_init(&freeq); + + /* unlink from local queues... */ spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_walk_safe(&local->pending[i], skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (info->control.vif == &sdata->vif) { __skb_unlink(skb, &local->pending[i]); - ieee80211_free_txskb(&local->hw, skb); + __skb_queue_tail(&freeq, skb); } } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ /* ... and perform actual reclamation with interrupts enabled. */ + skb_queue_walk_safe(&freeq, skb, tmp) { + __skb_unlink(skb, &freeq); + ieee80211_free_txskb(&local->hw, skb); + } + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ieee80211_txq_remove_vlan(local, sdata);
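The iface.c change above reclaims pending frames in two phases: they are unlinked onto a private freeq while the queue lock is held with IRQs off, and only freed after the lock is dropped, because ieee80211_free_txskb() may end up in __dev_queue_xmit(), which needs interrupts enabled. The splice-then-process shape as a standalone pthread sketch (names invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;

static void enqueue(int id)
{
	struct node *n = malloc(sizeof(*n));

	n->id = id;
	pthread_mutex_lock(&queue_lock);
	n->next = pending;
	pending = n;
	pthread_mutex_unlock(&queue_lock);
}

static void heavyweight_free(struct node *n)
{
	/* Stand-in for work that must not run under queue_lock. */
	printf("freeing %d\n", n->id);
	free(n);
}

static void purge(void)
{
	struct node *freeq, *n;

	/* Phase 1: detach the whole list while holding the lock. */
	pthread_mutex_lock(&queue_lock);
	freeq = pending;
	pending = NULL;
	pthread_mutex_unlock(&queue_lock);

	/* Phase 2: do the expensive part with the lock dropped. */
	while ((n = freeq)) {
		freeq = n->next;
		heavyweight_free(n);
	}
}

int main(void)
{
	enqueue(1);
	enqueue(2);
	purge();
	return 0;
}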
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f9526bbc3633..9e3d2ed9cf67 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4715,7 +4715,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, ((assoc_data->wmm && !elems->wmm_param) || (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT && (!elems->ht_cap_elem || !elems->ht_operation)) || - (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT && + (is_5ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT && (!elems->vht_cap_elem || !elems->vht_operation)))) { const struct cfg80211_bss_ies *ies; struct ieee802_11_elems *bss_elems; @@ -4763,19 +4763,22 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, sdata_info(sdata, "AP bug: HT operation missing from AssocResp\n"); } - if (!elems->vht_cap_elem && bss_elems->vht_cap_elem && - link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) { - elems->vht_cap_elem = bss_elems->vht_cap_elem; - sdata_info(sdata, - "AP bug: VHT capa missing from AssocResp\n"); - } - if (!elems->vht_operation && bss_elems->vht_operation && - link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) { - elems->vht_operation = bss_elems->vht_operation; - sdata_info(sdata, - "AP bug: VHT operation missing from AssocResp\n"); - }
+ if (is_5ghz) { + if (!elems->vht_cap_elem && bss_elems->vht_cap_elem && + link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) { + elems->vht_cap_elem = bss_elems->vht_cap_elem; + sdata_info(sdata, + "AP bug: VHT capa missing from AssocResp\n"); + } + + if (!elems->vht_operation && bss_elems->vht_operation && + link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) { + elems->vht_operation = bss_elems->vht_operation; + sdata_info(sdata, + "AP bug: VHT operation missing from AssocResp\n"); + } + } kfree(bss_elems); }
@@ -7660,6 +7663,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) lockdep_assert_wiphy(sdata->local->hw.wiphy);
assoc_data->tries++; + assoc_data->comeback = false; if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { sdata_info(sdata, "association with %pM timed out\n", assoc_data->ap_addr); diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 28d03196ef75..29fab7ae47b4 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -997,6 +997,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, }
IEEE80211_SKB_CB(skb)->flags = flags; + IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
skb->dev = sdata->dev;
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 4dc1def69548..3dc9752188d5 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -890,7 +890,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif, if (ieee80211_is_tx_data(skb)) rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
- if (!(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) + if (!(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) mask = sdata->rc_rateidx_mask[info->band];
if (dest[0].idx < 0) diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index b5f2df61c7f6..1c5d99975ad0 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -649,7 +649,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); } IEEE80211_SKB_CB(skb)->flags |= tx_flags; - IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_SCAN_TX; + IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK; ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); } } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index bca7b341dd77..a9ee86982259 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -699,7 +699,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) txrc.skb = tx->skb; txrc.reported_rate.idx = -1;
- if (unlikely(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) { + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) { txrc.rate_idx_mask = ~0; } else { txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 4cbf71d0786b..c55cf5bc36b2 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -382,7 +382,7 @@ static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) #define ctnetlink_dump_secctx(a, b) (0) #endif
-#ifdef CONFIG_NF_CONNTRACK_LABELS +#ifdef CONFIG_NF_CONNTRACK_EVENTS static inline int ctnetlink_label_size(const struct nf_conn *ct) { struct nf_conn_labels *labels = nf_ct_labels_find(ct); @@ -391,6 +391,7 @@ static inline int ctnetlink_label_size(const struct nf_conn *ct) return 0; return nla_total_size(sizeof(labels->bits)); } +#endif
static int ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct) @@ -411,10 +412,6 @@ ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
return 0; } -#else -#define ctnetlink_dump_labels(a, b) (0) -#define ctnetlink_label_size(a) (0) -#endif
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 0a2f79346958..472f211472db 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -393,6 +393,7 @@ static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *tr { struct nftables_pernet *nft_net = nft_pernet(net); struct nft_trans_binding *binding; + struct nft_trans_set *trans_set;
list_add_tail(&trans->list, &nft_net->commit_list);
@@ -402,9 +403,13 @@ static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *tr
switch (trans->msg_type) { case NFT_MSG_NEWSET: + trans_set = nft_trans_container_set(trans); + if (!nft_trans_set_update(trans) && nft_set_is_anonymous(nft_trans_set(trans))) list_add_tail(&binding->binding_list, &nft_net->binding_list); + + list_add_tail(&trans_set->list_trans_newset, &nft_net->commit_set_list); break; case NFT_MSG_NEWCHAIN: if (!nft_trans_chain_update(trans) && @@ -611,6 +616,7 @@ static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
trans_set = nft_trans_container_set(trans); INIT_LIST_HEAD(&trans_set->nft_trans_binding.binding_list); + INIT_LIST_HEAD(&trans_set->list_trans_newset);
if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) { nft_trans_set_id(trans) = @@ -1841,7 +1847,7 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family, if (!hook_list) hook_list = &basechain->hook_list;
- list_for_each_entry(hook, hook_list, list) { + list_for_each_entry_rcu(hook, hook_list, list) { if (!first) first = hook;
@@ -4485,17 +4491,16 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net, { struct nftables_pernet *nft_net = nft_pernet(net); u32 id = ntohl(nla_get_be32(nla)); - struct nft_trans *trans; + struct nft_trans_set *trans;
- list_for_each_entry(trans, &nft_net->commit_list, list) { - if (trans->msg_type == NFT_MSG_NEWSET) { - struct nft_set *set = nft_trans_set(trans); + /* its likely the id we need is at the tail, not at start */ + list_for_each_entry_reverse(trans, &nft_net->commit_set_list, list_trans_newset) { + struct nft_set *set = trans->set;
- if (id == nft_trans_set_id(trans) && - set->table == table && - nft_active_genmask(set, genmask)) - return set; - } + if (id == trans->set_id && + set->table == table && + nft_active_genmask(set, genmask)) + return set; } return ERR_PTR(-ENOENT); } @@ -4587,7 +4592,7 @@ int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) return -ERANGE;
ms *= NSEC_PER_MSEC; - *result = nsecs_to_jiffies64(ms); + *result = nsecs_to_jiffies64(ms) ? : !!ms; return 0; }
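The nf_msecs_to_jiffies64() change guarantees that a small but nonzero userspace timeout no longer truncates to 0 jiffies; any nonzero input now yields at least 1. The GNU ?: expression in the patch is shorthand for the rounding rule sketched below (tick length chosen arbitrarily for the example):

#include <stdint.h>
#include <stdio.h>

#define TICK_NS 4000000ULL	/* 4 ms tick, i.e. HZ=250, for the example */

/* Convert ns to ticks, but never let a nonzero duration become 0 ticks. */
static uint64_t ns_to_ticks_min1(uint64_t ns)
{
	uint64_t ticks = ns / TICK_NS;

	return ticks ? ticks : !!ns;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)ns_to_ticks_min1(0));		/* 0 */
	printf("%llu\n", (unsigned long long)ns_to_ticks_min1(1000000));	/* 1, not 0 */
	printf("%llu\n", (unsigned long long)ns_to_ticks_min1(9000000));	/* 2 */
	return 0;
}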
@@ -6674,7 +6679,7 @@ static int nft_setelem_catchall_insert(const struct net *net, } }
- catchall = kmalloc(sizeof(*catchall), GFP_KERNEL); + catchall = kmalloc(sizeof(*catchall), GFP_KERNEL_ACCOUNT); if (!catchall) return -ENOMEM;
@@ -6910,17 +6915,23 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, return err; } else if (set->flags & NFT_SET_TIMEOUT && !(flags & NFT_SET_ELEM_INTERVAL_END)) { - timeout = READ_ONCE(set->timeout); + timeout = set->timeout; }
expiration = 0; if (nla[NFTA_SET_ELEM_EXPIRATION] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) return -EINVAL; + if (timeout == 0) + return -EOPNOTSUPP; + err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_EXPIRATION], &expiration); if (err) return err; + + if (expiration > timeout) + return -ERANGE; }
if (nla[NFTA_SET_ELEM_EXPR]) { @@ -7011,7 +7022,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (err < 0) goto err_parse_key_end;
- if (timeout != READ_ONCE(set->timeout)) { + if (timeout != set->timeout) { err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT); if (err < 0) goto err_parse_key_end; @@ -9174,7 +9185,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) flowtable->data.type->setup(&flowtable->data, hook->ops.dev, FLOW_BLOCK_UNBIND); list_del_rcu(&hook->list); - kfree(hook); + kfree_rcu(hook, rcu); } kfree(flowtable->name); module_put(flowtable->data.type->owner); @@ -10447,6 +10458,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_NEWSET: + list_del(&nft_trans_container_set(trans)->list_trans_newset); if (nft_trans_set_update(trans)) { struct nft_set *set = nft_trans_set(trans);
@@ -10755,6 +10767,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; case NFT_MSG_NEWSET: + list_del(&nft_trans_container_set(trans)->list_trans_newset); if (nft_trans_set_update(trans)) { nft_trans_destroy(trans); break; @@ -10850,6 +10863,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) } }
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_set_list)); + nft_set_abort_update(&set_update_list);
synchronize_rcu(); @@ -11519,6 +11534,7 @@ static int __net_init nf_tables_init_net(struct net *net)
INIT_LIST_HEAD(&nft_net->tables); INIT_LIST_HEAD(&nft_net->commit_list); + INIT_LIST_HEAD(&nft_net->commit_set_list); INIT_LIST_HEAD(&nft_net->binding_list); INIT_LIST_HEAD(&nft_net->module_list); INIT_LIST_HEAD(&nft_net->notify_list); @@ -11549,6 +11565,7 @@ static void __net_exit nf_tables_exit_net(struct net *net) gc_seq = nft_gc_seq_begin(nft_net);
WARN_ON_ONCE(!list_empty(&nft_net->commit_list)); + WARN_ON_ONCE(!list_empty(&nft_net->commit_set_list));
if (!list_empty(&nft_net->module_list)) nf_tables_module_autoload_cleanup(net); diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index d3d11dede545..85450f601142 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -536,7 +536,7 @@ nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr, struct xt_match *m = expr->ops->data; int ret;
- priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL); + priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL_ACCOUNT); if (!priv->info) return -ENOMEM;
@@ -810,7 +810,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, goto err; }
- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL); + ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT); if (!ops) { err = -ENOMEM; goto err; @@ -900,7 +900,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, goto err; }
- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL); + ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT); if (!ops) { err = -ENOMEM; goto err; diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index b4ada3ab2167..489a9b34f1ec 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -56,7 +56,7 @@ static struct nft_elem_priv *nft_dynset_new(struct nft_set *set, if (!atomic_add_unless(&set->nelems, 1, set->size)) return NULL;
- timeout = priv->timeout ? : set->timeout; + timeout = priv->timeout ? : READ_ONCE(set->timeout); elem_priv = nft_set_elem_init(set, &priv->tmpl, ®s->data[priv->sreg_key], NULL, ®s->data[priv->sreg_data], @@ -95,7 +95,7 @@ void nft_dynset_eval(const struct nft_expr *expr, expr, regs, &ext)) { if (priv->op == NFT_DYNSET_OP_UPDATE && nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) { - timeout = priv->timeout ? : set->timeout; + timeout = priv->timeout ? : READ_ONCE(set->timeout); *nft_set_ext_expiration(ext) = get_jiffies_64() + timeout; }
@@ -313,7 +313,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx, nft_dynset_ext_add_expr(priv);
if (set->flags & NFT_SET_TIMEOUT) { - if (timeout || set->timeout) { + if (timeout || READ_ONCE(set->timeout)) { nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT); nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION); } diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c index 5defe6e4fd98..e35588137995 100644 --- a/net/netfilter/nft_log.c +++ b/net/netfilter/nft_log.c @@ -163,7 +163,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
nla = tb[NFTA_LOG_PREFIX]; if (nla != NULL) { - priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL); + priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL_ACCOUNT); if (priv->prefix == NULL) return -ENOMEM; nla_strscpy(priv->prefix, nla, nla_len(nla) + 1); diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 9139ce38ea7b..f23faf565b68 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -954,7 +954,7 @@ static int nft_secmark_obj_init(const struct nft_ctx *ctx, if (tb[NFTA_SECMARK_CTX] == NULL) return -EINVAL;
- priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL); + priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL_ACCOUNT); if (!priv->ctx) return -ENOMEM;
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c index 7d29db7c2ac0..bd058babfc82 100644 --- a/net/netfilter/nft_numgen.c +++ b/net/netfilter/nft_numgen.c @@ -66,7 +66,7 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx, if (priv->offset + priv->modulus - 1 < priv->offset) return -EOVERFLOW;
- priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL); + priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL_ACCOUNT); if (!priv->counter) return -ENOMEM;
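The nft_compat, nft_log, nft_meta and nft_numgen hunks above (and the nft_set_pipapo and nft_tunnel ones that follow) switch allocations that userspace configuration can trigger from GFP_KERNEL to GFP_KERNEL_ACCOUNT, so the memory is charged to the configuring task's memory cgroup. A minimal module-style sketch of the same pattern follows; the demo_* names are illustrative and not part of the patch.

/* Illustrative sketch: __GFP_ACCOUNT (via GFP_KERNEL_ACCOUNT) charges the
 * allocation to the current task's memcg, so untrusted configuration from
 * userspace cannot grow unaccounted kernel memory.
 */
#include <linux/module.h>
#include <linux/slab.h>

static char *demo_prefix;

static int __init demo_init(void)
{
	/* Same pattern as the nft_log prefix allocation above. */
	demo_prefix = kzalloc(128, GFP_KERNEL_ACCOUNT);
	if (!demo_prefix)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	kfree(demo_prefix);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");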
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index eb4c4a4ac7ac..7be342b495f5 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -663,7 +663,7 @@ static int pipapo_realloc_mt(struct nft_pipapo_field *f, check_add_overflow(rules, extra, &rules_alloc)) return -EOVERFLOW;
- new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL); + new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL_ACCOUNT); if (!new_mt) return -ENOMEM;
@@ -936,7 +936,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f) return; }
- new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL); + new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT); if (!new_lt) return;
@@ -1212,7 +1212,7 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone, scratch = kzalloc_node(struct_size(scratch, map, bsize_max * 2) + NFT_PIPAPO_ALIGN_HEADROOM, - GFP_KERNEL, cpu_to_node(i)); + GFP_KERNEL_ACCOUNT, cpu_to_node(i)); if (!scratch) { /* On failure, there's no need to undo previous * allocations: this means that some scratch maps have @@ -1427,7 +1427,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old) struct nft_pipapo_match *new; int i;
- new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL); + new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL_ACCOUNT); if (!new) return NULL;
@@ -1457,7 +1457,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old) new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) * src->bsize * sizeof(*dst->lt) + NFT_PIPAPO_ALIGN_HEADROOM, - GFP_KERNEL); + GFP_KERNEL_ACCOUNT); if (!new_lt) goto out_lt;
@@ -1470,7 +1470,8 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (src->rules > 0) { dst->mt = kvmalloc_array(src->rules_alloc, - sizeof(*src->mt), GFP_KERNEL); + sizeof(*src->mt), + GFP_KERNEL_ACCOUNT); if (!dst->mt) goto out_mt;
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 60a76e6e348e..5c6ed68cc6e0 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -509,13 +509,14 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx, return err; }
- md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL); + md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, + GFP_KERNEL_ACCOUNT); if (!md) return -ENOMEM;
memcpy(&md->u.tun_info, &info, sizeof(info)); #ifdef CONFIG_DST_CACHE - err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL); + err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL_ACCOUNT); if (err < 0) { metadata_dst_free(md); return err; diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c index 41ece61eb57a..00c51cf693f3 100644 --- a/net/qrtr/af_qrtr.c +++ b/net/qrtr/af_qrtr.c @@ -884,7 +884,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
mutex_lock(&qrtr_node_lock); list_for_each_entry(node, &qrtr_all_nodes, item) { - skbn = skb_clone(skb, GFP_KERNEL); + skbn = pskb_copy(skb, GFP_KERNEL); if (!skbn) break; skb_set_owner_w(skbn, skb->sk); diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 593846d25214..114fef65f92e 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -320,8 +320,8 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, { struct tipc_msg *hdr, *_hdr; struct sk_buff_head tmpq; + u16 cong_link_cnt = 0; struct sk_buff *_skb; - u16 cong_link_cnt; int rc = 0;
/* Is a cluster supporting with new capabilities ? */ diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 0be0dcb07f7b..001ccc55ef0f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -693,10 +693,7 @@ static void unix_release_sock(struct sock *sk, int embrion) unix_state_unlock(sk);
#if IS_ENABLED(CONFIG_AF_UNIX_OOB) - if (u->oob_skb) { - kfree_skb(u->oob_skb); - u->oob_skb = NULL; - } + u->oob_skb = NULL; #endif
wake_up_interruptible_all(&u->peer_wait); @@ -2226,13 +2223,9 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other }
maybe_add_creds(skb, sock, other); - skb_get(skb); - scm_stat_add(other, skb);
spin_lock(&other->sk_receive_queue.lock); - if (ousk->oob_skb) - consume_skb(ousk->oob_skb); WRITE_ONCE(ousk->oob_skb, skb); __skb_queue_tail(&other->sk_receive_queue, skb); spin_unlock(&other->sk_receive_queue.lock); @@ -2640,8 +2633,6 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
if (!(state->flags & MSG_PEEK)) WRITE_ONCE(u->oob_skb, NULL); - else - skb_get(oob_skb);
spin_unlock(&sk->sk_receive_queue.lock); unix_state_unlock(sk); @@ -2651,8 +2642,6 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state) if (!(state->flags & MSG_PEEK)) UNIXCB(oob_skb).consumed += 1;
- consume_skb(oob_skb); - mutex_unlock(&u->iolock);
if (chunk < 0) @@ -2665,56 +2654,52 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state) static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, int flags, int copied) { + struct sk_buff *read_skb = NULL, *unread_skb = NULL; struct unix_sock *u = unix_sk(sk);
- if (!unix_skb_len(skb)) { - struct sk_buff *unlinked_skb = NULL; + if (likely(unix_skb_len(skb) && skb != READ_ONCE(u->oob_skb))) + return skb;
- spin_lock(&sk->sk_receive_queue.lock); + spin_lock(&sk->sk_receive_queue.lock);
+ if (!unix_skb_len(skb)) { if (copied && (!u->oob_skb || skb == u->oob_skb)) { skb = NULL; } else if (flags & MSG_PEEK) { skb = skb_peek_next(skb, &sk->sk_receive_queue); } else { - unlinked_skb = skb; + read_skb = skb; skb = skb_peek_next(skb, &sk->sk_receive_queue); - __skb_unlink(unlinked_skb, &sk->sk_receive_queue); + __skb_unlink(read_skb, &sk->sk_receive_queue); }
- spin_unlock(&sk->sk_receive_queue.lock); + if (!skb) + goto unlock; + }
- consume_skb(unlinked_skb); - } else { - struct sk_buff *unlinked_skb = NULL; + if (skb != u->oob_skb) + goto unlock;
- spin_lock(&sk->sk_receive_queue.lock); + if (copied) { + skb = NULL; + } else if (!(flags & MSG_PEEK)) { + WRITE_ONCE(u->oob_skb, NULL);
- if (skb == u->oob_skb) { - if (copied) { - skb = NULL; - } else if (!(flags & MSG_PEEK)) { - if (sock_flag(sk, SOCK_URGINLINE)) { - WRITE_ONCE(u->oob_skb, NULL); - consume_skb(skb); - } else { - __skb_unlink(skb, &sk->sk_receive_queue); - WRITE_ONCE(u->oob_skb, NULL); - unlinked_skb = skb; - skb = skb_peek(&sk->sk_receive_queue); - } - } else if (!sock_flag(sk, SOCK_URGINLINE)) { - skb = skb_peek_next(skb, &sk->sk_receive_queue); - } + if (!sock_flag(sk, SOCK_URGINLINE)) { + __skb_unlink(skb, &sk->sk_receive_queue); + unread_skb = skb; + skb = skb_peek(&sk->sk_receive_queue); } + } else if (!sock_flag(sk, SOCK_URGINLINE)) { + skb = skb_peek_next(skb, &sk->sk_receive_queue); + }
- spin_unlock(&sk->sk_receive_queue.lock); +unlock: + spin_unlock(&sk->sk_receive_queue.lock); + + consume_skb(read_skb); + kfree_skb(unread_skb);
- if (unlinked_skb) { - WARN_ON_ONCE(skb_unref(unlinked_skb)); - kfree_skb(unlinked_skb); - } - } return skb; } #endif @@ -2756,7 +2741,6 @@ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) unix_state_unlock(sk);
if (drop) { - WARN_ON_ONCE(skb_unref(skb)); kfree_skb(skb); return -EAGAIN; } @@ -3192,9 +3176,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) skb = skb_peek(&sk->sk_receive_queue); if (skb) { struct sk_buff *oob_skb = READ_ONCE(u->oob_skb); + struct sk_buff *next_skb; + + next_skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (skb == oob_skb || - (!oob_skb && !unix_skb_len(skb))) + (!unix_skb_len(skb) && + (!oob_skb || next_skb == oob_skb))) answ = 1; }
diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 06d94ad999e9..0068e758be4d 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -337,18 +337,6 @@ static bool unix_vertex_dead(struct unix_vertex *vertex) return true; }
-static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist) -{ - skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist); - -#if IS_ENABLED(CONFIG_AF_UNIX_OOB) - if (u->oob_skb) { - WARN_ON_ONCE(skb_unref(u->oob_skb)); - u->oob_skb = NULL; - } -#endif -} - static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist) { struct unix_vertex *vertex; @@ -371,11 +359,11 @@ static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
spin_lock(&embryo_queue->lock); - unix_collect_queue(unix_sk(skb->sk), hitlist); + skb_queue_splice_init(embryo_queue, hitlist); spin_unlock(&embryo_queue->lock); } } else { - unix_collect_queue(u, hitlist); + skb_queue_splice_init(queue, hitlist); }
spin_unlock(&queue->lock); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7397a372c78e..1d83bc3de5ca 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9778,7 +9778,8 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, return ERR_PTR(-ENOMEM);
if (n_ssids) - request->ssids = (void *)&request->channels[n_channels]; + request->ssids = (void *)request + + struct_size(request, channels, n_channels); request->n_ssids = n_ssids; if (ie_len) { if (n_ssids) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 64eeed82d43d..3ff818849d83 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -3467,8 +3467,8 @@ int cfg80211_wext_siwscan(struct net_device *dev, n_channels = ieee80211_get_num_supported_channels(wiphy); }
- creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + - n_channels * sizeof(void *), + creq = kzalloc(struct_size(creq, channels, n_channels) + + sizeof(struct cfg80211_ssid), GFP_ATOMIC); if (!creq) return -ENOMEM; @@ -3476,7 +3476,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, creq->wiphy = wiphy; creq->wdev = dev->ieee80211_ptr; /* SSIDs come after channels */ - creq->ssids = (void *)&creq->channels[n_channels]; + creq->ssids = (void *)creq + struct_size(creq, channels, n_channels); creq->n_channels = n_channels; creq->n_ssids = 1; creq->scan_start = jiffies; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index d9d7bf8bb5c1..431da30817a6 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -115,7 +115,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) n_channels = i; } request->n_channels = n_channels; - request->ssids = (void *)&request->channels[n_channels]; + request->ssids = (void *)request + + struct_size(request, channels, n_channels); request->n_ssids = 1;
memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, diff --git a/net/wireless/util.c b/net/wireless/util.c index 9a7c3adc8a3b..edeeb056fe4d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -998,10 +998,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, * Diffserv Service Classes no update is needed: * - Standard: DF * - Low Priority Data: CS1 - * - Multimedia Streaming: AF31, AF32, AF33 * - Multimedia Conferencing: AF41, AF42, AF43 * - Network Control Traffic: CS7 * - Real-Time Interactive: CS4 + * - Signaling: CS5 */ switch (dscp >> 2) { case 10: @@ -1026,9 +1026,11 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, /* Broadcasting video: CS3 */ ret = 4; break; - case 40: - /* Signaling: CS5 */ - ret = 5; + case 26: + case 28: + case 30: + /* Multimedia Streaming: AF31, AF32, AF33 */ + ret = 4; break; case 44: /* Voice Admit: VA */ diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index c0e0204b9630..b0f24ebd05f0 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -623,20 +623,31 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3 return nb_entries; }
-u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) +static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp, + u32 max) { - u32 nb_entries1 = 0, nb_entries2; + int i;
- if (unlikely(pool->dev && dma_dev_need_sync(pool->dev))) { + for (i = 0; i < max; i++) { struct xdp_buff *buff;
- /* Slow path */ buff = xp_alloc(pool); - if (buff) - *xdp = buff; - return !!buff; + if (unlikely(!buff)) + return i; + *xdp = buff; + xdp++; }
+ return max; +} + +u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max) +{ + u32 nb_entries1 = 0, nb_entries2; + + if (unlikely(pool->dev && dma_dev_need_sync(pool->dev))) + return xp_alloc_slow(pool, xdp, max); + if (unlikely(pool->free_list_cnt)) { nb_entries1 = xp_alloc_reused(pool, xdp, max); if (nb_entries1 == max) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 3e003dd6bea0..dca56aa360ff 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -169,6 +169,10 @@ BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic endif endif
+ifeq ($(ARCH), x86) +BPF_EXTRA_CFLAGS += -fcf-protection +endif + TPROGS_CFLAGS += -Wall -O2 TPROGS_CFLAGS += -Wmissing-prototypes TPROGS_CFLAGS += -Wstrict-prototypes @@ -405,7 +409,7 @@ $(obj)/%.o: $(src)/%.c -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ - -fno-asynchronous-unwind-tables -fcf-protection \ + -fno-asynchronous-unwind-tables \ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \ -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \ $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \ diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h index 67bf888c3bd6..c42ed8a73f1c 100644 --- a/security/apparmor/include/net.h +++ b/security/apparmor/include/net.h @@ -51,10 +51,9 @@ struct aa_sk_ctx { struct aa_label *peer; };
-#define SK_CTX(X) ((X)->sk_security) static inline struct aa_sk_ctx *aa_sock(const struct sock *sk) { - return sk->sk_security; + return sk->sk_security + apparmor_blob_sizes.lbs_sock; }
#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \ diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 808060f9effb..f5d05297d59e 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -1058,27 +1058,12 @@ static int apparmor_userns_create(const struct cred *cred) return error; }
-static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags) -{ - struct aa_sk_ctx *ctx; - - ctx = kzalloc(sizeof(*ctx), flags); - if (!ctx) - return -ENOMEM; - - sk->sk_security = ctx; - - return 0; -} - static void apparmor_sk_free_security(struct sock *sk) { struct aa_sk_ctx *ctx = aa_sock(sk);
- sk->sk_security = NULL; aa_put_label(ctx->label); aa_put_label(ctx->peer); - kfree(ctx); }
/** @@ -1433,6 +1418,7 @@ struct lsm_blob_sizes apparmor_blob_sizes __ro_after_init = { .lbs_cred = sizeof(struct aa_label *), .lbs_file = sizeof(struct aa_file_ctx), .lbs_task = sizeof(struct aa_task_ctx), + .lbs_sock = sizeof(struct aa_sk_ctx), };
static const struct lsm_id apparmor_lsmid = { @@ -1478,7 +1464,6 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = { LSM_HOOK_INIT(getprocattr, apparmor_getprocattr), LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
- LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security), LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security), LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
diff --git a/security/apparmor/net.c b/security/apparmor/net.c index 87e934b2b548..77413a519117 100644 --- a/security/apparmor/net.c +++ b/security/apparmor/net.c @@ -151,7 +151,7 @@ static int aa_label_sk_perm(const struct cred *subj_cred, const char *op, u32 request, struct sock *sk) { - struct aa_sk_ctx *ctx = SK_CTX(sk); + struct aa_sk_ctx *ctx = aa_sock(sk); int error = 0;
AA_BUG(!label); diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c index 57b9ffd53c98..3663aec7bcbd 100644 --- a/security/bpf/hooks.c +++ b/security/bpf/hooks.c @@ -31,7 +31,6 @@ static int __init bpf_lsm_init(void)
struct lsm_blob_sizes bpf_lsm_blob_sizes __ro_after_init = { .lbs_inode = sizeof(struct bpf_storage_blob), - .lbs_task = sizeof(struct bpf_storage_blob), };
DEFINE_LSM(bpf) = { diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index c51e24d24d1e..3c323ca213d4 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -223,7 +223,7 @@ static inline void ima_inode_set_iint(const struct inode *inode,
struct ima_iint_cache *ima_iint_find(struct inode *inode); struct ima_iint_cache *ima_inode_get(struct inode *inode); -void ima_inode_free(struct inode *inode); +void ima_inode_free_rcu(void *inode_security); void __init ima_iintcache_init(void);
extern const int read_idmap[]; diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c index e23412a2c56b..00b249101f98 100644 --- a/security/integrity/ima/ima_iint.c +++ b/security/integrity/ima/ima_iint.c @@ -109,22 +109,18 @@ struct ima_iint_cache *ima_inode_get(struct inode *inode) }
/** - * ima_inode_free - Called on inode free - * @inode: Pointer to the inode + * ima_inode_free_rcu - Called to free an inode via a RCU callback + * @inode_security: The inode->i_security pointer * - * Free the iint associated with an inode. + * Free the IMA data associated with an inode. */ -void ima_inode_free(struct inode *inode) +void ima_inode_free_rcu(void *inode_security) { - struct ima_iint_cache *iint; - - if (!IS_IMA(inode)) - return; - - iint = ima_iint_find(inode); - ima_inode_set_iint(inode, NULL); + struct ima_iint_cache **iint_p = inode_security + ima_blob_sizes.lbs_inode;
- ima_iint_free(iint); + /* *iint_p should be NULL if !IS_IMA(inode) */ + if (*iint_p) + ima_iint_free(*iint_p); }
static void ima_iint_init_once(void *foo) diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index f04f43af651c..5b3394864b21 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -1193,7 +1193,7 @@ static struct security_hook_list ima_hooks[] __ro_after_init = { #ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS LSM_HOOK_INIT(kernel_module_request, ima_kernel_module_request), #endif - LSM_HOOK_INIT(inode_free_security, ima_inode_free), + LSM_HOOK_INIT(inode_free_security_rcu, ima_inode_free_rcu), };
static const struct lsm_id ima_lsmid = { diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 7877a64cc6b8..0804f76a67be 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -1207,13 +1207,16 @@ static int current_check_refer_path(struct dentry *const old_dentry,
/* Inode hooks */
-static void hook_inode_free_security(struct inode *const inode) +static void hook_inode_free_security_rcu(void *inode_security) { + struct landlock_inode_security *inode_sec; + /* * All inodes must already have been untied from their object by * release_inode() or hook_sb_delete(). */ - WARN_ON_ONCE(landlock_inode(inode)->object); + inode_sec = inode_security + landlock_blob_sizes.lbs_inode; + WARN_ON_ONCE(inode_sec->object); }
/* Super-block hooks */ @@ -1637,7 +1640,7 @@ static int hook_file_ioctl_compat(struct file *file, unsigned int cmd, }
static struct security_hook_list landlock_hooks[] __ro_after_init = { - LSM_HOOK_INIT(inode_free_security, hook_inode_free_security), + LSM_HOOK_INIT(inode_free_security_rcu, hook_inode_free_security_rcu),
LSM_HOOK_INIT(sb_delete, hook_sb_delete), LSM_HOOK_INIT(sb_mount, hook_sb_mount), diff --git a/security/security.c b/security/security.c index 8cee5b6c6e6d..43166e341526 100644 --- a/security/security.c +++ b/security/security.c @@ -29,6 +29,7 @@ #include <linux/msg.h> #include <linux/overflow.h> #include <net/flow.h> +#include <net/sock.h>
/* How many LSMs were built into the kernel? */ #define LSM_COUNT (__end_lsm_info - __start_lsm_info) @@ -227,6 +228,7 @@ static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed) lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode); lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc); lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg); + lsm_set_blob_size(&needed->lbs_sock, &blob_sizes.lbs_sock); lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock); lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task); lsm_set_blob_size(&needed->lbs_xattr_count, @@ -401,6 +403,7 @@ static void __init ordered_lsm_init(void) init_debug("inode blob size = %d\n", blob_sizes.lbs_inode); init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc); init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg); + init_debug("sock blob size = %d\n", blob_sizes.lbs_sock); init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock); init_debug("task blob size = %d\n", blob_sizes.lbs_task); init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count); @@ -1596,9 +1599,8 @@ int security_inode_alloc(struct inode *inode)
static void inode_free_by_rcu(struct rcu_head *head) { - /* - * The rcu head is at the start of the inode blob - */ + /* The rcu head is at the start of the inode blob */ + call_void_hook(inode_free_security_rcu, head); kmem_cache_free(lsm_inode_cache, head); }
@@ -1606,23 +1608,24 @@ static void inode_free_by_rcu(struct rcu_head *head) * security_inode_free() - Free an inode's LSM blob * @inode: the inode * - * Deallocate the inode security structure and set @inode->i_security to NULL. + * Release any LSM resources associated with @inode, although due to the + * inode's RCU protections it is possible that the resources will not be + * fully released until after the current RCU grace period has elapsed. + * + * It is important for LSMs to note that despite being present in a call to + * security_inode_free(), @inode may still be referenced in a VFS path walk + * and calls to security_inode_permission() may be made during, or after, + * a call to security_inode_free(). For this reason the inode->i_security + * field is released via a call_rcu() callback and any LSMs which need to + * retain inode state for use in security_inode_permission() should only + * release that state in the inode_free_security_rcu() LSM hook callback. */ void security_inode_free(struct inode *inode) { call_void_hook(inode_free_security, inode); - /* - * The inode may still be referenced in a path walk and - * a call to security_inode_permission() can be made - * after inode_free_security() is called. Ideally, the VFS - * wouldn't do this, but fixing that is a much harder - * job. For now, simply free the i_security via RCU, and - * leave the current inode->i_security pointer intact. - * The inode will be freed after the RCU grace period too. - */ - if (inode->i_security) - call_rcu((struct rcu_head *)inode->i_security, - inode_free_by_rcu); + if (!inode->i_security) + return; + call_rcu((struct rcu_head *)inode->i_security, inode_free_by_rcu); }
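The security_inode_free() comment above spells out why the LSM inode blob cannot be freed synchronously: a path walk may still call security_inode_permission() after the inode is released, so the blob (which begins with a struct rcu_head) is handed to call_rcu() and per-LSM teardown moves to the new inode_free_security_rcu hook. A minimal, illustrative sketch of that deferral pattern, using hypothetical demo_* names:

/* Illustrative sketch: defer freeing of an object that lockless readers
 * may still reference until after an RCU grace period, mirroring the
 * "rcu head at the start of the inode blob" layout used above.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_blob {
	struct rcu_head rcu;	/* kept first so the blob pointer is the head */
	unsigned long state;	/* per-LSM data lives after the rcu head */
};

static void demo_blob_free_rcu(struct rcu_head *head)
{
	/* rcu is the first member, so this recovers the blob pointer. */
	kfree(container_of(head, struct demo_blob, rcu));
}

static void demo_blob_release(struct demo_blob *blob)
{
	if (!blob)
		return;
	call_rcu(&blob->rcu, demo_blob_free_rcu);
}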
/** @@ -4673,6 +4676,28 @@ int security_socket_getpeersec_dgram(struct socket *sock, } EXPORT_SYMBOL(security_socket_getpeersec_dgram);
+/** + * lsm_sock_alloc - allocate a composite sock blob + * @sock: the sock that needs a blob + * @priority: allocation mode + * + * Allocate the sock blob for all the modules + * + * Returns 0, or -ENOMEM if memory can't be allocated. + */ +static int lsm_sock_alloc(struct sock *sock, gfp_t priority) +{ + if (blob_sizes.lbs_sock == 0) { + sock->sk_security = NULL; + return 0; + } + + sock->sk_security = kzalloc(blob_sizes.lbs_sock, priority); + if (sock->sk_security == NULL) + return -ENOMEM; + return 0; +} + /** * security_sk_alloc() - Allocate and initialize a sock's LSM blob * @sk: sock @@ -4686,7 +4711,14 @@ EXPORT_SYMBOL(security_socket_getpeersec_dgram); */ int security_sk_alloc(struct sock *sk, int family, gfp_t priority) { - return call_int_hook(sk_alloc_security, sk, family, priority); + int rc = lsm_sock_alloc(sk, priority); + + if (unlikely(rc)) + return rc; + rc = call_int_hook(sk_alloc_security, sk, family, priority); + if (unlikely(rc)) + security_sk_free(sk); + return rc; }
/** @@ -4698,6 +4730,8 @@ int security_sk_alloc(struct sock *sk, int family, gfp_t priority) void security_sk_free(struct sock *sk) { call_void_hook(sk_free_security, sk); + kfree(sk->sk_security); + sk->sk_security = NULL; }
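Taken together, the security_sk_alloc()/security_sk_free() changes above and the SELinux, Smack and AppArmor hunks elsewhere in this patch replace per-LSM kzalloc() of sk->sk_security with one framework-allocated blob sized by lbs_sock; each LSM then locates its slice at the offset the framework writes back into its lsm_blob_sizes entry. A small illustrative sketch of that convention, using a hypothetical demo LSM:

/* Illustrative sketch of the shared sock blob convention: the framework
 * allocates one buffer of blob_sizes.lbs_sock bytes per sock, and each
 * LSM reads its own slice at a fixed offset.
 */
#include <linux/lsm_hooks.h>
#include <net/sock.h>

struct demo_sk_ctx {
	u32 sid;	/* whatever per-socket state this LSM keeps */
};

/* .lbs_sock starts out as the size this LSM needs; at init time
 * lsm_set_blob_size() rewrites it to the LSM's offset into the blob.
 */
struct lsm_blob_sizes demo_blob_sizes __ro_after_init = {
	.lbs_sock = sizeof(struct demo_sk_ctx),
};

static inline struct demo_sk_ctx *demo_sock(const struct sock *sk)
{
	/* Same shape as selinux_sock()/smack_sock()/aa_sock() above. */
	return sk->sk_security + demo_blob_sizes.lbs_sock;
}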
/** diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 400eca4ad0fb..c11303d662d8 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4594,7 +4594,7 @@ static int socket_sockcreate_sid(const struct task_security_struct *tsec,
static int sock_has_perm(struct sock *sk, u32 perms) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct common_audit_data ad; struct lsm_network_audit net;
@@ -4662,7 +4662,7 @@ static int selinux_socket_post_create(struct socket *sock, int family, isec->initialized = LABEL_INITIALIZED;
if (sock->sk) { - sksec = sock->sk->sk_security; + sksec = selinux_sock(sock->sk); sksec->sclass = sclass; sksec->sid = sid; /* Allows detection of the first association on this socket */ @@ -4678,8 +4678,8 @@ static int selinux_socket_post_create(struct socket *sock, int family, static int selinux_socket_socketpair(struct socket *socka, struct socket *sockb) { - struct sk_security_struct *sksec_a = socka->sk->sk_security; - struct sk_security_struct *sksec_b = sockb->sk->sk_security; + struct sk_security_struct *sksec_a = selinux_sock(socka->sk); + struct sk_security_struct *sksec_b = selinux_sock(sockb->sk);
sksec_a->peer_sid = sksec_b->sid; sksec_b->peer_sid = sksec_a->sid; @@ -4694,7 +4694,7 @@ static int selinux_socket_socketpair(struct socket *socka, static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { struct sock *sk = sock->sk; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); u16 family; int err;
@@ -4834,7 +4834,7 @@ static int selinux_socket_connect_helper(struct socket *sock, struct sockaddr *address, int addrlen) { struct sock *sk = sock->sk; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); int err;
err = sock_has_perm(sk, SOCKET__CONNECT); @@ -5012,9 +5012,9 @@ static int selinux_socket_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { - struct sk_security_struct *sksec_sock = sock->sk_security; - struct sk_security_struct *sksec_other = other->sk_security; - struct sk_security_struct *sksec_new = newsk->sk_security; + struct sk_security_struct *sksec_sock = selinux_sock(sock); + struct sk_security_struct *sksec_other = selinux_sock(other); + struct sk_security_struct *sksec_new = selinux_sock(newsk); struct common_audit_data ad; struct lsm_network_audit net; int err; @@ -5043,8 +5043,8 @@ static int selinux_socket_unix_stream_connect(struct sock *sock, static int selinux_socket_unix_may_send(struct socket *sock, struct socket *other) { - struct sk_security_struct *ssec = sock->sk->sk_security; - struct sk_security_struct *osec = other->sk->sk_security; + struct sk_security_struct *ssec = selinux_sock(sock->sk); + struct sk_security_struct *osec = selinux_sock(other->sk); struct common_audit_data ad; struct lsm_network_audit net;
@@ -5081,7 +5081,7 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb, u16 family) { int err = 0; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); u32 sk_sid = sksec->sid; struct common_audit_data ad; struct lsm_network_audit net; @@ -5110,7 +5110,7 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb, static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err, peerlbl_active, secmark_active; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); u16 family = sk->sk_family; u32 sk_sid = sksec->sid; struct common_audit_data ad; @@ -5178,7 +5178,7 @@ static int selinux_socket_getpeersec_stream(struct socket *sock, int err = 0; char *scontext = NULL; u32 scontext_len; - struct sk_security_struct *sksec = sock->sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sock->sk); u32 peer_sid = SECSID_NULL;
if (sksec->sclass == SECCLASS_UNIX_STREAM_SOCKET || @@ -5238,34 +5238,27 @@ static int selinux_socket_getpeersec_dgram(struct socket *sock,
static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) { - struct sk_security_struct *sksec; - - sksec = kzalloc(sizeof(*sksec), priority); - if (!sksec) - return -ENOMEM; + struct sk_security_struct *sksec = selinux_sock(sk);
sksec->peer_sid = SECINITSID_UNLABELED; sksec->sid = SECINITSID_UNLABELED; sksec->sclass = SECCLASS_SOCKET; selinux_netlbl_sk_security_reset(sksec); - sk->sk_security = sksec;
return 0; }
static void selinux_sk_free_security(struct sock *sk) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk);
- sk->sk_security = NULL; selinux_netlbl_sk_security_free(sksec); - kfree(sksec); }
static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk) { - struct sk_security_struct *sksec = sk->sk_security; - struct sk_security_struct *newsksec = newsk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); + struct sk_security_struct *newsksec = selinux_sock(newsk);
newsksec->sid = sksec->sid; newsksec->peer_sid = sksec->peer_sid; @@ -5279,7 +5272,7 @@ static void selinux_sk_getsecid(const struct sock *sk, u32 *secid) if (!sk) *secid = SECINITSID_ANY_SOCKET; else { - const struct sk_security_struct *sksec = sk->sk_security; + const struct sk_security_struct *sksec = selinux_sock(sk);
*secid = sksec->sid; } @@ -5289,7 +5282,7 @@ static void selinux_sock_graft(struct sock *sk, struct socket *parent) { struct inode_security_struct *isec = inode_security_novalidate(SOCK_INODE(parent)); - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk);
if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6 || sk->sk_family == PF_UNIX) @@ -5306,7 +5299,7 @@ static int selinux_sctp_process_new_assoc(struct sctp_association *asoc, { struct sock *sk = asoc->base.sk; u16 family = sk->sk_family; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct common_audit_data ad; struct lsm_network_audit net; int err; @@ -5361,7 +5354,7 @@ static int selinux_sctp_process_new_assoc(struct sctp_association *asoc, static int selinux_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb) { - struct sk_security_struct *sksec = asoc->base.sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(asoc->base.sk); u32 conn_sid; int err;
@@ -5394,7 +5387,7 @@ static int selinux_sctp_assoc_request(struct sctp_association *asoc, static int selinux_sctp_assoc_established(struct sctp_association *asoc, struct sk_buff *skb) { - struct sk_security_struct *sksec = asoc->base.sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(asoc->base.sk);
if (!selinux_policycap_extsockclass()) return 0; @@ -5493,8 +5486,8 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname, static void selinux_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, struct sock *newsk) { - struct sk_security_struct *sksec = sk->sk_security; - struct sk_security_struct *newsksec = newsk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); + struct sk_security_struct *newsksec = selinux_sock(newsk);
/* If policy does not support SECCLASS_SCTP_SOCKET then call * the non-sctp clone version. @@ -5510,8 +5503,8 @@ static void selinux_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk
static int selinux_mptcp_add_subflow(struct sock *sk, struct sock *ssk) { - struct sk_security_struct *ssksec = ssk->sk_security; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *ssksec = selinux_sock(ssk); + struct sk_security_struct *sksec = selinux_sock(sk);
ssksec->sclass = sksec->sclass; ssksec->sid = sksec->sid; @@ -5526,7 +5519,7 @@ static int selinux_mptcp_add_subflow(struct sock *sk, struct sock *ssk) static int selinux_inet_conn_request(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); int err; u16 family = req->rsk_ops->family; u32 connsid; @@ -5547,7 +5540,7 @@ static int selinux_inet_conn_request(const struct sock *sk, struct sk_buff *skb, static void selinux_inet_csk_clone(struct sock *newsk, const struct request_sock *req) { - struct sk_security_struct *newsksec = newsk->sk_security; + struct sk_security_struct *newsksec = selinux_sock(newsk);
newsksec->sid = req->secid; newsksec->peer_sid = req->peer_secid; @@ -5564,7 +5557,7 @@ static void selinux_inet_csk_clone(struct sock *newsk, static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb) { u16 family = sk->sk_family; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk);
/* handle mapped IPv4 packets arriving via IPv6 sockets */ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) @@ -5639,7 +5632,7 @@ static int selinux_tun_dev_attach_queue(void *security) static int selinux_tun_dev_attach(struct sock *sk, void *security) { struct tun_security_struct *tunsec = security; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk);
/* we don't currently perform any NetLabel based labeling here and it * isn't clear that we would want to do so anyway; while we could apply @@ -5762,7 +5755,7 @@ static unsigned int selinux_ip_output(void *priv, struct sk_buff *skb, return NF_ACCEPT;
/* standard practice, label using the parent socket */ - sksec = sk->sk_security; + sksec = selinux_sock(sk); sid = sksec->sid; } else sid = SECINITSID_KERNEL; @@ -5785,7 +5778,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb, sk = skb_to_full_sk(skb); if (sk == NULL) return NF_ACCEPT; - sksec = sk->sk_security; + sksec = selinux_sock(sk);
ad_net_init_from_iif(&ad, &net, state->out->ifindex, state->pf); if (selinux_parse_skb(skb, &ad, NULL, 0, &proto)) @@ -5874,7 +5867,7 @@ static unsigned int selinux_ip_postroute(void *priv, u32 skb_sid; struct sk_security_struct *sksec;
- sksec = sk->sk_security; + sksec = selinux_sock(sk); if (selinux_skb_peerlbl_sid(skb, family, &skb_sid)) return NF_DROP; /* At this point, if the returned skb peerlbl is SECSID_NULL @@ -5903,7 +5896,7 @@ static unsigned int selinux_ip_postroute(void *priv, } else { /* Locally generated packet, fetch the security label from the * associated socket. */ - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); peer_sid = sksec->sid; secmark_perm = PACKET__SEND; } @@ -5946,7 +5939,7 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) unsigned int data_len = skb->len; unsigned char *data = skb->data; struct nlmsghdr *nlh; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); u16 sclass = sksec->sclass; u32 perm;
@@ -7004,6 +6997,7 @@ struct lsm_blob_sizes selinux_blob_sizes __ro_after_init = { .lbs_inode = sizeof(struct inode_security_struct), .lbs_ipc = sizeof(struct ipc_security_struct), .lbs_msg_msg = sizeof(struct msg_security_struct), + .lbs_sock = sizeof(struct sk_security_struct), .lbs_superblock = sizeof(struct superblock_security_struct), .lbs_xattr_count = SELINUX_INODE_INIT_XATTRS, }; diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index dea1d6f3ed2d..b074099acbaf 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -195,4 +195,9 @@ selinux_superblock(const struct super_block *superblock) return superblock->s_security + selinux_blob_sizes.lbs_superblock; }
+static inline struct sk_security_struct *selinux_sock(const struct sock *sock) +{ + return sock->sk_security + selinux_blob_sizes.lbs_sock; +} + #endif /* _SELINUX_OBJSEC_H_ */ diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c index 55885634e880..fbe5f8c29f81 100644 --- a/security/selinux/netlabel.c +++ b/security/selinux/netlabel.c @@ -17,6 +17,7 @@ #include <linux/gfp.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/lsm_hooks.h> #include <net/sock.h> #include <net/netlabel.h> #include <net/ip.h> @@ -68,7 +69,7 @@ static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb, static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk) { int rc; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct netlbl_lsm_secattr *secattr;
if (sksec->nlbl_secattr != NULL) @@ -100,7 +101,7 @@ static struct netlbl_lsm_secattr *selinux_netlbl_sock_getattr( const struct sock *sk, u32 sid) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct netlbl_lsm_secattr *secattr = sksec->nlbl_secattr;
if (secattr == NULL) @@ -240,7 +241,7 @@ int selinux_netlbl_skbuff_setsid(struct sk_buff *skb, * being labeled by it's parent socket, if it is just exit */ sk = skb_to_full_sk(skb); if (sk != NULL) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk);
if (sksec->nlbl_state != NLBL_REQSKB) return 0; @@ -277,7 +278,7 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_association *asoc, { int rc; struct netlbl_lsm_secattr secattr; - struct sk_security_struct *sksec = asoc->base.sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(asoc->base.sk); struct sockaddr_in addr4; struct sockaddr_in6 addr6;
@@ -356,7 +357,7 @@ int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family) */ void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk);
if (family == PF_INET) sksec->nlbl_state = NLBL_LABELED; @@ -374,8 +375,8 @@ void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family) */ void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk) { - struct sk_security_struct *sksec = sk->sk_security; - struct sk_security_struct *newsksec = newsk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); + struct sk_security_struct *newsksec = selinux_sock(newsk);
newsksec->nlbl_state = sksec->nlbl_state; } @@ -393,7 +394,7 @@ void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk) int selinux_netlbl_socket_post_create(struct sock *sk, u16 family) { int rc; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct netlbl_lsm_secattr *secattr;
if (family != PF_INET && family != PF_INET6) @@ -510,7 +511,7 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock, { int rc = 0; struct sock *sk = sock->sk; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct netlbl_lsm_secattr secattr;
if (selinux_netlbl_option(level, optname) && @@ -548,7 +549,7 @@ static int selinux_netlbl_socket_connect_helper(struct sock *sk, struct sockaddr *addr) { int rc; - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk); struct netlbl_lsm_secattr *secattr;
/* connected sockets are allowed to disconnect when the address family @@ -587,7 +588,7 @@ static int selinux_netlbl_socket_connect_helper(struct sock *sk, int selinux_netlbl_socket_connect_locked(struct sock *sk, struct sockaddr *addr) { - struct sk_security_struct *sksec = sk->sk_security; + struct sk_security_struct *sksec = selinux_sock(sk);
if (sksec->nlbl_state != NLBL_REQSKB && sksec->nlbl_state != NLBL_CONNLABELED) diff --git a/security/smack/smack.h b/security/smack/smack.h index 041688e5a77a..297f21446f45 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -355,6 +355,11 @@ static inline struct superblock_smack *smack_superblock( return superblock->s_security + smack_blob_sizes.lbs_superblock; }
+static inline struct socket_smack *smack_sock(const struct sock *sock) +{ + return sock->sk_security + smack_blob_sizes.lbs_sock; +} + /* * Is the directory transmuting? */ diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 002a1b9ed83a..6ec9a40f3ec5 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -1606,7 +1606,7 @@ static int smack_inode_getsecurity(struct mnt_idmap *idmap, if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP;
- ssp = sock->sk->sk_security; + ssp = smack_sock(sock->sk);
if (strcmp(name, XATTR_SMACK_IPIN) == 0) isp = ssp->smk_in; @@ -1994,7 +1994,7 @@ static int smack_file_receive(struct file *file)
if (inode->i_sb->s_magic == SOCKFS_MAGIC) { sock = SOCKET_I(inode); - ssp = sock->sk->sk_security; + ssp = smack_sock(sock->sk); tsp = smack_cred(current_cred()); /* * If the receiving process can't write to the @@ -2409,11 +2409,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags) { struct smack_known *skp = smk_of_current(); - struct socket_smack *ssp; - - ssp = kzalloc(sizeof(struct socket_smack), gfp_flags); - if (ssp == NULL) - return -ENOMEM; + struct socket_smack *ssp = smack_sock(sk);
/* * Sockets created by kernel threads receive web label. @@ -2427,11 +2423,10 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags) } ssp->smk_packet = NULL;
- sk->sk_security = ssp; - return 0; }
+#ifdef SMACK_IPV6_PORT_LABELING /** * smack_sk_free_security - Free a socket blob * @sk: the socket @@ -2440,7 +2435,6 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags) */ static void smack_sk_free_security(struct sock *sk) { -#ifdef SMACK_IPV6_PORT_LABELING struct smk_port_label *spp;
if (sk->sk_family == PF_INET6) { @@ -2453,9 +2447,8 @@ static void smack_sk_free_security(struct sock *sk) } rcu_read_unlock(); } -#endif - kfree(sk->sk_security); } +#endif
/** * smack_sk_clone_security - Copy security context @@ -2466,8 +2459,8 @@ static void smack_sk_free_security(struct sock *sk) */ static void smack_sk_clone_security(const struct sock *sk, struct sock *newsk) { - struct socket_smack *ssp_old = sk->sk_security; - struct socket_smack *ssp_new = newsk->sk_security; + struct socket_smack *ssp_old = smack_sock(sk); + struct socket_smack *ssp_new = smack_sock(newsk);
*ssp_new = *ssp_old; } @@ -2583,7 +2576,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip) */ static int smack_netlbl_add(struct sock *sk) { - struct socket_smack *ssp = sk->sk_security; + struct socket_smack *ssp = smack_sock(sk); struct smack_known *skp = ssp->smk_out; int rc;
@@ -2616,7 +2609,7 @@ static int smack_netlbl_add(struct sock *sk) */ static void smack_netlbl_delete(struct sock *sk) { - struct socket_smack *ssp = sk->sk_security; + struct socket_smack *ssp = smack_sock(sk);
/* * Take the label off the socket if one is set. @@ -2648,7 +2641,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap) struct smack_known *skp; int rc = 0; struct smack_known *hkp; - struct socket_smack *ssp = sk->sk_security; + struct socket_smack *ssp = smack_sock(sk); struct smk_audit_info ad;
rcu_read_lock(); @@ -2721,7 +2714,7 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address) { struct sock *sk = sock->sk; struct sockaddr_in6 *addr6; - struct socket_smack *ssp = sock->sk->sk_security; + struct socket_smack *ssp = smack_sock(sock->sk); struct smk_port_label *spp; unsigned short port = 0;
@@ -2809,7 +2802,7 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address, int act) { struct smk_port_label *spp; - struct socket_smack *ssp = sk->sk_security; + struct socket_smack *ssp = smack_sock(sk); struct smack_known *skp = NULL; unsigned short port; struct smack_known *object; @@ -2912,7 +2905,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name, if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP;
- ssp = sock->sk->sk_security; + ssp = smack_sock(sock->sk);
if (strcmp(name, XATTR_SMACK_IPIN) == 0) ssp->smk_in = skp; @@ -2960,7 +2953,7 @@ static int smack_socket_post_create(struct socket *sock, int family, * Sockets created by kernel threads receive web label. */ if (unlikely(current->flags & PF_KTHREAD)) { - ssp = sock->sk->sk_security; + ssp = smack_sock(sock->sk); ssp->smk_in = &smack_known_web; ssp->smk_out = &smack_known_web; } @@ -2985,8 +2978,8 @@ static int smack_socket_post_create(struct socket *sock, int family, static int smack_socket_socketpair(struct socket *socka, struct socket *sockb) { - struct socket_smack *asp = socka->sk->sk_security; - struct socket_smack *bsp = sockb->sk->sk_security; + struct socket_smack *asp = smack_sock(socka->sk); + struct socket_smack *bsp = smack_sock(sockb->sk);
asp->smk_packet = bsp->smk_out; bsp->smk_packet = asp->smk_out; @@ -3049,7 +3042,7 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, if (__is_defined(SMACK_IPV6_SECMARK_LABELING)) rsp = smack_ipv6host_label(sip); if (rsp != NULL) { - struct socket_smack *ssp = sock->sk->sk_security; + struct socket_smack *ssp = smack_sock(sock->sk);
rc = smk_ipv6_check(ssp->smk_out, rsp, sip, SMK_CONNECTING); @@ -3844,9 +3837,9 @@ static int smack_unix_stream_connect(struct sock *sock, { struct smack_known *skp; struct smack_known *okp; - struct socket_smack *ssp = sock->sk_security; - struct socket_smack *osp = other->sk_security; - struct socket_smack *nsp = newsk->sk_security; + struct socket_smack *ssp = smack_sock(sock); + struct socket_smack *osp = smack_sock(other); + struct socket_smack *nsp = smack_sock(newsk); struct smk_audit_info ad; int rc = 0; #ifdef CONFIG_AUDIT @@ -3898,8 +3891,8 @@ static int smack_unix_stream_connect(struct sock *sock, */ static int smack_unix_may_send(struct socket *sock, struct socket *other) { - struct socket_smack *ssp = sock->sk->sk_security; - struct socket_smack *osp = other->sk->sk_security; + struct socket_smack *ssp = smack_sock(sock->sk); + struct socket_smack *osp = smack_sock(other->sk); struct smk_audit_info ad; int rc;
@@ -3936,7 +3929,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg, struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name; #endif #ifdef SMACK_IPV6_SECMARK_LABELING - struct socket_smack *ssp = sock->sk->sk_security; + struct socket_smack *ssp = smack_sock(sock->sk); struct smack_known *rsp; #endif int rc = 0; @@ -4148,7 +4141,7 @@ static struct smack_known *smack_from_netlbl(const struct sock *sk, u16 family, netlbl_secattr_init(&secattr);
if (sk) - ssp = sk->sk_security; + ssp = smack_sock(sk);
if (netlbl_skbuff_getattr(skb, family, &secattr) == 0) { skp = smack_from_secattr(&secattr, ssp); @@ -4170,7 +4163,7 @@ static struct smack_known *smack_from_netlbl(const struct sock *sk, u16 family, */ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { - struct socket_smack *ssp = sk->sk_security; + struct socket_smack *ssp = smack_sock(sk); struct smack_known *skp = NULL; int rc = 0; struct smk_audit_info ad; @@ -4274,7 +4267,7 @@ static int smack_socket_getpeersec_stream(struct socket *sock, u32 slen = 1; int rc = 0;
- ssp = sock->sk->sk_security; + ssp = smack_sock(sock->sk); if (ssp->smk_packet != NULL) { rcp = ssp->smk_packet->smk_known; slen = strlen(rcp) + 1; @@ -4324,7 +4317,7 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
switch (family) { case PF_UNIX: - ssp = sock->sk->sk_security; + ssp = smack_sock(sock->sk); s = ssp->smk_out->smk_secid; break; case PF_INET: @@ -4373,7 +4366,7 @@ static void smack_sock_graft(struct sock *sk, struct socket *parent) (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)) return;
- ssp = sk->sk_security; + ssp = smack_sock(sk); ssp->smk_in = skp; ssp->smk_out = skp; /* cssp->smk_packet is already set in smack_inet_csk_clone() */ @@ -4393,7 +4386,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb, { u16 family = sk->sk_family; struct smack_known *skp; - struct socket_smack *ssp = sk->sk_security; + struct socket_smack *ssp = smack_sock(sk); struct sockaddr_in addr; struct iphdr *hdr; struct smack_known *hskp; @@ -4479,7 +4472,7 @@ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb, static void smack_inet_csk_clone(struct sock *sk, const struct request_sock *req) { - struct socket_smack *ssp = sk->sk_security; + struct socket_smack *ssp = smack_sock(sk); struct smack_known *skp;
if (req->peer_secid != 0) { @@ -5049,6 +5042,7 @@ struct lsm_blob_sizes smack_blob_sizes __ro_after_init = { .lbs_inode = sizeof(struct inode_smack), .lbs_ipc = sizeof(struct smack_known *), .lbs_msg_msg = sizeof(struct smack_known *), + .lbs_sock = sizeof(struct socket_smack), .lbs_superblock = sizeof(struct superblock_smack), .lbs_xattr_count = SMACK_INODE_INIT_XATTRS, }; @@ -5173,7 +5167,9 @@ static struct security_hook_list smack_hooks[] __ro_after_init = { LSM_HOOK_INIT(socket_getpeersec_stream, smack_socket_getpeersec_stream), LSM_HOOK_INIT(socket_getpeersec_dgram, smack_socket_getpeersec_dgram), LSM_HOOK_INIT(sk_alloc_security, smack_sk_alloc_security), +#ifdef SMACK_IPV6_PORT_LABELING LSM_HOOK_INIT(sk_free_security, smack_sk_free_security), +#endif LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security), LSM_HOOK_INIT(sock_graft, smack_sock_graft), LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request), diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c index b945c1d3a743..bad71b7e648d 100644 --- a/security/smack/smack_netfilter.c +++ b/security/smack/smack_netfilter.c @@ -26,8 +26,8 @@ static unsigned int smack_ip_output(void *priv, struct socket_smack *ssp; struct smack_known *skp;
- if (sk && sk->sk_security) { - ssp = sk->sk_security; + if (sk) { + ssp = smack_sock(sk); skp = ssp->smk_out; skb->secmark = skp->smk_secid; } diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index e22aad7604e8..5dd1e164f9b1 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -932,7 +932,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, } if (rc >= 0) { old_cat = skp->smk_netlabel.attr.mls.cat; - skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat; + rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat); skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; synchronize_rcu(); netlbl_catmap_free(old_cat); diff --git a/sound/pci/hda/cs35l41_hda_spi.c b/sound/pci/hda/cs35l41_hda_spi.c index b76c0dfd5fef..f8c356ad0d34 100644 --- a/sound/pci/hda/cs35l41_hda_spi.c +++ b/sound/pci/hda/cs35l41_hda_spi.c @@ -38,6 +38,7 @@ static const struct spi_device_id cs35l41_hda_spi_id[] = { { "cs35l41-hda", 0 }, {} }; +MODULE_DEVICE_TABLE(spi, cs35l41_hda_spi_id);
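The cs35l41_hda_spi hunk above adds the MODULE_DEVICE_TABLE() line that exports the SPI id table into the module's alias information, which is what lets udev/modprobe autoload the driver when a matching device is registered. A minimal illustrative sketch of the pattern; the demo names are hypothetical, not the real driver's:

/* Illustrative sketch: exporting an SPI id table with MODULE_DEVICE_TABLE()
 * generates MODULE_ALIAS entries so userspace can autoload the module.
 */
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static int demo_spi_probe(struct spi_device *spi)
{
	return 0;
}

static const struct spi_device_id demo_spi_id[] = {
	{ "demo-codec", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, demo_spi_id);

static struct spi_driver demo_spi_driver = {
	.driver = {
		.name = "demo-codec",
	},
	.probe = demo_spi_probe,
	.id_table = demo_spi_id,
};
module_spi_driver(demo_spi_driver);

MODULE_LICENSE("GPL");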
static const struct acpi_device_id cs35l41_acpi_hda_match[] = { { "CSC3551", 0 }, diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c index 89d8235537cd..f58f434e7110 100644 --- a/sound/pci/hda/tas2781_hda_i2c.c +++ b/sound/pci/hda/tas2781_hda_i2c.c @@ -818,7 +818,7 @@ static int tas2781_hda_i2c_probe(struct i2c_client *clt) } else return -ENODEV;
- tas_hda->priv->irq_info.irq = clt->irq; + tas_hda->priv->irq = clt->irq; ret = tas2781_read_acpi(tas_hda->priv, device_name); if (ret) return dev_err_probe(tas_hda->dev, ret, diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index e3aca9c785a0..aa163ec40862 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -2903,8 +2903,10 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682) }
if (dev->of_node) { - devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw); + if (ret) + return ret; } else { ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw, init.name, diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c index f50f196d700d..ce2e88e066f3 100644 --- a/sound/soc/codecs/rt5682s.c +++ b/sound/soc/codecs/rt5682s.c @@ -2828,7 +2828,9 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component) }
if (dev->of_node) { - devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw); + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, dai_clk_hw); + if (ret) + return ret; } else { ret = devm_clk_hw_register_clkdev(dev, dai_clk_hw, init.name, dev_name(dev)); diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c index 1fbf4560f5cc..28d8b4d7b985 100644 --- a/sound/soc/codecs/tas2781-comlib.c +++ b/sound/soc/codecs/tas2781-comlib.c @@ -14,7 +14,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/regmap.h> #include <linux/slab.h> @@ -411,8 +410,6 @@ EXPORT_SYMBOL_GPL(tasdevice_dsp_remove);
void tasdevice_remove(struct tasdevice_priv *tas_priv) { - if (gpio_is_valid(tas_priv->irq_info.irq_gpio)) - gpio_free(tas_priv->irq_info.irq_gpio); mutex_destroy(&tas_priv->codec_lock); } EXPORT_SYMBOL_GPL(tasdevice_remove); diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c index 8f9a3ae7153e..f3a7605f0710 100644 --- a/sound/soc/codecs/tas2781-fmwlib.c +++ b/sound/soc/codecs/tas2781-fmwlib.c @@ -13,7 +13,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/regmap.h> #include <linux/slab.h> diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c index cf8bc7ede6c7..ea9c6bafa1c3 100644 --- a/sound/soc/codecs/tas2781-i2c.c +++ b/sound/soc/codecs/tas2781-i2c.c @@ -22,7 +22,6 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/regmap.h> #include <linux/slab.h> @@ -30,6 +29,7 @@ #include <sound/soc.h> #include <sound/tas2781.h> #include <sound/tlv.h> +#include <sound/tas2563-tlv.h> #include <sound/tas2781-tlv.h> #include <asm/unaligned.h>
@@ -757,7 +757,7 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) { struct i2c_client *client = (struct i2c_client *)tas_priv->client; unsigned int dev_addrs[TASDEVICE_MAX_CHANNELS]; - int rc, i, ndev = 0; + int i, ndev = 0;
if (tas_priv->isacpi) { ndev = device_property_read_u32_array(&client->dev, @@ -772,7 +772,7 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) "ti,audio-slots", dev_addrs, ndev); }
- tas_priv->irq_info.irq_gpio = + tas_priv->irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(&client->dev), 0); } else if (IS_ENABLED(CONFIG_OF)) { struct device_node *np = tas_priv->dev->of_node; @@ -784,7 +784,7 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) dev_addrs[ndev++] = addr; }
- tas_priv->irq_info.irq_gpio = of_irq_get(np, 0); + tas_priv->irq = of_irq_get(np, 0); } else { ndev = 1; dev_addrs[0] = client->addr; @@ -794,29 +794,12 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv) tas_priv->tasdevice[i].dev_addr = dev_addrs[i];
tas_priv->reset = devm_gpiod_get_optional(&client->dev, - "reset-gpios", GPIOD_OUT_HIGH); + "reset", GPIOD_OUT_HIGH); if (IS_ERR(tas_priv->reset)) dev_err(tas_priv->dev, "%s Can't get reset GPIO\n", __func__);
strcpy(tas_priv->dev_name, tasdevice_id[tas_priv->chip_id].name); - - if (gpio_is_valid(tas_priv->irq_info.irq_gpio)) { - rc = gpio_request(tas_priv->irq_info.irq_gpio, - "AUDEV-IRQ"); - if (!rc) { - gpio_direction_input( - tas_priv->irq_info.irq_gpio); - - tas_priv->irq_info.irq = - gpio_to_irq(tas_priv->irq_info.irq_gpio); - } else - dev_err(tas_priv->dev, "%s: GPIO %d request error\n", - __func__, tas_priv->irq_info.irq_gpio); - } else - dev_err(tas_priv->dev, - "Looking up irq-gpio property failed %d\n", - tas_priv->irq_info.irq_gpio); }
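The tas2781 hunks above drop the legacy gpio_request()/gpio_to_irq() handling: the interrupt now comes straight from firmware via acpi_dev_gpio_irq_get() or of_irq_get(), and the reset line is requested with devm_gpiod_get_optional() using the "reset" con_id (the gpiod core appends "-gpios" when looking up the property). A short illustrative sketch of the descriptor-based pattern, with hypothetical demo_* names:

/* Illustrative sketch: descriptor-based GPIO plus firmware-provided IRQ,
 * replacing legacy gpio_request()/gpio_to_irq() lookups.
 */
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/of_irq.h>

struct demo_priv {
	struct gpio_desc *reset;
	int irq;
};

static int demo_parse_fw(struct i2c_client *client, struct demo_priv *priv)
{
	/* "reset" matches a "reset-gpios" property in DT/ACPI tables. */
	priv->reset = devm_gpiod_get_optional(&client->dev, "reset",
					      GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset))
		return PTR_ERR(priv->reset);

	/* The interrupt number comes from firmware, no GPIO juggling. */
	priv->irq = of_irq_get(client->dev.of_node, 0);
	return 0;
}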
static int tasdevice_i2c_probe(struct i2c_client *i2c) diff --git a/sound/soc/loongson/loongson_card.c b/sound/soc/loongson/loongson_card.c index fae5e9312bf0..2c8dbdba27c5 100644 --- a/sound/soc/loongson/loongson_card.c +++ b/sound/soc/loongson/loongson_card.c @@ -127,8 +127,8 @@ static int loongson_card_parse_of(struct loongson_card_data *data) codec = of_get_child_by_name(dev->of_node, "codec"); if (!codec) { dev_err(dev, "audio-codec property missing or invalid\n"); - ret = -EINVAL; - goto err; + of_node_put(cpu); + return -EINVAL; }
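The loongson_card fix above illustrates the OF refcount rule it relies on: of_get_child_by_name() returns a node with its reference count raised, so an early error return must drop the references already taken. A small self-contained sketch (node names mirror loongson_card_parse_of(); the helper is hypothetical):

#include <linux/device.h>
#include <linux/of.h>

static int parse_cpu_and_codec(struct device *dev)
{
	struct device_node *cpu, *codec;

	cpu = of_get_child_by_name(dev->of_node, "cpu");
	if (!cpu)
		return -EINVAL;

	codec = of_get_child_by_name(dev->of_node, "codec");
	if (!codec) {
		of_node_put(cpu);	/* drop the reference taken above */
		return -EINVAL;
	}

	/* ... parse DAI links ... */

	of_node_put(codec);
	of_node_put(cpu);
	return 0;
}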
for (i = 0; i < card->num_links; i++) { diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 6789c7a4d5ca..3b57ba095ab6 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -561,9 +561,10 @@ static const char *btf_type_sort_name(const struct btf *btf, __u32 index, bool f case BTF_KIND_ENUM64: { int name_off = t->name_off;
- /* Use name of the first element for anonymous enums if allowed */ - if (!from_ref && !t->name_off && btf_vlen(t)) - name_off = btf_enum(t)->name_off; + if (!from_ref && !name_off && btf_vlen(t)) + name_off = btf_kind(t) == BTF_KIND_ENUM64 ? + btf_enum64(t)->name_off : + btf_enum(t)->name_off;
return btf__name_by_offset(btf, name_off); } diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile index d8288936c912..c4f1f1735af6 100644 --- a/tools/bpf/runqslower/Makefile +++ b/tools/bpf/runqslower/Makefile @@ -15,6 +15,7 @@ INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi) CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS) CFLAGS += $(EXTRA_CFLAGS) LDFLAGS += $(EXTRA_LDFLAGS) +LDLIBS += -lelf -lz
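The bpftool btf_type_sort_name() fix above keys the enumerator lookup on the BTF kind, since BTF_KIND_ENUM64 members live in struct btf_enum64 rather than struct btf_enum. A hedged sketch of the same lookup as a standalone helper:

#include <bpf/btf.h>

static const char *first_enumerator_name(const struct btf *btf, const struct btf_type *t)
{
	int name_off = t->name_off;

	/* Anonymous enum: fall back to the first enumerator's name. */
	if (!name_off && btf_vlen(t))
		name_off = btf_kind(t) == BTF_KIND_ENUM64 ?
			   btf_enum64(t)->name_off :
			   btf_enum(t)->name_off;

	return btf__name_by_offset(btf, name_off);
}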
# Try to detect best kernel BTF source KERNEL_REL := $(shell uname -r) @@ -51,7 +52,7 @@ clean: libbpf_hdrs: $(BPFOBJ)
$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) - $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index dd0a18c2ef8f..6f4bf386a3b5 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -134,10 +134,6 @@ #undef main #endif
-#define main main_test_libcapstone -# include "test-libcapstone.c" -#undef main - #define main main_test_lzma # include "test-lzma.c" #undef main diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h index f9ab28421e6d..9ec9c24f38c0 100644 --- a/tools/include/nolibc/string.h +++ b/tools/include/nolibc/string.h @@ -7,6 +7,7 @@ #ifndef _NOLIBC_STRING_H #define _NOLIBC_STRING_H
+#include "arch.h" #include "std.h"
static void *malloc(size_t len); diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 32c00db3b91b..40aae244e35f 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -996,6 +996,7 @@ static struct btf *btf_new_empty(struct btf *base_btf) btf->base_btf = base_btf; btf->start_id = btf__type_cnt(base_btf); btf->start_str_off = base_btf->hdr->str_len; + btf->swapped_endian = base_btf->swapped_endian; }
/* +1 for empty string at offset 0 */ @@ -5394,6 +5395,9 @@ int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, new_base = btf__new_empty(); if (!new_base) return libbpf_err(-ENOMEM); + + btf__set_endianness(new_base, btf__endianness(src_btf)); + dist.id_map = calloc(n, sizeof(*dist.id_map)); if (!dist.id_map) { err = -ENOMEM; diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c index 17f8b32f94a0..4f7399d85eab 100644 --- a/tools/lib/bpf/btf_relocate.c +++ b/tools/lib/bpf/btf_relocate.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (c) 2024, Oracle and/or its affiliates. */
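The two libbpf hunks above make split/distilled BTF inherit the byte order of the BTF it was derived from; without that, writing the result back out in a cross-endian setup would risk mixing byte orders. A minimal sketch of the invariant:

#include <bpf/btf.h>

/* Create an empty BTF that uses the same endianness as an existing one. */
static struct btf *btf_new_like(const struct btf *src)
{
	struct btf *btf = btf__new_empty();

	if (btf)
		btf__set_endianness(btf, btf__endianness(src));

	return btf;
}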
#ifndef _GNU_SOURCE diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a3be6f8fac09..d3a542649e6b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -496,8 +496,6 @@ struct bpf_program { };
struct bpf_struct_ops { - const char *tname; - const struct btf_type *type; struct bpf_program **progs; __u32 *kern_func_off; /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */ @@ -1083,11 +1081,14 @@ static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj) continue;
for (j = 0; j < obj->nr_maps; ++j) { + const struct btf_type *type; + map = &obj->maps[j]; if (!bpf_map__is_struct_ops(map)) continue;
- vlen = btf_vlen(map->st_ops->type); + type = btf__type_by_id(obj->btf, map->st_ops->type_id); + vlen = btf_vlen(type); for (k = 0; k < vlen; ++k) { slot_prog = map->st_ops->progs[k]; if (prog != slot_prog) @@ -1121,8 +1122,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map) int err;
st_ops = map->st_ops; - type = st_ops->type; - tname = st_ops->tname; + type = btf__type_by_id(btf, st_ops->type_id); + tname = btf__name_by_offset(btf, type->name_off); err = find_struct_ops_kern_types(obj, tname, &mod_btf, &kern_type, &kern_type_id, &kern_vtype, &kern_vtype_id, @@ -1423,8 +1424,6 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name, memcpy(st_ops->data, data->d_buf + vsi->offset, type->size); - st_ops->tname = tname; - st_ops->type = type; st_ops->type_id = type_id;
pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n", @@ -7906,16 +7905,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object }
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, + const char *obj_name, const struct bpf_object_open_opts *opts) { - const char *obj_name, *kconfig, *btf_tmp_path, *token_path; + const char *kconfig, *btf_tmp_path, *token_path; struct bpf_object *obj; - char tmp_name[64]; int err; char *log_buf; size_t log_size; __u32 log_level;
+ if (obj_buf && !obj_name) + return ERR_PTR(-EINVAL); + if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("failed to init libelf for %s\n", path ? : "(mem buf)"); @@ -7925,16 +7927,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, if (!OPTS_VALID(opts, bpf_object_open_opts)) return ERR_PTR(-EINVAL);
- obj_name = OPTS_GET(opts, object_name, NULL); + obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name; if (obj_buf) { - if (!obj_name) { - snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", - (unsigned long)obj_buf, - (unsigned long)obj_buf_sz); - obj_name = tmp_name; - } path = obj_name; pr_debug("loading object '%s' from buffer\n", obj_name); + } else { + pr_debug("loading object from %s\n", path); }
log_buf = OPTS_GET(opts, kernel_log_buf, NULL); @@ -8018,9 +8016,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) if (!path) return libbpf_err_ptr(-EINVAL);
- pr_debug("loading %s\n", path); - - return libbpf_ptr(bpf_object_open(path, NULL, 0, opts)); + return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts)); }
struct bpf_object *bpf_object__open(const char *path) @@ -8032,10 +8028,15 @@ struct bpf_object * bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts) { + char tmp_name[64]; + if (!obj_buf || obj_buf_sz == 0) return libbpf_err_ptr(-EINVAL);
- return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts)); + /* create a (quite useless) default "name" for this memory buffer object */ + snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz); + + return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts)); }
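With the open path reworked above, bpf_object__open_mem() only synthesizes an address-based name as a last resort; callers that care about the object name (it prefixes global data map names) should keep passing one explicitly. A usage sketch, with a hypothetical name:

#include <bpf/libbpf.h>

static struct bpf_object *open_from_buffer(const void *buf, size_t sz)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = "my_obj",	/* hypothetical, caller-chosen */
	);

	return bpf_object__open_mem(buf, sz, &opts);
}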
static int bpf_object_unload(struct bpf_object *obj) @@ -8445,11 +8446,13 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
static void bpf_map_prepare_vdata(const struct bpf_map *map) { + const struct btf_type *type; struct bpf_struct_ops *st_ops; __u32 i;
st_ops = map->st_ops; - for (i = 0; i < btf_vlen(st_ops->type); i++) { + type = btf__type_by_id(map->obj->btf, st_ops->type_id); + for (i = 0; i < btf_vlen(type); i++) { struct bpf_program *prog = st_ops->progs[i]; void *kern_data; int prog_fd; @@ -9712,6 +9715,7 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) { + const struct btf_type *type; const struct btf_member *member; struct bpf_struct_ops *st_ops; struct bpf_program *prog; @@ -9771,13 +9775,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, } insn_idx = sym->st_value / BPF_INSN_SZ;
- member = find_member_by_offset(st_ops->type, moff * 8); + type = btf__type_by_id(btf, st_ops->type_id); + member = find_member_by_offset(type, moff * 8); if (!member) { pr_warn("struct_ops reloc %s: cannot find member at moff %u\n", map->name, moff); return -EINVAL; } - member_idx = member - btf_members(st_ops->type); + member_idx = member - btf_members(type); name = btf__name_by_offset(btf, member->name_off);
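The struct_ops changes above stop caching btf_type/tname pointers in struct bpf_struct_ops and re-resolve them from the stored type_id instead, so the values stay valid even if the object's BTF buffer is later reallocated. A sketch of that on-demand lookup:

#include <bpf/btf.h>

static const char *st_ops_type_name(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *type = btf__type_by_id(btf, type_id);

	/* NULL if the id is unknown; otherwise resolve the name lazily. */
	return type ? btf__name_by_offset(btf, type->name_off) : NULL;
}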
if (!resolve_func_ptr(btf, member->type, NULL)) { @@ -13758,29 +13763,13 @@ static int populate_skeleton_progs(const struct bpf_object *obj, int bpf_object__open_skeleton(struct bpf_object_skeleton *s, const struct bpf_object_open_opts *opts) { - DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts, - .object_name = s->name, - ); struct bpf_object *obj; int err;
- /* Attempt to preserve opts->object_name, unless overriden by user - * explicitly. Overwriting object name for skeletons is discouraged, - * as it breaks global data maps, because they contain object name - * prefix as their own map name prefix. When skeleton is generated, - * bpftool is making an assumption that this name will stay the same. - */ - if (opts) { - memcpy(&skel_opts, opts, sizeof(*opts)); - if (!opts->object_name) - skel_opts.object_name = s->name; - } - - obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); - err = libbpf_get_error(obj); - if (err) { - pr_warn("failed to initialize skeleton BPF object '%s': %d\n", - s->name, err); + obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err); return libbpf_err(err); }
diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c index aee479d2191c..69b66994f2a1 100644 --- a/tools/objtool/arch/loongarch/decode.c +++ b/tools/objtool/arch/loongarch/decode.c @@ -122,7 +122,7 @@ static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst, switch (inst.reg2i12_format.opcode) { case addid_op: if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) { - /* addi.d sp,sp,si12 or addi.d fp,sp,si12 */ + /* addi.d sp,sp,si12 or addi.d fp,sp,si12 or addi.d sp,fp,si12 */ insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); ADD_OP(op) { op->src.type = OP_SRC_ADD; @@ -132,6 +132,15 @@ static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst, op->dest.reg = inst.reg2i12_format.rd; } } + if ((inst.reg2i12_format.rd == CFI_SP) && (inst.reg2i12_format.rj == CFI_FP)) { + /* addi.d sp,fp,si12 */ + struct symbol *func = find_func_containing(insn->sec, insn->offset); + + if (!func) + return false; + + func->frame_pointer = true; + } break; case ldd_op: if (inst.reg2i12_format.rj == CFI_SP) { diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 01237d167223..af9cfed7f4ec 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -2993,10 +2993,27 @@ static int update_cfi_state(struct instruction *insn, break; }
- if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { + if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP && + insn->sym->frame_pointer) { + /* addi.d fp,sp,imm on LoongArch */ + if (cfa->base == CFI_SP && cfa->offset == op->src.offset) { + cfa->base = CFI_BP; + cfa->offset = 0; + } + break; + }
- /* lea disp(%rbp), %rsp */ - cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); + if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { + /* addi.d sp,fp,imm on LoongArch */ + if (cfa->base == CFI_BP && cfa->offset == 0) { + if (insn->sym->frame_pointer) { + cfa->base = CFI_SP; + cfa->offset = -op->src.offset; + } + } else { + /* lea disp(%rbp), %rsp */ + cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); + } break; }
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index 2b8a69de4db8..d7e815c2fd15 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -68,6 +68,7 @@ struct symbol { u8 warned : 1; u8 embedded_insn : 1; u8 local_label : 1; + u8 frame_pointer : 1; struct list_head pv_target; struct reloc *relocs; }; diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index c157bd31f2e5..7298f3607062 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -3266,7 +3266,7 @@ static int perf_c2c__record(int argc, const char **argv) return -1; }
- if (perf_pmu__mem_events_init(pmu)) { + if (perf_pmu__mem_events_init()) { pr_err("failed: memory events not supported\n"); return -1; } @@ -3290,19 +3290,15 @@ static int perf_c2c__record(int argc, const char **argv) * PERF_MEM_EVENTS__LOAD_STORE if it is supported. */ if (e->tag) { - e->record = true; + perf_mem_record[PERF_MEM_EVENTS__LOAD_STORE] = true; rec_argv[i++] = "-W"; } else { - e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD); - e->record = true; - - e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE); - e->record = true; + perf_mem_record[PERF_MEM_EVENTS__LOAD] = true; + perf_mem_record[PERF_MEM_EVENTS__STORE] = true; } }
- e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD); - if (e->record) + if (perf_mem_record[PERF_MEM_EVENTS__LOAD]) rec_argv[i++] = "-W";
rec_argv[i++] = "-d"; diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index a212678d47be..c80fb0f60e61 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -2204,6 +2204,7 @@ int cmd_inject(int argc, const char **argv) .finished_init = perf_event__repipe_op2_synth, .compressed = perf_event__repipe_op4_synth, .auxtrace = perf_event__repipe_auxtrace, + .dont_split_sample_group = true, }, .input_name = "-", .samples = LIST_HEAD_INIT(inject.samples), diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index 863fcd735dae..08724fa508e1 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -97,7 +97,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) return -1; }
- if (perf_pmu__mem_events_init(pmu)) { + if (perf_pmu__mem_events_init()) { pr_err("failed: memory events not supported\n"); return -1; } @@ -126,22 +126,17 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) if (e->tag && (mem->operation & MEM_OPERATION_LOAD) && (mem->operation & MEM_OPERATION_STORE)) { - e->record = true; + perf_mem_record[PERF_MEM_EVENTS__LOAD_STORE] = true; rec_argv[i++] = "-W"; } else { - if (mem->operation & MEM_OPERATION_LOAD) { - e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD); - e->record = true; - } + if (mem->operation & MEM_OPERATION_LOAD) + perf_mem_record[PERF_MEM_EVENTS__LOAD] = true;
- if (mem->operation & MEM_OPERATION_STORE) { - e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE); - e->record = true; - } + if (mem->operation & MEM_OPERATION_STORE) + perf_mem_record[PERF_MEM_EVENTS__STORE] = true; }
- e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD); - if (e->record) + if (perf_mem_record[PERF_MEM_EVENTS__LOAD]) rec_argv[i++] = "-W";
rec_argv[i++] = "-d"; @@ -372,6 +367,7 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem) rep_argv[i] = argv[j];
ret = cmd_report(i, rep_argv); + free(new_sort_order); free(rep_argv); return ret; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 6edc0d4ce6fb..40826f825c9c 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -565,6 +565,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c struct hists *hists = evsel__hists(pos); const char *evname = evsel__name(pos);
+ i++; if (symbol_conf.event_group && !evsel__is_group_leader(pos)) continue;
@@ -574,7 +575,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
if (rep->total_cycles_mode) { - report__browse_block_hists(&rep->block_reports[i++].hist, + report__browse_block_hists(&rep->block_reports[i - 1].hist, rep->min_percent, pos, NULL); continue; } diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 8750b5f2d49b..d0c44f6116ed 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -2683,9 +2683,12 @@ static int timehist_sched_change_event(struct perf_tool *tool, * - previous sched event is out of window - we are done * - sample time is beyond window user cares about - reset it * to close out stats for time window interest + * - If tprev is 0, that is, sched_in event for current task is + * not recorded, cannot determine whether sched_in event is + * within time window interest - ignore it */ if (ptime->end) { - if (tprev > ptime->end) + if (!tprev || tprev > ptime->end) goto out;
if (t > ptime->end) @@ -3121,7 +3124,8 @@ static int perf_sched__timehist(struct perf_sched *sched)
if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { pr_err("Invalid time string\n"); - return -EINVAL; + err = -EINVAL; + goto out; }
if (timehist_check_attr(sched, evlist) != 0) diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json index c9596e18ec09..6347eba48810 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json @@ -4577,7 +4577,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD", - "Filter": "config1=0x40233", + "Filter": "config1=0x4023300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4588,7 +4588,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD", - "Filter": "config1=0x40433", + "Filter": "config1=0x4043300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4599,7 +4599,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD", - "Filter": "config1=0x4b233", + "Filter": "config1=0x4b23300000000", "PerPkg": "1", "UMask": "0x11", "Unit": "CHA" @@ -4609,7 +4609,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD", - "Filter": "config1=0x4b433", + "Filter": "config1=0x4b43300000000", "PerPkg": "1", "UMask": "0x11", "Unit": "CHA" @@ -4619,7 +4619,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO", - "Filter": "config1=0x4b033", + "Filter": "config1=0x4b03300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4630,7 +4630,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4651,7 +4651,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD", - "Filter": "config1=0x40233", + "Filter": "config1=0x4023300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4662,7 +4662,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD", - "Filter": "config1=0x40433", + "Filter": "config1=0x4043300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4673,7 +4673,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD", - "Filter": "config1=0x4b233", + "Filter": "config1=0x4b23300000000", "PerPkg": "1", "UMask": "0x21", "Unit": "CHA" @@ -4683,7 +4683,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD", - "Filter": "config1=0x4b433", + "Filter": "config1=0x4b43300000000", "PerPkg": "1", "UMask": "0x21", "Unit": "CHA" @@ -4693,7 +4693,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO", - "Filter": "config1=0x4b033", + "Filter": "config1=0x4b03300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4704,7 +4704,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4747,7 +4747,7 @@ "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM", "Experimental": "1", - "Filter": "config1=0x49033", + "Filter": "config1=0x4903300000000", "PerPkg": "1", "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM request is used by IIO to request a data write without first reading the data for ownership.", "UMask": "0x24", @@ -4759,7 +4759,7 @@ "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR", "Experimental": "1", - "Filter": "config1=0x43C33", + "Filter": "config1=0x43c3300000000", "PerPkg": "1", "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RdCur requests and miss the LLC. A RdCur request is used by IIO to read data without changing state.", "UMask": "0x24", @@ -4771,7 +4771,7 @@ "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO", "Experimental": "1", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RFO requests that miss the LLC. 
A read for ownership (RFO) requests a cache line to be cached in E state with the intent to modify.", "UMask": "0x24", @@ -4999,7 +4999,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD", - "Filter": "config1=0x40233", + "Filter": "config1=0x4023300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -5010,7 +5010,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD", - "Filter": "config1=0x40433", + "Filter": "config1=0x4043300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -5021,7 +5021,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD", - "Filter": "config1=0x4b233", + "Filter": "config1=0x4b23300000000", "PerPkg": "1", "UMask": "0x11", "Unit": "CHA" @@ -5031,7 +5031,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD", - "Filter": "config1=0x4b433", + "Filter": "config1=0x4b43300000000", "PerPkg": "1", "UMask": "0x11", "Unit": "CHA" @@ -5041,7 +5041,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO", - "Filter": "config1=0x4b033", + "Filter": "config1=0x4b03300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -5052,7 +5052,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -5073,7 +5073,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD", - "Filter": "config1=0x40233", + "Filter": "config1=0x4023300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -5084,7 +5084,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", - "Filter": "config1=0x40433", + "Filter": "config1=0x4043300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -5095,7 +5095,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD", - "Filter": "config1=0x4b233", + "Filter": "config1=0x4b23300000000", "PerPkg": "1", "UMask": "0x21", "Unit": "CHA" @@ -5105,7 +5105,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD", - "Filter": "config1=0x4b433", + "Filter": "config1=0x4b43300000000", "PerPkg": "1", "UMask": "0x21", "Unit": "CHA" @@ -5115,7 +5115,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO", - "Filter": "config1=0x4b033", + "Filter": "config1=0x4b03300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -5126,7 +5126,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -5171,7 +5171,7 @@ "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM", "Experimental": "1", - "Filter": "config1=0x49033", + "Filter": "config1=0x4903300000000", "PerPkg": "1", "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM is used by IIO to request a data write without first reading the data for ownership.", "UMask": "0x24", @@ -5183,7 +5183,7 @@ "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR", "Experimental": "1", - "Filter": "config1=0x43C33", + "Filter": "config1=0x43c3300000000", "PerPkg": "1", "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RdCur requests that miss the LLC. A RdCur request is used by IIO to read data without changing state.", "UMask": "0x24", @@ -5195,7 +5195,7 @@ "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO", "Experimental": "1", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RFO requests that miss the LLC. 
A read for ownership (RFO) requests data to be cached in E state with the intent to modify.", "UMask": "0x24", diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json index da46a3aeb58c..4fc818626491 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json @@ -4454,7 +4454,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD", - "Filter": "config1=0x40233", + "Filter": "config1=0x4023300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4465,7 +4465,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD", - "Filter": "config1=0x40433", + "Filter": "config1=0x4043300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4476,7 +4476,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD", - "Filter": "config1=0x4b233", + "Filter": "config1=0x4b23300000000", "PerPkg": "1", "UMask": "0x11", "Unit": "CHA" @@ -4486,7 +4486,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD", - "Filter": "config1=0x4b433", + "Filter": "config1=0x4b43300000000", "PerPkg": "1", "UMask": "0x11", "Unit": "CHA" @@ -4496,7 +4496,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO", - "Filter": "config1=0x4b033", + "Filter": "config1=0x4b03300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4507,7 +4507,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4528,7 +4528,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD", - "Filter": "config1=0x40233", + "Filter": "config1=0x4023300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4539,7 +4539,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD", - "Filter": "config1=0x40433", + "Filter": "config1=0x4043300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4550,7 +4550,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD", - "Filter": "config1=0x4b233", + "Filter": "config1=0x4b23300000000", "PerPkg": "1", "UMask": "0x21", "Unit": "CHA" @@ -4560,7 +4560,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD", - "Filter": "config1=0x4b433", + "Filter": "config1=0x4b43300000000", "PerPkg": "1", "UMask": "0x21", "Unit": "CHA" @@ -4570,7 +4570,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO", - "Filter": "config1=0x4b033", + "Filter": "config1=0x4b03300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4581,7 +4581,7 @@ "Counter": "0,1,2,3", "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4624,7 +4624,7 @@ "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM", "Experimental": "1", - "Filter": "config1=0x49033", + "Filter": "config1=0x4903300000000", "PerPkg": "1", "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM request is used by IIO to request a data write without first reading the data for ownership.", "UMask": "0x24", @@ -4636,7 +4636,7 @@ "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR", "Experimental": "1", - "Filter": "config1=0x43C33", + "Filter": "config1=0x43c3300000000", "PerPkg": "1", "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RdCur requests and miss the LLC. A RdCur request is used by IIO to read data without changing state.", "UMask": "0x24", @@ -4648,7 +4648,7 @@ "EventCode": "0x35", "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO", "Experimental": "1", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RFO requests that miss the LLC. 
A read for ownership (RFO) requests a cache line to be cached in E state with the intent to modify.", "UMask": "0x24", @@ -4865,7 +4865,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD", - "Filter": "config1=0x40233", + "Filter": "config1=0x4023300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4876,7 +4876,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD", - "Filter": "config1=0x40433", + "Filter": "config1=0x4043300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4887,7 +4887,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD", - "Filter": "config1=0x4b233", + "Filter": "config1=0x4b23300000000", "PerPkg": "1", "UMask": "0x11", "Unit": "CHA" @@ -4897,7 +4897,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD", - "Filter": "config1=0x4b433", + "Filter": "config1=0x4b43300000000", "PerPkg": "1", "UMask": "0x11", "Unit": "CHA" @@ -4907,7 +4907,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO", - "Filter": "config1=0x4b033", + "Filter": "config1=0x4b03300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4918,7 +4918,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x11", @@ -4939,7 +4939,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD", - "Filter": "config1=0x40233", + "Filter": "config1=0x4023300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4950,7 +4950,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD", - "Filter": "config1=0x40433", + "Filter": "config1=0x4043300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. 
Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4961,7 +4961,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD", - "Filter": "config1=0x4b233", + "Filter": "config1=0x4b23300000000", "PerPkg": "1", "UMask": "0x21", "Unit": "CHA" @@ -4971,7 +4971,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD", - "Filter": "config1=0x4b433", + "Filter": "config1=0x4b43300000000", "PerPkg": "1", "UMask": "0x21", "Unit": "CHA" @@ -4981,7 +4981,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO", - "Filter": "config1=0x4b033", + "Filter": "config1=0x4b03300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -4992,7 +4992,7 @@ "Counter": "0", "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", "UMask": "0x21", @@ -5037,7 +5037,7 @@ "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM", "Experimental": "1", - "Filter": "config1=0x49033", + "Filter": "config1=0x4903300000000", "PerPkg": "1", "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM is used by IIO to request a data write without first reading the data for ownership.", "UMask": "0x24", @@ -5049,7 +5049,7 @@ "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR", "Experimental": "1", - "Filter": "config1=0x43C33", + "Filter": "config1=0x43c3300000000", "PerPkg": "1", "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RdCur requests that miss the LLC. A RdCur request is used by IIO to read data without changing state.", "UMask": "0x24", @@ -5061,7 +5061,7 @@ "EventCode": "0x36", "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO", "Experimental": "1", - "Filter": "config1=0x40033", + "Filter": "config1=0x4003300000000", "PerPkg": "1", "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests data to be cached in E state with the intent to modify.", "UMask": "0x24", diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json index 7551fb91a9d7..a81776deb2e6 100644 --- a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json +++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json @@ -1,61 +1,4 @@ [ - { - "BriefDescription": "MMIO reads. 
Derived from unc_cha_tor_inserts.ia_miss", - "Counter": "0,1,2,3", - "EventCode": "0x35", - "EventName": "LLC_MISSES.MMIO_READ", - "Filter": "config1=0x40040e33", - "PerPkg": "1", - "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", - "UMask": "0xc001fe01", - "Unit": "CHA" - }, - { - "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss", - "Counter": "0,1,2,3", - "EventCode": "0x35", - "EventName": "LLC_MISSES.MMIO_WRITE", - "Filter": "config1=0x40041e33", - "PerPkg": "1", - "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", - "UMask": "0xc001fe01", - "Unit": "CHA" - }, - { - "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss", - "Counter": "0,1,2,3", - "EventCode": "0x35", - "EventName": "LLC_MISSES.UNCACHEABLE", - "Filter": "config1=0x40e33", - "PerPkg": "1", - "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", - "UMask": "0xc001fe01", - "Unit": "CHA" - }, - { - "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss", - "Counter": "0,1,2,3", - "EventCode": "0x35", - "EventName": "LLC_REFERENCES.STREAMING_FULL", - "Filter": "config1=0x41833", - "PerPkg": "1", - "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", - "ScaleUnit": "64Bytes", - "UMask": "0xc001fe01", - "Unit": "CHA" - }, - { - "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss", - "Counter": "0,1,2,3", - "EventCode": "0x35", - "EventName": "LLC_REFERENCES.STREAMING_PARTIAL", - "Filter": "config1=0x41a33", - "PerPkg": "1", - "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.", - "ScaleUnit": "64Bytes", - "UMask": "0xc001fe01", - "Unit": "CHA" - }, { "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0", "Counter": "0,1,2,3", diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py index d973c2baed1c..7aff02d84ffb 100755 --- a/tools/perf/scripts/python/arm-cs-trace-disasm.py +++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py @@ -192,17 +192,16 @@ def process_event(param_dict): ip = sample["ip"] addr = sample["addr"]
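The cascadelakex and skylakex uncore-cache entries above widen each CHA TOR opcode filter so it lands in the upper 32 bits of config1; numerically every new value is the old one shifted left by 32 (0x40233 -> 0x4023300000000 and so on). A tiny sketch of that arithmetic:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_filter = 0x40233;		/* previous low-word encoding */
	uint64_t new_filter = old_filter << 32;	/* value used by the updated JSON */

	printf("config1=0x%" PRIx64 "\n", new_filter);	/* prints 4023300000000 */
	return 0;
}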
+ if (options.verbose == True): + print("Event type: %s" % name) + print_sample(sample) + # Initialize CPU data if it's empty, and directly return back # if this is the first tracing event for this CPU. if (cpu_data.get(str(cpu) + 'addr') == None): cpu_data[str(cpu) + 'addr'] = addr return
- - if (options.verbose == True): - print("Event type: %s" % name) - print_sample(sample) - # If cannot find dso so cannot dump assembler, bail out if (dso == '[unknown]'): return diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 5d1f04f66a5a..e5491995adf0 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -62,7 +62,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, struct evsel *pos; char *buf = hpp->buf; size_t size = hpp->size; - int i, nr_members = 1; + int i = 0, nr_members = 1; struct hpp_fmt_value *values;
if (evsel__is_group_event(evsel)) @@ -72,16 +72,16 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, if (values == NULL) return 0;
- i = 0; - for_each_group_evsel(pos, evsel) - values[i++].hists = evsel__hists(pos); - + values[0].hists = evsel__hists(evsel); values[0].val = get_field(he); values[0].samples = he->stat.nr_events;
if (evsel__is_group_event(evsel)) { struct hist_entry *pair;
+ for_each_group_member(pos, evsel) + values[++i].hists = evsel__hists(pos); + list_for_each_entry(pair, &he->pairs.head, pairs.node) { for (i = 0; i < nr_members; i++) { if (values[i].hists != pair->hists) diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 0f18fe81ef0b..b24360c04aae 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -13,6 +13,7 @@ perf-util-y += copyfile.o perf-util-y += ctype.o perf-util-y += db-export.o perf-util-y += disasm.o +perf-util-y += disasm_bpf.o perf-util-y += env.o perf-util-y += event.o perf-util-y += evlist.o diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c index 965da6c0b542..79c1f2ae7aff 100644 --- a/tools/perf/util/annotate-data.c +++ b/tools/perf/util/annotate-data.c @@ -104,7 +104,7 @@ static void pr_debug_location(Dwarf_Die *die, u64 pc, int reg) return;
while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) { - if (reg != DWARF_REG_PC && end < pc) + if (reg != DWARF_REG_PC && end <= pc) continue; if (reg != DWARF_REG_PC && start > pc) break; diff --git a/tools/perf/util/bpf_skel/lock_data.h b/tools/perf/util/bpf_skel/lock_data.h index 36af11faad03..de12892f992f 100644 --- a/tools/perf/util/bpf_skel/lock_data.h +++ b/tools/perf/util/bpf_skel/lock_data.h @@ -7,11 +7,11 @@ struct tstamp_data { u64 timestamp; u64 lock; u32 flags; - u32 stack_id; + s32 stack_id; };
struct contention_key { - u32 stack_id; + s32 stack_id; u32 pid; u64 lock_addr_or_cgroup; }; diff --git a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h index e9028235d771..d818e30c5457 100644 --- a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h +++ b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h @@ -15,6 +15,7 @@
typedef __u8 u8; typedef __u32 u32; +typedef __s32 s32; typedef __u64 u64; typedef __s64 s64;
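The lock_data.h change above turns stack_id into a signed type, presumably because the BPF stack-id helper reports failure with a negative value that a u32 comparison would never see. A hedged BPF-side sketch of that pattern; the map and function names are hypothetical:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(s32));
	__uint(value_size, sizeof(u64) * 8);	/* arbitrary max depth */
	__uint(max_entries, 1024);
} stacks SEC(".maps");

static int record_stack(void *ctx)
{
	/* bpf_get_stackid() returns a negative error on failure, so the id
	 * must be signed for this check to work. */
	s32 stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);

	if (stack_id < 0)
		return 0;	/* nothing to account for this event */

	/* ... use stack_id as part of a contention key ... */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";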
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c index e10558b79504..766cbd005f32 100644 --- a/tools/perf/util/disasm.c +++ b/tools/perf/util/disasm.c @@ -15,6 +15,7 @@ #include "build-id.h" #include "debug.h" #include "disasm.h" +#include "disasm_bpf.h" #include "dso.h" #include "env.h" #include "evsel.h" @@ -1164,192 +1165,6 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil return 0; }
-#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) -#define PACKAGE "perf" -#include <bfd.h> -#include <dis-asm.h> -#include <bpf/bpf.h> -#include <bpf/btf.h> -#include <bpf/libbpf.h> -#include <linux/btf.h> -#include <tools/dis-asm-compat.h> - -#include "bpf-event.h" -#include "bpf-utils.h" - -static int symbol__disassemble_bpf(struct symbol *sym, - struct annotate_args *args) -{ - struct annotation *notes = symbol__annotation(sym); - struct bpf_prog_linfo *prog_linfo = NULL; - struct bpf_prog_info_node *info_node; - int len = sym->end - sym->start; - disassembler_ftype disassemble; - struct map *map = args->ms.map; - struct perf_bpil *info_linear; - struct disassemble_info info; - struct dso *dso = map__dso(map); - int pc = 0, count, sub_id; - struct btf *btf = NULL; - char tpath[PATH_MAX]; - size_t buf_size; - int nr_skip = 0; - char *buf; - bfd *bfdf; - int ret; - FILE *s; - - if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO) - return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE; - - pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, - sym->name, sym->start, sym->end - sym->start); - - memset(tpath, 0, sizeof(tpath)); - perf_exe(tpath, sizeof(tpath)); - - bfdf = bfd_openr(tpath, NULL); - if (bfdf == NULL) - abort(); - - if (!bfd_check_format(bfdf, bfd_object)) - abort(); - - s = open_memstream(&buf, &buf_size); - if (!s) { - ret = errno; - goto out; - } - init_disassemble_info_compat(&info, s, - (fprintf_ftype) fprintf, - fprintf_styled); - info.arch = bfd_get_arch(bfdf); - info.mach = bfd_get_mach(bfdf); - - info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env, - dso__bpf_prog(dso)->id); - if (!info_node) { - ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF; - goto out; - } - info_linear = info_node->info_linear; - sub_id = dso__bpf_prog(dso)->sub_id; - - info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns); - info.buffer_length = info_linear->info.jited_prog_len; - - if (info_linear->info.nr_line_info) - prog_linfo = bpf_prog_linfo__new(&info_linear->info); - - if (info_linear->info.btf_id) { - struct btf_node *node; - - node = perf_env__find_btf(dso__bpf_prog(dso)->env, - info_linear->info.btf_id); - if (node) - btf = btf__new((__u8 *)(node->data), - node->data_size); - } - - disassemble_init_for_target(&info); - -#ifdef DISASM_FOUR_ARGS_SIGNATURE - disassemble = disassembler(info.arch, - bfd_big_endian(bfdf), - info.mach, - bfdf); -#else - disassemble = disassembler(bfdf); -#endif - if (disassemble == NULL) - abort(); - - fflush(s); - do { - const struct bpf_line_info *linfo = NULL; - struct disasm_line *dl; - size_t prev_buf_size; - const char *srcline; - u64 addr; - - addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id]; - count = disassemble(pc, &info); - - if (prog_linfo) - linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo, - addr, sub_id, - nr_skip); - - if (linfo && btf) { - srcline = btf__name_by_offset(btf, linfo->line_off); - nr_skip++; - } else - srcline = NULL; - - fprintf(s, "\n"); - prev_buf_size = buf_size; - fflush(s); - - if (!annotate_opts.hide_src_code && srcline) { - args->offset = -1; - args->line = strdup(srcline); - args->line_nr = 0; - args->fileloc = NULL; - args->ms.sym = sym; - dl = disasm_line__new(args); - if (dl) { - annotation_line__add(&dl->al, - ¬es->src->source); - } - } - - args->offset = pc; - args->line = buf + prev_buf_size; - args->line_nr = 0; - args->fileloc = NULL; - args->ms.sym = sym; - dl = disasm_line__new(args); - if (dl) - 
annotation_line__add(&dl->al, ¬es->src->source); - - pc += count; - } while (count > 0 && pc < len); - - ret = 0; -out: - free(prog_linfo); - btf__free(btf); - fclose(s); - bfd_close(bfdf); - return ret; -} -#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) -static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused, - struct annotate_args *args __maybe_unused) -{ - return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF; -} -#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) - -static int -symbol__disassemble_bpf_image(struct symbol *sym, - struct annotate_args *args) -{ - struct annotation *notes = symbol__annotation(sym); - struct disasm_line *dl; - - args->offset = -1; - args->line = strdup("to be implemented"); - args->line_nr = 0; - args->fileloc = NULL; - dl = disasm_line__new(args); - if (dl) - annotation_line__add(&dl->al, ¬es->src->source); - - zfree(&args->line); - return 0; -} - #ifdef HAVE_LIBCAPSTONE_SUPPORT #include <capstone/capstone.h>
diff --git a/tools/perf/util/disasm_bpf.c b/tools/perf/util/disasm_bpf.c new file mode 100644 index 000000000000..1fee71c79b62 --- /dev/null +++ b/tools/perf/util/disasm_bpf.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include "util/annotate.h" +#include "util/disasm_bpf.h" +#include "util/symbol.h" +#include <linux/zalloc.h> +#include <string.h> + +#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) +#define PACKAGE "perf" +#include <bfd.h> +#include <bpf/bpf.h> +#include <bpf/btf.h> +#include <bpf/libbpf.h> +#include <dis-asm.h> +#include <errno.h> +#include <linux/btf.h> +#include <tools/dis-asm-compat.h> + +#include "util/bpf-event.h" +#include "util/bpf-utils.h" +#include "util/debug.h" +#include "util/dso.h" +#include "util/map.h" +#include "util/env.h" +#include "util/util.h" + +int symbol__disassemble_bpf(struct symbol *sym, struct annotate_args *args) +{ + struct annotation *notes = symbol__annotation(sym); + struct bpf_prog_linfo *prog_linfo = NULL; + struct bpf_prog_info_node *info_node; + int len = sym->end - sym->start; + disassembler_ftype disassemble; + struct map *map = args->ms.map; + struct perf_bpil *info_linear; + struct disassemble_info info; + struct dso *dso = map__dso(map); + int pc = 0, count, sub_id; + struct btf *btf = NULL; + char tpath[PATH_MAX]; + size_t buf_size; + int nr_skip = 0; + char *buf; + bfd *bfdf; + int ret; + FILE *s; + + if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO) + return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE; + + pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, + sym->name, sym->start, sym->end - sym->start); + + memset(tpath, 0, sizeof(tpath)); + perf_exe(tpath, sizeof(tpath)); + + bfdf = bfd_openr(tpath, NULL); + if (bfdf == NULL) + abort(); + + if (!bfd_check_format(bfdf, bfd_object)) + abort(); + + s = open_memstream(&buf, &buf_size); + if (!s) { + ret = errno; + goto out; + } + init_disassemble_info_compat(&info, s, + (fprintf_ftype) fprintf, + fprintf_styled); + info.arch = bfd_get_arch(bfdf); + info.mach = bfd_get_mach(bfdf); + + info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env, + dso__bpf_prog(dso)->id); + if (!info_node) { + ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF; + goto out; + } + info_linear = info_node->info_linear; + sub_id = dso__bpf_prog(dso)->sub_id; + + info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns); + info.buffer_length = info_linear->info.jited_prog_len; + + if (info_linear->info.nr_line_info) + prog_linfo = bpf_prog_linfo__new(&info_linear->info); + + if (info_linear->info.btf_id) { + struct btf_node *node; + + node = perf_env__find_btf(dso__bpf_prog(dso)->env, + info_linear->info.btf_id); + if (node) + btf = btf__new((__u8 *)(node->data), + node->data_size); + } + + disassemble_init_for_target(&info); + +#ifdef DISASM_FOUR_ARGS_SIGNATURE + disassemble = disassembler(info.arch, + bfd_big_endian(bfdf), + info.mach, + bfdf); +#else + disassemble = disassembler(bfdf); +#endif + if (disassemble == NULL) + abort(); + + fflush(s); + do { + const struct bpf_line_info *linfo = NULL; + struct disasm_line *dl; + size_t prev_buf_size; + const char *srcline; + u64 addr; + + addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id]; + count = disassemble(pc, &info); + + if (prog_linfo) + linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo, + addr, sub_id, + nr_skip); + + if (linfo && btf) { + srcline = btf__name_by_offset(btf, linfo->line_off); + nr_skip++; + } else + srcline = NULL; + + fprintf(s, 
"\n"); + prev_buf_size = buf_size; + fflush(s); + + if (!annotate_opts.hide_src_code && srcline) { + args->offset = -1; + args->line = strdup(srcline); + args->line_nr = 0; + args->fileloc = NULL; + args->ms.sym = sym; + dl = disasm_line__new(args); + if (dl) { + annotation_line__add(&dl->al, + ¬es->src->source); + } + } + + args->offset = pc; + args->line = buf + prev_buf_size; + args->line_nr = 0; + args->fileloc = NULL; + args->ms.sym = sym; + dl = disasm_line__new(args); + if (dl) + annotation_line__add(&dl->al, ¬es->src->source); + + pc += count; + } while (count > 0 && pc < len); + + ret = 0; +out: + free(prog_linfo); + btf__free(btf); + fclose(s); + bfd_close(bfdf); + return ret; +} +#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) +int symbol__disassemble_bpf(struct symbol *sym __maybe_unused, struct annotate_args *args __maybe_unused) +{ + return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF; +} +#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) + +int symbol__disassemble_bpf_image(struct symbol *sym, struct annotate_args *args) +{ + struct annotation *notes = symbol__annotation(sym); + struct disasm_line *dl; + + args->offset = -1; + args->line = strdup("to be implemented"); + args->line_nr = 0; + args->fileloc = NULL; + dl = disasm_line__new(args); + if (dl) + annotation_line__add(&dl->al, ¬es->src->source); + + zfree(&args->line); + return 0; +} diff --git a/tools/perf/util/disasm_bpf.h b/tools/perf/util/disasm_bpf.h new file mode 100644 index 000000000000..2ecb19545388 --- /dev/null +++ b/tools/perf/util/disasm_bpf.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#ifndef __PERF_DISASM_BPF_H +#define __PERF_DISASM_BPF_H + +struct symbol; +struct annotate_args; + +int symbol__disassemble_bpf(struct symbol *sym, struct annotate_args *args); +int symbol__disassemble_bpf_image(struct symbol *sym, struct annotate_args *args); + +#endif /* __PERF_DISASM_BPF_H */ diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 44ef968a7ad3..1b0e59f4d8e9 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -1444,7 +1444,7 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) { /* Assuming the location list is sorted by address */ - if (end < data->pc) + if (end <= data->pc) continue; if (start > data->pc) break; @@ -1598,6 +1598,9 @@ static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg) if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0) return DIE_FIND_CB_SIBLING;
+ if (!check_allowed_ops(ops, nops)) + return DIE_FIND_CB_SIBLING; + if (die_get_real_type(die_mem, &type_die) == NULL) return DIE_FIND_CB_SIBLING;
@@ -1974,8 +1977,15 @@ static int __die_find_member_offset_cb(Dwarf_Die *die_mem, void *arg) return DIE_FIND_CB_SIBLING;
/* Unions might not have location */ - if (die_get_data_member_location(die_mem, &loc) < 0) - loc = 0; + if (die_get_data_member_location(die_mem, &loc) < 0) { + Dwarf_Attribute attr; + + if (dwarf_attr_integrate(die_mem, DW_AT_data_bit_offset, &attr) && + dwarf_formudata(&attr, &loc) == 0) + loc /= 8; + else + loc = 0; + }
if (offset == loc) return DIE_FIND_CB_END; diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index be048bd02f36..051feb93ed8d 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -29,6 +29,8 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { }; #undef E
+bool perf_mem_record[PERF_MEM_EVENTS__MAX] = { 0 }; + static char mem_loads_name[100]; static char mem_stores_name[100];
@@ -163,7 +165,7 @@ int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str) continue;
if (strstr(e->tag, tok)) - e->record = found = true; + perf_mem_record[j] = found = true; }
tok = strtok_r(NULL, ",", &saveptr); @@ -192,7 +194,7 @@ static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu return !stat(path, &st); }
-int perf_pmu__mem_events_init(struct perf_pmu *pmu) +static int __perf_pmu__mem_events_init(struct perf_pmu *pmu) { const char *mnt = sysfs__mount(); bool found = false; @@ -219,6 +221,18 @@ int perf_pmu__mem_events_init(struct perf_pmu *pmu) return found ? 0 : -ENOENT; }
+int perf_pmu__mem_events_init(void) +{ + struct perf_pmu *pmu = NULL; + + while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) { + if (__perf_pmu__mem_events_init(pmu)) + return -ENOENT; + } + + return 0; +} + void perf_pmu__mem_events_list(struct perf_pmu *pmu) { int j; @@ -249,7 +263,7 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr) for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) { e = perf_pmu__mem_events_ptr(pmu, j);
- if (!e->record) + if (!perf_mem_record[j]) continue;
if (!e->supported) { diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h index ca31014d7934..8dc27db9fd52 100644 --- a/tools/perf/util/mem-events.h +++ b/tools/perf/util/mem-events.h @@ -6,7 +6,6 @@ #include <linux/types.h>
struct perf_mem_event { - bool record; bool supported; bool ldlat; u32 aux_event; @@ -28,9 +27,10 @@ struct perf_pmu;
extern unsigned int perf_mem_events__loads_ldlat; extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX]; +extern bool perf_mem_record[PERF_MEM_EVENTS__MAX];
int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str); -int perf_pmu__mem_events_init(struct perf_pmu *pmu); +int perf_pmu__mem_events_init(void);
struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i); struct perf_pmu *perf_mem_events_find_pmu(void); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 5596bed1b8c8..080242c69196 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1511,6 +1511,9 @@ static int deliver_sample_group(struct evlist *evlist, int ret = -EINVAL; struct sample_read_value *v = sample->read.group.values;
+ if (tool->dont_split_sample_group) + return deliver_sample_value(evlist, tool, event, sample, v, machine); + sample_read_group__for_each(v, sample->read.group.nr, read_format) { ret = deliver_sample_value(evlist, tool, event, sample, v, machine); diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index c38bcb6f4c78..ea96e4ebad8c 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -1237,7 +1237,8 @@ static void print_metric_headers(struct perf_stat_config *config,
/* Print metrics headers only */ evlist__for_each_entry(evlist, counter) { - if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) + if (!config->iostat_run && + config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) continue;
os.evsel = counter; diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c index 302443921681..1b91ccd4d523 100644 --- a/tools/perf/util/time-utils.c +++ b/tools/perf/util/time-utils.c @@ -20,7 +20,7 @@ int parse_nsec_time(const char *str, u64 *ptime) u64 time_sec, time_nsec; char *end;
- time_sec = strtoul(str, &end, 10); + time_sec = strtoull(str, &end, 10); if (*end != '.' && *end != '\0') return -1;
@@ -38,7 +38,7 @@ int parse_nsec_time(const char *str, u64 *ptime) for (i = strlen(nsec_buf); i < 9; i++) nsec_buf[i] = '0';
- time_nsec = strtoul(nsec_buf, &end, 10); + time_nsec = strtoull(nsec_buf, &end, 10); if (*end != '\0') return -1; } else diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h index c957fb849ac6..62bbc9cec151 100644 --- a/tools/perf/util/tool.h +++ b/tools/perf/util/tool.h @@ -85,6 +85,7 @@ struct perf_tool { bool namespace_events; bool cgroup_events; bool no_warn; + bool dont_split_sample_group; enum show_feature_header show_feat_hdr; };
diff --git a/tools/power/cpupower/lib/powercap.c b/tools/power/cpupower/lib/powercap.c index a7a59c6bacda..94a0c69e55ef 100644 --- a/tools/power/cpupower/lib/powercap.c +++ b/tools/power/cpupower/lib/powercap.c @@ -77,6 +77,14 @@ int powercap_get_enabled(int *mode) return sysfs_get_enabled(path, mode); }
+/* + * TODO: implement function. Returns dummy 0 for now. + */ +int powercap_set_enabled(int mode) +{ + return 0; +} + /* * Hardcoded, because rapl is the only powercap implementation - * this needs to get more generic if more powercap implementations diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile index 8f5febaf1a9a..edb3613513b8 100644 --- a/tools/testing/selftests/arm64/signal/Makefile +++ b/tools/testing/selftests/arm64/signal/Makefile @@ -23,7 +23,7 @@ $(TEST_GEN_PROGS): $(PROGS) # Common test-unit targets to build common-layout test-cases executables # Needs secondary expansion to properly include the testcase c-file in pre-reqs COMMON_SOURCES := test_signals.c test_signals_utils.c testcases/testcases.c \ - signals.S + signals.S sve_helpers.c COMMON_HEADERS := test_signals.h test_signals_utils.h testcases/testcases.h
.SECONDEXPANSION: diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.c b/tools/testing/selftests/arm64/signal/sve_helpers.c new file mode 100644 index 000000000000..0acc121af306 --- /dev/null +++ b/tools/testing/selftests/arm64/signal/sve_helpers.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 ARM Limited + * + * Common helper functions for SVE and SME functionality. + */ + +#include <stdbool.h> +#include <kselftest.h> +#include <asm/sigcontext.h> +#include <sys/prctl.h> + +unsigned int vls[SVE_VQ_MAX]; +unsigned int nvls; + +int sve_fill_vls(bool use_sme, int min_vls) +{ + int vq, vl; + int pr_set_vl = use_sme ? PR_SME_SET_VL : PR_SVE_SET_VL; + int len_mask = use_sme ? PR_SME_VL_LEN_MASK : PR_SVE_VL_LEN_MASK; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(pr_set_vl, vq * 16); + if (vl == -1) + return KSFT_FAIL; + + vl &= len_mask; + + /* + * Unlike SVE, SME does not require the minimum vector length + * to be implemented, or the VLs to be consecutive, so any call + * to the prctl might return the single implemented VL, which + * might be larger than 16. So to avoid this loop never + * terminating, bail out here when we find a higher VL than + * we asked for. + * See the ARM ARM, DDI 0487K.a, B1.4.2: I_QQRNR and I_NWYBP. + */ + if (vq < sve_vq_from_vl(vl)) + break; + + /* Skip missing VLs */ + vq = sve_vq_from_vl(vl); + + vls[nvls++] = vl; + } + + if (nvls < min_vls) { + fprintf(stderr, "Only %d VL supported\n", nvls); + return KSFT_SKIP; + } + + return KSFT_PASS; +} diff --git a/tools/testing/selftests/arm64/signal/sve_helpers.h b/tools/testing/selftests/arm64/signal/sve_helpers.h new file mode 100644 index 000000000000..50948ce471cc --- /dev/null +++ b/tools/testing/selftests/arm64/signal/sve_helpers.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 ARM Limited + * + * Common helper functions for SVE and SME functionality. + */ + +#ifndef __SVE_HELPERS_H__ +#define __SVE_HELPERS_H__ + +#include <stdbool.h> + +#define VLS_USE_SVE false +#define VLS_USE_SME true + +extern unsigned int vls[]; +extern unsigned int nvls; + +int sve_fill_vls(bool use_sme, int min_vls); + +#endif diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c index ebd5815b54bb..dfd6a2badf9f 100644 --- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c +++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c @@ -6,44 +6,28 @@ * handler, this is not supported and is expected to segfault. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
struct fake_sigframe sf; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 2);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SVE_SET_VL, vq * 16); - if (vl == -1) - return false; + if (!res) + return true;
- vl &= PR_SME_VL_LEN_MASK; + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least two VLs */ - if (nvls < 2) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } - - return true; + return false; }
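The rewritten sme_get_vls() above shows the pattern every converted signal testcase now follows: sve_fill_vls() (added in sve_helpers.c earlier in this patch) returns a kselftest code, and the per-test init hook maps it back onto a boolean while recording skips. A condensed sketch of that contract, assuming only the names from sve_helpers.h:

/* Condensed form of the converted get_vls hooks; the function name is illustrative. */
static bool example_get_vls(struct tdescr *td)
{
	int res = sve_fill_vls(VLS_USE_SME, 1);	/* or VLS_USE_SVE, and the minimum VL count */

	if (res == KSFT_PASS)
		return true;		/* vls[] and nvls are now populated */

	if (res == KSFT_SKIP)
		td->result = KSFT_SKIP;	/* too few vector lengths implemented */

	return false;			/* KSFT_SKIP or KSFT_FAIL: do not run the test */
}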
static int fake_sigreturn_ssve_change_vl(struct tdescr *td, @@ -51,30 +35,30 @@ static int fake_sigreturn_ssve_change_vl(struct tdescr *td, { size_t resv_sz, offset; struct _aarch64_ctx *head = GET_SF_RESV_HEAD(sf); - struct sve_context *sve; + struct za_context *za;
/* Get a signal context with a SME ZA frame in it */ if (!get_current_context(td, &sf.uc, sizeof(sf.uc))) return 1;
resv_sz = GET_SF_RESV_SIZE(sf); - head = get_header(head, SVE_MAGIC, resv_sz, &offset); + head = get_header(head, ZA_MAGIC, resv_sz, &offset); if (!head) { - fprintf(stderr, "No SVE context\n"); + fprintf(stderr, "No ZA context\n"); return 1; }
- if (head->size != sizeof(struct sve_context)) { + if (head->size != sizeof(struct za_context)) { fprintf(stderr, "Register data present, aborting\n"); return 1; }
- sve = (struct sve_context *)head; + za = (struct za_context *)head;
/* No changes are supported; init left us at minimum VL so go to max */ fprintf(stderr, "Attempting to change VL from %d to %d\n", - sve->vl, vls[0]); - sve->vl = vls[0]; + za->vl, vls[0]); + za->vl = vls[0];
fake_sigreturn(&sf, sizeof(sf), 0);
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c index e2a452190511..e1ccf8f85a70 100644 --- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c +++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c @@ -12,40 +12,22 @@ #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
struct fake_sigframe sf; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sve_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SVE, 2);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SVE_SET_VL, vq * 16); - if (vl == -1) - return false; + if (!res) + return true;
- vl &= PR_SVE_VL_LEN_MASK; - - /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least two VLs */ - if (nvls < 2) { - fprintf(stderr, "Only %d VL supported\n", nvls); + if (res == KSFT_SKIP) td->result = KSFT_SKIP; - return false; - }
- return true; + return false; }
static int fake_sigreturn_sve_change_vl(struct tdescr *td, diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c index 3d37daafcff5..6dbe48cf8b09 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c @@ -6,51 +6,31 @@ * set up as expected. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 64]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 1);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SME_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SME_VL_LEN_MASK; - - /* Did we find the lowest supported VL? */ - if (vq < sve_vq_from_vl(vl)) - break; + if (!res) + return true;
- /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static void setup_ssve_regs(void) diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c index 9dc5f128bbc0..5557e116e973 100644 --- a/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c @@ -6,51 +6,31 @@ * signal frames is set up as expected when enabled simultaneously. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 128]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 1);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SME_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SME_VL_LEN_MASK; - - /* Did we find the lowest supported VL? */ - if (vq < sve_vq_from_vl(vl)) - break; + if (!res) + return true;
- /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static void setup_regs(void) diff --git a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c index 8b16eabbb769..8143eb1c58c1 100644 --- a/tools/testing/selftests/arm64/signal/testcases/sve_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/sve_regs.c @@ -6,47 +6,31 @@ * expected. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 64]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sve_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SVE, 1);
- /* - * Enumerate up to SVE_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SVE_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SVE_VL_LEN_MASK; - - /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); + if (!res) + return true;
- vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static void setup_sve_regs(void) diff --git a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c index 4d6f94b6178f..ce26e9c2fa5e 100644 --- a/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/za_no_regs.c @@ -6,47 +6,31 @@ * expected. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 128]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 1);
- /* - * Enumerate up to SME_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SME_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SME_VL_LEN_MASK; - - /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); + if (!res) + return true;
- vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static int do_one_sme_vl(struct tdescr *td, siginfo_t *si, ucontext_t *uc, diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c index 174ad6656696..b9e13f27f1f9 100644 --- a/tools/testing/selftests/arm64/signal/testcases/za_regs.c +++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c @@ -6,51 +6,31 @@ * expected. */
+#include <kselftest.h> #include <signal.h> #include <ucontext.h> #include <sys/prctl.h>
#include "test_signals_utils.h" +#include "sve_helpers.h" #include "testcases.h"
static union { ucontext_t uc; char buf[1024 * 128]; } context; -static unsigned int vls[SVE_VQ_MAX]; -unsigned int nvls = 0;
static bool sme_get_vls(struct tdescr *td) { - int vq, vl; + int res = sve_fill_vls(VLS_USE_SME, 1);
- /* - * Enumerate up to SME_VQ_MAX vector lengths - */ - for (vq = SVE_VQ_MAX; vq > 0; --vq) { - vl = prctl(PR_SME_SET_VL, vq * 16); - if (vl == -1) - return false; - - vl &= PR_SME_VL_LEN_MASK; - - /* Did we find the lowest supported VL? */ - if (vq < sve_vq_from_vl(vl)) - break; + if (!res) + return true;
- /* Skip missing VLs */ - vq = sve_vq_from_vl(vl); - - vls[nvls++] = vl; - } - - /* We need at least one VL */ - if (nvls < 1) { - fprintf(stderr, "Only %d VL supported\n", nvls); - return false; - } + if (res == KSFT_SKIP) + td->result = KSFT_SKIP;
- return true; + return false; }
static void setup_za_regs(void) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 81d4757ecd4c..848fffa25022 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -427,23 +427,24 @@ $(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h # $1 - input .c file # $2 - output .o file # $3 - CFLAGS +# $4 - binary name define CLANG_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v3 -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32 define CLANG_NOALU32_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2 endef # Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4 define CLANG_CPUV4_BPF_BUILD_RULE - $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(call msg,CLNG-BPF,$4,$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2 endef # Build BPF object using GCC define GCC_BPF_BUILD_RULE - $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2) + $(call msg,GCC-BPF,$4,$2) $(Q)$(BPF_GCC) $3 -DBPF_NO_PRESERVE_ACCESS_INDEX -Wno-attributes -O2 -c $1 -o $2 endef
@@ -535,7 +536,7 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \ $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \ $(TRUNNER_BPF_CFLAGS) \ $$($$<-CFLAGS) \ - $$($$<-$2-CFLAGS)) + $$($$<-$2-CFLAGS),$(TRUNNER_BINARY))
$(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) @@ -762,6 +763,8 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
+# Linking uprobe_multi can fail due to relocation overflows on mips. +$(OUTPUT)/uprobe_multi: CFLAGS += $(if $(filter mips, $(ARCH)),-mxgot) $(OUTPUT)/uprobe_multi: uprobe_multi.c $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) -O0 $(LDFLAGS) $^ $(LDLIBS) -o $@ diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 627b74ae041b..90dc3aca32bd 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -10,6 +10,7 @@ #include <sys/sysinfo.h> #include <signal.h> #include "bench.h" +#include "bpf_util.h" #include "testing_helpers.h"
struct env env = { diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index 68180d8f8558..005c401b3e22 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -10,6 +10,7 @@ #include <math.h> #include <time.h> #include <sys/syscall.h> +#include <limits.h>
struct cpu_set { bool *cpus; diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c index 18405c3b7cee..af10c309359a 100644 --- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c +++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c @@ -412,7 +412,7 @@ static void test_sk_storage_map_stress_free(void) rlim_new.rlim_max = rlim_new.rlim_cur + 128; err = setrlimit(RLIMIT_NOFILE, &rlim_new); CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d", - rlim_new.rlim_cur, errno); + (unsigned long) rlim_new.rlim_cur, errno); }
err = do_sk_storage_map_stress_free(); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c index b52ff8ce34db..16bed9dd8e6a 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c @@ -95,7 +95,7 @@ static unsigned short get_local_port(int fd) struct sockaddr_in6 addr; socklen_t addrlen = sizeof(addr);
- if (!getsockname(fd, &addr, &addrlen)) + if (!getsockname(fd, (struct sockaddr *)&addr, &addrlen)) return ntohs(addr.sin6_port);
return 0; diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 47f42e680105..26019313e1fc 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include "progs/core_reloc_types.h" #include "bpf_testmod/bpf_testmod.h" diff --git a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c index b1a3a49a822a..42bd07f7218d 100644 --- a/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c +++ b/tools/testing/selftests/bpf/prog_tests/crypto_sanity.c @@ -4,7 +4,6 @@ #include <sys/types.h> #include <sys/socket.h> #include <net/if.h> -#include <linux/in6.h> #include <linux/if_alg.h>
#include "test_progs.h" diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c index dcb9e5070cc3..d79f398ec6b7 100644 --- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c +++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c @@ -4,7 +4,6 @@ #include <sys/types.h> #include <sys/socket.h> #include <net/if.h> -#include <linux/in6.h>
#include "test_progs.h" #include "network_helpers.h" diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 9e5f38739104..3171047414a7 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> -#include <error.h> #include <linux/if_tun.h> #include <sys/uio.h>
diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index c07991544a78..34f8822fd221 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> #include "kfree_skb.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c index 835a1d756c16..b6e8d822e8e9 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c @@ -47,7 +47,6 @@ #include <linux/if_ether.h> #include <linux/if_packet.h> #include <linux/if_tun.h> -#include <linux/icmp.h> #include <arpa/inet.h> #include <unistd.h> #include <errno.h> diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c index 03825d2b45a8..6c50c0f63f43 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c @@ -49,6 +49,7 @@ * is not crashed, it is considered successful. */ #define NETNS "ns_lwt_reroute" +#include <netinet/in.h> #include "lwt_helpers.h" #include "network_helpers.h" #include <linux/net_tstamp.h> diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c index e72d75d6baa7..c29787e092d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c +++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c @@ -11,7 +11,7 @@ #include <sched.h> #include <sys/wait.h> #include <sys/mount.h> -#include <sys/fcntl.h> +#include <fcntl.h> #include "network_helpers.h"
#define STACK_SIZE (1024 * 1024) diff --git a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c index daa952711d8f..e9c07d561ded 100644 --- a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c +++ b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE #include <test_progs.h> #include <network_helpers.h> #include "test_parse_tcp_hdr_opt.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index ae87c00867ba..dcb2f62cdec6 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -18,7 +18,6 @@ #include <arpa/inet.h> #include <assert.h> #include <errno.h> -#include <error.h> #include <fcntl.h> #include <sched.h> #include <stdio.h> diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index 327d51f59142..53b8ffc943dc 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -471,7 +471,7 @@ static int set_forwarding(bool enable)
static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp) { - struct __kernel_timespec pkt_ts = {}; + struct timespec pkt_ts = {}; char ctl[CMSG_SPACE(sizeof(pkt_ts))]; struct timespec now_ts; struct msghdr msg = {}; @@ -495,7 +495,7 @@ static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
cmsg = CMSG_FIRSTHDR(&msg); if (cmsg && cmsg->cmsg_level == SOL_SOCKET && - cmsg->cmsg_type == SO_TIMESTAMPNS_NEW) + cmsg->cmsg_type == SO_TIMESTAMPNS) memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec; @@ -537,9 +537,9 @@ static int wait_netstamp_needed_key(void) if (!ASSERT_GE(srv_fd, 0, "start_server")) goto done;
- err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, + err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS, &opt, sizeof(opt)); - if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)")) goto done;
cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS); @@ -621,9 +621,9 @@ static void test_inet_dtime(int family, int type, const char *addr, __u16 port) return;
/* Ensure the kernel puts the (rcv) timestamp for all skb */ - err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, + err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS, &opt, sizeof(opt)); - if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS)")) goto done;
if (type == SOCK_STREAM) { diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index f2b99d95d916..c38784c1c066 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define _GNU_SOURCE #include <test_progs.h> #include "cgroup_helpers.h" #include "network_helpers.h" diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c index e51721df14fc..dfff6feac12c 100644 --- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -4,6 +4,7 @@ #define _GNU_SOURCE #include <linux/compiler.h> #include <linux/ring_buffer.h> +#include <linux/build_bug.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 81097a3f15eb..4f1029743734 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -2,6 +2,9 @@ #ifndef __BPF_MISC_H__ #define __BPF_MISC_H__
+#define XSTR(s) STR(s) +#define STR(s) #s + /* This set of attributes controls behavior of the * test_loader.c:test_loader__run_subtests(). * @@ -26,6 +29,9 @@ * * __regex Same as __msg, but using a regular expression. * __regex_unpriv Same as __msg_unpriv but using a regular expression. + * __xlated Expect a line in a disassembly log after verifier applies rewrites. + * Multiple __xlated attributes could be specified. + * __xlated_unpriv Same as __xlated but for unprivileged mode. * * __success Expect program load success in privileged mode. * __success_unpriv Expect program load success in unprivileged mode. @@ -60,14 +66,20 @@ * __auxiliary Annotated program is not a separate test, but used as auxiliary * for some other test cases and should always be loaded. * __auxiliary_unpriv Same, but load program in unprivileged mode. + * + * __arch_* Specify on which architecture the test case should be tested. + * Several __arch_* annotations could be specified at once. + * When test case is not run on current arch it is marked as skipped. */ -#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) -#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex))) +#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg))) +#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" XSTR(__COUNTER__) "=" regex))) +#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) #define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) #define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc))) -#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg))) -#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex))) +#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg))) +#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" XSTR(__COUNTER__) "=" regex))) +#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg))) #define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv"))) #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) @@ -77,6 +89,10 @@ #define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary"))) #define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv"))) #define __btf_path(path) __attribute__((btf_decl_tag("comment:test_btf_path=" path))) +#define __arch(arch) __attribute__((btf_decl_tag("comment:test_arch=" arch))) +#define __arch_x86_64 __arch("X86_64") +#define __arch_arm64 __arch("ARM64") +#define __arch_riscv64 __arch("RISCV64")
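The XSTR()/__COUNTER__ change above addresses the fact that identical __attribute__((btf_decl_tag(...))) strings are emitted only once per declaration, so two identical __msg("foo") annotations used to collapse into a single expectation. Embedding __COUNTER__ makes every tag string unique; an illustration of the resulting BTF strings (the example values match the skip_dynamic_pfx() comment added to test_loader.c below):

/* Illustration only: how the two-component tag strings are formed. */
#define XSTR(s) STR(s)
#define STR(s)  #s
#define __msg(msg) \
	__attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg)))

/*
 * __msg("foo") __msg("foo") now produces two distinct decl tags, e.g.
 *   comment:test_expect_msg=0=foo
 *   comment:test_expect_msg=1=foo
 * and test_loader.c strips the "<counter>=" part with skip_dynamic_pfx().
 */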
/* Convenience macro for use with 'asm volatile' blocks */ #define __naked __attribute__((naked)) diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h index a0778fe7857a..41d59f0ee606 100644 --- a/tools/testing/selftests/bpf/progs/cg_storage_multi.h +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h @@ -3,8 +3,6 @@ #ifndef __PROGS_CG_STORAGE_MULTI_H #define __PROGS_CG_STORAGE_MULTI_H
-#include <asm/types.h> - struct cgroup_value { __u32 egress_pkts; __u32 ingress_pkts; diff --git a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c index f5ac5f3e8919..568816307f71 100644 --- a/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c +++ b/tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c @@ -31,6 +31,7 @@ int BPF_PROG(check_access, struct bpf_map *map, fmode_t fmode)
if (fmode & FMODE_WRITE) return -EACCES; + barrier();
return 0; } diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c index 85e48069c9e6..d4b99c3b4719 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -1213,10 +1213,10 @@ __success __log_level(2) * - once for path entry - label 2; * - once for path entry - label 1 - label 2. */ -__msg("r1 = *(u64 *)(r10 -8)") -__msg("exit") -__msg("r1 = *(u64 *)(r10 -8)") -__msg("exit") +__msg("8: (79) r1 = *(u64 *)(r10 -8)") +__msg("9: (95) exit") +__msg("from 2 to 7") +__msg("8: safe") __msg("processed 11 insns") __flag(BPF_F_TEST_STATE_FREQ) __naked void old_stack_misc_vs_cur_ctx_ptr(void) diff --git a/tools/testing/selftests/bpf/test_cpp.cpp b/tools/testing/selftests/bpf/test_cpp.cpp index dde0bb16e782..abc2a56ab261 100644 --- a/tools/testing/selftests/bpf/test_cpp.cpp +++ b/tools/testing/selftests/bpf/test_cpp.cpp @@ -6,6 +6,10 @@ #include <bpf/libbpf.h> #include <bpf/bpf.h> #include <bpf/btf.h> + +#ifndef _Bool +#define _Bool bool +#endif #include "test_core_extern.skel.h" #include "struct_ops_module.skel.h"
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index f14e10b0de96..2150e9c9b53f 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -7,6 +7,7 @@ #include <bpf/btf.h>
#include "autoconf_helper.h" +#include "disasm_helpers.h" #include "unpriv_helpers.h" #include "cap_helpers.h"
@@ -19,10 +20,12 @@ #define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success" #define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg=" #define TEST_TAG_EXPECT_REGEX_PFX "comment:test_expect_regex=" +#define TEST_TAG_EXPECT_XLATED_PFX "comment:test_expect_xlated=" #define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv" #define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv" #define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv=" #define TEST_TAG_EXPECT_REGEX_PFX_UNPRIV "comment:test_expect_regex_unpriv=" +#define TEST_TAG_EXPECT_XLATED_PFX_UNPRIV "comment:test_expect_xlated_unpriv=" #define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level=" #define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags=" #define TEST_TAG_DESCRIPTION_PFX "comment:test_description=" @@ -31,6 +34,7 @@ #define TEST_TAG_AUXILIARY "comment:test_auxiliary" #define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv" #define TEST_BTF_PATH "comment:test_btf_path=" +#define TEST_TAG_ARCH "comment:test_arch="
/* Warning: duplicated in bpf_misc.h */ #define POINTER_VALUE 0xcafe4all @@ -55,11 +59,16 @@ struct expect_msg { regex_t regex; };
+struct expected_msgs { + struct expect_msg *patterns; + size_t cnt; +}; + struct test_subspec { char *name; bool expect_failure; - struct expect_msg *expect_msgs; - size_t expect_msg_cnt; + struct expected_msgs expect_msgs; + struct expected_msgs expect_xlated; int retval; bool execute; }; @@ -72,6 +81,7 @@ struct test_spec { int log_level; int prog_flags; int mode_mask; + int arch_mask; bool auxiliary; bool valid; }; @@ -96,44 +106,47 @@ void test_loader_fini(struct test_loader *tester) free(tester->log_buf); }
-static void free_test_spec(struct test_spec *spec) +static void free_msgs(struct expected_msgs *msgs) { int i;
+ for (i = 0; i < msgs->cnt; i++) + if (msgs->patterns[i].regex_str) + regfree(&msgs->patterns[i].regex); + free(msgs->patterns); + msgs->patterns = NULL; + msgs->cnt = 0; +} + +static void free_test_spec(struct test_spec *spec) +{ /* Deallocate expect_msgs arrays. */ - for (i = 0; i < spec->priv.expect_msg_cnt; i++) - if (spec->priv.expect_msgs[i].regex_str) - regfree(&spec->priv.expect_msgs[i].regex); - for (i = 0; i < spec->unpriv.expect_msg_cnt; i++) - if (spec->unpriv.expect_msgs[i].regex_str) - regfree(&spec->unpriv.expect_msgs[i].regex); + free_msgs(&spec->priv.expect_msgs); + free_msgs(&spec->unpriv.expect_msgs); + free_msgs(&spec->priv.expect_xlated); + free_msgs(&spec->unpriv.expect_xlated);
free(spec->priv.name); free(spec->unpriv.name); - free(spec->priv.expect_msgs); - free(spec->unpriv.expect_msgs); - spec->priv.name = NULL; spec->unpriv.name = NULL; - spec->priv.expect_msgs = NULL; - spec->unpriv.expect_msgs = NULL; }
-static int push_msg(const char *substr, const char *regex_str, struct test_subspec *subspec) +static int push_msg(const char *substr, const char *regex_str, struct expected_msgs *msgs) { void *tmp; int regcomp_res; char error_msg[100]; struct expect_msg *msg;
- tmp = realloc(subspec->expect_msgs, - (1 + subspec->expect_msg_cnt) * sizeof(struct expect_msg)); + tmp = realloc(msgs->patterns, + (1 + msgs->cnt) * sizeof(struct expect_msg)); if (!tmp) { ASSERT_FAIL("failed to realloc memory for messages\n"); return -ENOMEM; } - subspec->expect_msgs = tmp; - msg = &subspec->expect_msgs[subspec->expect_msg_cnt]; + msgs->patterns = tmp; + msg = &msgs->patterns[msgs->cnt];
if (substr) { msg->substr = substr; @@ -150,7 +163,7 @@ static int push_msg(const char *substr, const char *regex_str, struct test_subsp } }
- subspec->expect_msg_cnt += 1; + msgs->cnt += 1; return 0; }
@@ -202,6 +215,41 @@ static void update_flags(int *flags, int flag, bool clear) *flags |= flag; }
+/* Matches a string of form '<pfx>[^=]=.*' and returns it's suffix. + * Used to parse btf_decl_tag values. + * Such values require unique prefix because compiler does not add + * same __attribute__((btf_decl_tag(...))) twice. + * Test suite uses two-component tags for such cases: + * + * <pfx> __COUNTER__ '=' + * + * For example, two consecutive __msg tags '__msg("foo") __msg("foo")' + * would be encoded as: + * + * [18] DECL_TAG 'comment:test_expect_msg=0=foo' type_id=15 component_idx=-1 + * [19] DECL_TAG 'comment:test_expect_msg=1=foo' type_id=15 component_idx=-1 + * + * And the purpose of this function is to extract 'foo' from the above. + */ +static const char *skip_dynamic_pfx(const char *s, const char *pfx) +{ + const char *msg; + + if (strncmp(s, pfx, strlen(pfx)) != 0) + return NULL; + msg = s + strlen(pfx); + msg = strchr(msg, '='); + if (!msg) + return NULL; + return msg + 1; +} + +enum arch { + ARCH_X86_64 = 0x1, + ARCH_ARM64 = 0x2, + ARCH_RISCV64 = 0x4, +}; + /* Uses btf_decl_tag attributes to describe the expected test * behavior, see bpf_misc.h for detailed description of each attribute * and attribute combinations. @@ -215,6 +263,7 @@ static int parse_test_spec(struct test_loader *tester, bool has_unpriv_result = false; bool has_unpriv_retval = false; int func_id, i, err = 0; + u32 arch_mask = 0; struct btf *btf;
memset(spec, 0, sizeof(*spec)); @@ -270,27 +319,33 @@ static int parse_test_spec(struct test_loader *tester, } else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) { spec->auxiliary = true; spec->mode_mask |= UNPRIV; - } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { - msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; - err = push_msg(msg, NULL, &spec->priv); + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX))) { + err = push_msg(msg, NULL, &spec->priv.expect_msgs); if (err) goto cleanup; spec->mode_mask |= PRIV; - } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) { - msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1; - err = push_msg(msg, NULL, &spec->unpriv); + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV))) { + err = push_msg(msg, NULL, &spec->unpriv.expect_msgs); if (err) goto cleanup; spec->mode_mask |= UNPRIV; - } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX)) { - msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX) - 1; - err = push_msg(NULL, msg, &spec->priv); + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_REGEX_PFX))) { + err = push_msg(NULL, msg, &spec->priv.expect_msgs); if (err) goto cleanup; spec->mode_mask |= PRIV; - } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV)) { - msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX_UNPRIV) - 1; - err = push_msg(NULL, msg, &spec->unpriv); + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV))) { + err = push_msg(NULL, msg, &spec->unpriv.expect_msgs); + if (err) + goto cleanup; + spec->mode_mask |= UNPRIV; + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) { + err = push_msg(msg, NULL, &spec->priv.expect_xlated); + if (err) + goto cleanup; + spec->mode_mask |= PRIV; + } else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) { + err = push_msg(msg, NULL, &spec->unpriv.expect_xlated); if (err) goto cleanup; spec->mode_mask |= UNPRIV; @@ -341,11 +396,26 @@ static int parse_test_spec(struct test_loader *tester, goto cleanup; update_flags(&spec->prog_flags, flags, clear); } + } else if (str_has_pfx(s, TEST_TAG_ARCH)) { + val = s + sizeof(TEST_TAG_ARCH) - 1; + if (strcmp(val, "X86_64") == 0) { + arch_mask |= ARCH_X86_64; + } else if (strcmp(val, "ARM64") == 0) { + arch_mask |= ARCH_ARM64; + } else if (strcmp(val, "RISCV64") == 0) { + arch_mask |= ARCH_RISCV64; + } else { + PRINT_FAIL("bad arch spec: '%s'", val); + err = -EINVAL; + goto cleanup; + } } else if (str_has_pfx(s, TEST_BTF_PATH)) { spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1; } }
+ spec->arch_mask = arch_mask; + if (spec->mode_mask == 0) spec->mode_mask = PRIV;
@@ -387,11 +457,22 @@ static int parse_test_spec(struct test_loader *tester, spec->unpriv.execute = spec->priv.execute; }
- if (!spec->unpriv.expect_msgs) { - for (i = 0; i < spec->priv.expect_msg_cnt; i++) { - struct expect_msg *msg = &spec->priv.expect_msgs[i]; + if (spec->unpriv.expect_msgs.cnt == 0) { + for (i = 0; i < spec->priv.expect_msgs.cnt; i++) { + struct expect_msg *msg = &spec->priv.expect_msgs.patterns[i]; + + err = push_msg(msg->substr, msg->regex_str, + &spec->unpriv.expect_msgs); + if (err) + goto cleanup; + } + } + if (spec->unpriv.expect_xlated.cnt == 0) { + for (i = 0; i < spec->priv.expect_xlated.cnt; i++) { + struct expect_msg *msg = &spec->priv.expect_xlated.patterns[i];
- err = push_msg(msg->substr, msg->regex_str, &spec->unpriv); + err = push_msg(msg->substr, msg->regex_str, + &spec->unpriv.expect_xlated); if (err) goto cleanup; } @@ -434,7 +515,6 @@ static void prepare_case(struct test_loader *tester, bpf_program__set_flags(prog, prog_flags | spec->prog_flags);
tester->log_buf[0] = '\0'; - tester->next_match_pos = 0; }
static void emit_verifier_log(const char *log_buf, bool force) @@ -444,39 +524,41 @@ static void emit_verifier_log(const char *log_buf, bool force) fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf); }
-static void validate_case(struct test_loader *tester, - struct test_subspec *subspec, - struct bpf_object *obj, - struct bpf_program *prog, - int load_err) +static void emit_xlated(const char *xlated, bool force) +{ + if (!force && env.verbosity == VERBOSE_NONE) + return; + fprintf(stdout, "XLATED:\n=============\n%s=============\n", xlated); +} + +static void validate_msgs(char *log_buf, struct expected_msgs *msgs, + void (*emit_fn)(const char *buf, bool force)) { - int i, j, err; - char *match; regmatch_t reg_match[1]; + const char *log = log_buf; + int i, j, err;
- for (i = 0; i < subspec->expect_msg_cnt; i++) { - struct expect_msg *msg = &subspec->expect_msgs[i]; + for (i = 0; i < msgs->cnt; i++) { + struct expect_msg *msg = &msgs->patterns[i]; + const char *match = NULL;
if (msg->substr) { - match = strstr(tester->log_buf + tester->next_match_pos, msg->substr); + match = strstr(log, msg->substr); if (match) - tester->next_match_pos = match - tester->log_buf + strlen(msg->substr); + log = match + strlen(msg->substr); } else { - err = regexec(&msg->regex, - tester->log_buf + tester->next_match_pos, 1, reg_match, 0); + err = regexec(&msg->regex, log, 1, reg_match, 0); if (err == 0) { - match = tester->log_buf + tester->next_match_pos + reg_match[0].rm_so; - tester->next_match_pos += reg_match[0].rm_eo; - } else { - match = NULL; + match = log + reg_match[0].rm_so; + log += reg_match[0].rm_eo; } }
if (!ASSERT_OK_PTR(match, "expect_msg")) { if (env.verbosity == VERBOSE_NONE) - emit_verifier_log(tester->log_buf, true /*force*/); + emit_fn(log_buf, true /*force*/); for (j = 0; j <= i; j++) { - msg = &subspec->expect_msgs[j]; + msg = &msgs->patterns[j]; fprintf(stderr, "%s %s: '%s'\n", j < i ? "MATCHED " : "EXPECTED", msg->substr ? "SUBSTR" : " REGEX", @@ -611,6 +693,51 @@ static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subs return true; }
+/* Get a disassembly of BPF program after verifier applies all rewrites */ +static int get_xlated_program_text(int prog_fd, char *text, size_t text_sz) +{ + struct bpf_insn *insn_start = NULL, *insn, *insn_end; + __u32 insns_cnt = 0, i; + char buf[64]; + FILE *out = NULL; + int err; + + err = get_xlated_program(prog_fd, &insn_start, &insns_cnt); + if (!ASSERT_OK(err, "get_xlated_program")) + goto out; + out = fmemopen(text, text_sz, "w"); + if (!ASSERT_OK_PTR(out, "open_memstream")) + goto out; + insn_end = insn_start + insns_cnt; + insn = insn_start; + while (insn < insn_end) { + i = insn - insn_start; + insn = disasm_insn(insn, buf, sizeof(buf)); + fprintf(out, "%d: %s\n", i, buf); + } + fflush(out); + +out: + free(insn_start); + if (out) + fclose(out); + return err; +} + +static bool run_on_current_arch(int arch_mask) +{ + if (arch_mask == 0) + return true; +#if defined(__x86_64__) + return arch_mask & ARCH_X86_64; +#elif defined(__aarch64__) + return arch_mask & ARCH_ARM64; +#elif defined(__riscv) && __riscv_xlen == 64 + return arch_mask & ARCH_RISCV64; +#endif + return false; +} + /* this function is forced noinline and has short generic name to look better * in test_progs output (in case of a failure) */ @@ -635,6 +762,11 @@ void run_subtest(struct test_loader *tester, if (!test__start_subtest(subspec->name)) return;
+ if (!run_on_current_arch(spec->arch_mask)) { + test__skip(); + return; + } + if (unpriv) { if (!can_execute_unpriv(tester, spec)) { test__skip(); @@ -695,9 +827,17 @@ void run_subtest(struct test_loader *tester, goto tobj_cleanup; } } - emit_verifier_log(tester->log_buf, false /*force*/); - validate_case(tester, subspec, tobj, tprog, err); + validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log); + + if (subspec->expect_xlated.cnt) { + err = get_xlated_program_text(bpf_program__fd(tprog), + tester->log_buf, tester->log_buf_sz); + if (err) + goto tobj_cleanup; + emit_xlated(tester->log_buf, false /*force*/); + validate_msgs(tester->log_buf, &subspec->expect_xlated, emit_xlated); + }
if (should_do_test_run(spec, subspec)) { /* For some reason test_verifier executes programs diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 4d0650cfb5cd..fda7589c5023 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -126,7 +126,8 @@ static int sched_next_online(int pid, int *next_to_try)
while (next < nr_cpus) { CPU_ZERO(&cpuset); - CPU_SET(next++, &cpuset); + CPU_SET(next, &cpuset); + next++; if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) { ret = 0; break; diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 89ff704e9dad..d5d0cb4eb197 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -10,7 +10,6 @@ #include <sched.h> #include <signal.h> #include <string.h> -#include <execinfo.h> /* backtrace */ #include <sys/sysinfo.h> /* get_nprocs */ #include <netinet/in.h> #include <sys/select.h> @@ -19,6 +18,21 @@ #include <bpf/btf.h> #include "json_writer.h"
+#ifdef __GLIBC__ +#include <execinfo.h> /* backtrace */ +#endif + +/* Default backtrace funcs if missing at link */ +__weak int backtrace(void **buffer, int size) +{ + return 0; +} + +__weak void backtrace_symbols_fd(void *const *buffer, int size, int fd) +{ + dprintf(fd, "<backtrace not supported>\n"); +} + static bool verbose(void) { return env.verbosity > VERBOSE_NONE; @@ -1731,7 +1745,7 @@ int main(int argc, char **argv) /* launch workers if requested */ env.worker_id = -1; /* main process */ if (env.workers) { - env.worker_pids = calloc(sizeof(__pid_t), env.workers); + env.worker_pids = calloc(sizeof(pid_t), env.workers); env.worker_socks = calloc(sizeof(int), env.workers); if (env.debug) fprintf(stdout, "Launching %d workers.\n", env.workers); diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 51341d50213b..b1e949fb16cf 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -447,7 +447,6 @@ typedef int (*pre_execution_cb)(struct bpf_object *obj); struct test_loader { char *log_buf; size_t log_buf_sz; - size_t next_match_pos; pre_execution_cb pre_execution_cb;
struct bpf_object *obj; diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index d5379a0e6da8..680e452583a7 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -220,13 +220,13 @@ int parse_test_list(const char *s, bool is_glob_pattern) { char *input, *state = NULL, *test_spec; - int err = 0; + int err = 0, cnt = 0;
input = strdup(s); if (!input) return -ENOMEM;
- while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) { + while ((test_spec = strtok_r(cnt++ ? NULL : input, ",", &state))) { err = insert_test(set, test_spec, is_glob_pattern); if (err) break; @@ -451,7 +451,7 @@ int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
*cnt = xlated_prog_len / buf_element_size; *buf = calloc(*cnt, buf_element_size); - if (!buf) { + if (!*buf) { perror("can't allocate xlated program buffer"); return -ENOMEM; } diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c index b6d016461fb0..220f6a963813 100644 --- a/tools/testing/selftests/bpf/unpriv_helpers.c +++ b/tools/testing/selftests/bpf/unpriv_helpers.c @@ -2,7 +2,6 @@
#include <stdbool.h> #include <stdlib.h> -#include <error.h> #include <stdio.h> #include <string.h> #include <unistd.h> diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index b2854238d4a0..fd9780082ff4 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -784,13 +784,13 @@ static int parse_stat(const char *stat_name, struct stat_specs *specs) static int parse_stats(const char *stats_str, struct stat_specs *specs) { char *input, *state = NULL, *next; - int err; + int err, cnt = 0;
input = strdup(stats_str); if (!input) return -ENOMEM;
- while ((next = strtok_r(state ? NULL : input, ",", &state))) { + while ((next = strtok_r(cnt++ ? NULL : input, ",", &state))) { err = parse_stat(next, specs); if (err) { free(input); @@ -1493,7 +1493,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs, while (fgets(line, sizeof(line), f)) { char *input = line, *state = NULL, *next; struct verif_stats *st = NULL; - int col = 0; + int col = 0, cnt = 0;
if (!header) { void *tmp; @@ -1511,7 +1511,7 @@ static int parse_stats_csv(const char *filename, struct stat_specs *specs, *stat_cntp += 1; }
- while ((next = strtok_r(state ? NULL : input, ",\n", &state))) { + while ((next = strtok_r(cnt++ ? NULL : input, ",\n", &state))) { if (header) { /* for the first line, set up spec stats */ err = parse_stat(next, specs); diff --git a/tools/testing/selftests/dt/test_unprobed_devices.sh b/tools/testing/selftests/dt/test_unprobed_devices.sh index 2d7e70c5ad2d..5e3f42ef249e 100755 --- a/tools/testing/selftests/dt/test_unprobed_devices.sh +++ b/tools/testing/selftests/dt/test_unprobed_devices.sh @@ -34,8 +34,21 @@ nodes_compatible=$( # Check if node is available if [[ -e "${node}"/status ]]; then status=$(tr -d '\000' < "${node}"/status) - [[ "${status}" != "okay" && "${status}" != "ok" ]] && continue + if [[ "${status}" != "okay" && "${status}" != "ok" ]]; then + if [ -n "${disabled_nodes_regex}" ]; then + disabled_nodes_regex="${disabled_nodes_regex}|${node}" + else + disabled_nodes_regex="${node}" + fi + continue + fi fi + + # Ignore this node if one of its ancestors was disabled + if [ -n "${disabled_nodes_regex}" ]; then + echo "${node}" | grep -q -E "${disabled_nodes_regex}" && continue + fi + echo "${node}" | sed -e 's|/proc/device-tree||' done | sort ) diff --git a/tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc b/tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc index c45094d1e1d2..803efd7b56c7 100644 --- a/tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc +++ b/tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc @@ -6,6 +6,18 @@ original_group=`stat -c "%g" .` original_owner=`stat -c "%u" .`
diff --git a/tools/testing/selftests/dt/test_unprobed_devices.sh b/tools/testing/selftests/dt/test_unprobed_devices.sh
index 2d7e70c5ad2d..5e3f42ef249e 100755
--- a/tools/testing/selftests/dt/test_unprobed_devices.sh
+++ b/tools/testing/selftests/dt/test_unprobed_devices.sh
@@ -34,8 +34,21 @@ nodes_compatible=$(
 		# Check if node is available
 		if [[ -e "${node}"/status ]]; then
 			status=$(tr -d '\000' < "${node}"/status)
-			[[ "${status}" != "okay" && "${status}" != "ok" ]] && continue
+			if [[ "${status}" != "okay" && "${status}" != "ok" ]]; then
+				if [ -n "${disabled_nodes_regex}" ]; then
+					disabled_nodes_regex="${disabled_nodes_regex}|${node}"
+				else
+					disabled_nodes_regex="${node}"
+				fi
+				continue
+			fi
 		fi
+
+		# Ignore this node if one of its ancestors was disabled
+		if [ -n "${disabled_nodes_regex}" ]; then
+			echo "${node}" | grep -q -E "${disabled_nodes_regex}" && continue
+		fi
+
 		echo "${node}" | sed -e 's|/proc/device-tree||'
 	done | sort
 )
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc b/tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc
index c45094d1e1d2..803efd7b56c7 100644
--- a/tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc
+++ b/tools/testing/selftests/ftrace/test.d/00basic/test_ownership.tc
@@ -6,6 +6,18 @@
 original_group=`stat -c "%g" .`
 original_owner=`stat -c "%u" .`
 mount_point=`stat -c '%m' .`
+
+# If stat -c '%m' does not work (e.g. busybox) or failed, try to use the
+# current working directory (which should be a tracefs) as the mount point.
+if [ ! -d "$mount_point" ]; then
+	if mount | grep -qw $PWD ; then
+		mount_point=$PWD
+	else
+		# If PWD doesn't work, that is an environmental problem.
+		exit_unresolved
+	fi
+fi
+
 mount_options=`mount | grep "$mount_point" | sed -e 's/.*(\(.*\)).*/\1/'`
 
 # find another owner and group that is not the original
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
index 073a748b9380..263f6b798c85 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
@@ -19,7 +19,14 @@ fail() { # mesg
 
 FILTER=set_ftrace_filter
 FUNC1="schedule"
-FUNC2="sched_tick"
+if grep '^sched_tick\b' available_filter_functions; then
+  FUNC2="sched_tick"
+elif grep '^scheduler_tick\b' available_filter_functions; then
+  FUNC2="scheduler_tick"
+else
+  exit_unresolved
+fi
+
 
 ALL_FUNCS="#### all functions enabled ####"
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
index e21c9c27ece4..77f4c07cdcb8 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event char type argument
-# requires: kprobe_events
+# requires: kprobe_events available_filter_functions
 
 case `uname -m` in
 x86_64)
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
index 93217d459556..39001073f7ed 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event string type argument
-# requires: kprobe_events
+# requires: kprobe_events available_filter_functions
 
 case `uname -m` in
 x86_64)
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index b8967b6e29d5..e195ec156859 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -61,6 +61,7 @@
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
 #endif
 
+#if defined(__i386__) || defined(__x86_64__) /* arch */
 /*
  * gcc cpuid.h provides __cpuid_count() since v4.4.
  * Clang/LLVM cpuid.h provides __cpuid_count() since v3.4.0.
@@ -75,6 +76,7 @@
 		      : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \
 		      : "0" (level), "2" (count))
 #endif
+#endif /* end arch */
 
 /* define kselftest exit codes */
 #define KSFT_PASS 0
diff --git a/tools/testing/selftests/mm/mseal_test.c b/tools/testing/selftests/mm/mseal_test.c
index bfcea5cf9a48..473853718542 100644
--- a/tools/testing/selftests/mm/mseal_test.c
+++ b/tools/testing/selftests/mm/mseal_test.c
@@ -99,6 +99,16 @@ static int sys_madvise(void *start, size_t len, int types)
 	return sret;
 }
 
+static void *sys_mremap(void *addr, size_t old_len, size_t new_len,
+			unsigned long flags, void *new_addr)
+{
+	void *sret;
+
+	errno = 0;
+	sret = (void *) syscall(__NR_mremap, addr, old_len, new_len, flags, new_addr);
+	return sret;
+}
+
 static int sys_pkey_alloc(unsigned long flags, unsigned long init_val)
 {
 	int ret = syscall(__NR_pkey_alloc, flags, init_val);
@@ -1104,12 +1114,12 @@ static void test_seal_mremap_shrink(bool seal)
 	}
 
 	/* shrink from 4 pages to 2 pages. */
-	ret2 = mremap(ptr, size, 2 * page_size, 0, 0);
+	ret2 = sys_mremap(ptr, size, 2 * page_size, 0, 0);
 	if (seal) {
-		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
+		FAIL_TEST_IF_FALSE(ret2 == (void *) MAP_FAILED);
 		FAIL_TEST_IF_FALSE(errno == EPERM);
 	} else {
-		FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
+		FAIL_TEST_IF_FALSE(ret2 != (void *) MAP_FAILED);
 	}
 
@@ -1136,7 +1146,7 @@ static void test_seal_mremap_expand(bool seal)
 	}
 
 	/* expand from 2 page to 4 pages. */
-	ret2 = mremap(ptr, 2 * page_size, 4 * page_size, 0, 0);
+	ret2 = sys_mremap(ptr, 2 * page_size, 4 * page_size, 0, 0);
 	if (seal) {
 		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
 		FAIL_TEST_IF_FALSE(errno == EPERM);
@@ -1169,7 +1179,7 @@ static void test_seal_mremap_move(bool seal)
 	}
 
 	/* move from ptr to fixed address. */
-	ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newPtr);
+	ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newPtr);
 	if (seal) {
 		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
 		FAIL_TEST_IF_FALSE(errno == EPERM);
@@ -1288,7 +1298,7 @@ static void test_seal_mremap_shrink_fixed(bool seal)
 	}
 
 	/* mremap to move and shrink to fixed address */
-	ret2 = mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
+	ret2 = sys_mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
 			newAddr);
 	if (seal) {
 		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
@@ -1319,7 +1329,7 @@ static void test_seal_mremap_expand_fixed(bool seal)
 	}
 
 	/* mremap to move and expand to fixed address */
-	ret2 = mremap(ptr, page_size, size, MREMAP_MAYMOVE | MREMAP_FIXED,
+	ret2 = sys_mremap(ptr, page_size, size, MREMAP_MAYMOVE | MREMAP_FIXED,
 			newAddr);
 	if (seal) {
 		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
@@ -1350,7 +1360,7 @@ static void test_seal_mremap_move_fixed(bool seal)
 	}
 
 	/* mremap to move to fixed address */
-	ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newAddr);
+	ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, newAddr);
 	if (seal) {
 		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
 		FAIL_TEST_IF_FALSE(errno == EPERM);
@@ -1379,14 +1389,13 @@ static void test_seal_mremap_move_fixed_zero(bool seal)
 	/*
 	 * MREMAP_FIXED can move the mapping to zero address
 	 */
-	ret2 = mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
+	ret2 = sys_mremap(ptr, size, 2 * page_size, MREMAP_MAYMOVE | MREMAP_FIXED,
 			0);
 	if (seal) {
 		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
 		FAIL_TEST_IF_FALSE(errno == EPERM);
 	} else {
 		FAIL_TEST_IF_FALSE(ret2 == 0);
-
 	}
 
 	REPORT_TEST_PASS();
@@ -1409,13 +1418,13 @@ static void test_seal_mremap_move_dontunmap(bool seal)
 	}
 
 	/* mremap to move, and don't unmap src addr. */
-	ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, 0);
+	ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, 0);
 	if (seal) {
 		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
 		FAIL_TEST_IF_FALSE(errno == EPERM);
 	} else {
+		/* kernel will allocate a new address */
 		FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
-
 	}
 
 	REPORT_TEST_PASS();
@@ -1423,7 +1432,7 @@ static void test_seal_mremap_move_dontunmap(bool seal)
 
 static void test_seal_mremap_move_dontunmap_anyaddr(bool seal)
 {
-	void *ptr;
+	void *ptr, *ptr2;
 	unsigned long page_size = getpagesize();
 	unsigned long size = 4 * page_size;
 	int ret;
@@ -1438,24 +1447,30 @@ static void test_seal_mremap_move_dontunmap_anyaddr(bool seal)
 	}
 
 	/*
-	 * The 0xdeaddead should not have effect on dest addr
-	 * when MREMAP_DONTUNMAP is set.
+	 * The new address is any address that not allocated.
+	 * use allocate/free to similate that.
 	 */
-	ret2 = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
-			0xdeaddead);
+	setup_single_address(size, &ptr2);
+	FAIL_TEST_IF_FALSE(ptr2 != (void *)-1);
+	ret = sys_munmap(ptr2, size);
+	FAIL_TEST_IF_FALSE(!ret);
+
+	/*
+	 * remap to any address.
+	 */
+	ret2 = sys_mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP,
+			(void *) ptr2);
 	if (seal) {
 		FAIL_TEST_IF_FALSE(ret2 == MAP_FAILED);
 		FAIL_TEST_IF_FALSE(errno == EPERM);
 	} else {
-		FAIL_TEST_IF_FALSE(ret2 != MAP_FAILED);
-		FAIL_TEST_IF_FALSE((long)ret2 != 0xdeaddead);
-
+		/* remap success and return ptr2 */
+		FAIL_TEST_IF_FALSE(ret2 == ptr2);
 	}
 
 	REPORT_TEST_PASS();
 }
 
-
 static void test_seal_merge_and_split(void)
 {
 	void *ptr;
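The mseal hunks above route mremap() through a raw __NR_mremap syscall wrapper, so the test controls exactly which arguments reach the kernel and reads the return value and errno straight from the syscall rather than through libc. A standalone sketch of such a wrapper, assuming Linux and an anonymous mapping, and not taken from the patch:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

/* thin wrapper: no libc argument handling, errno comes straight from the kernel */
static void *sys_mremap(void *addr, size_t old_len, size_t new_len,
			unsigned long flags, void *new_addr)
{
	errno = 0;
	return (void *)syscall(__NR_mremap, addr, old_len, new_len, flags, new_addr);
}

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *q;

	if (p == MAP_FAILED)
		return 1;

	/* shrink the mapping from 4 pages to 2, as the selftest does */
	q = sys_mremap(p, 4 * page, 2 * page, 0, NULL);
	if (q == MAP_FAILED)
		perror("mremap");
	else
		printf("remapped at %p\n", q);

	return 0;
}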
diff --git a/tools/testing/selftests/net/af_unix/msg_oob.c b/tools/testing/selftests/net/af_unix/msg_oob.c
index 535eb2c3d7d1..3ed3882a93b8 100644
--- a/tools/testing/selftests/net/af_unix/msg_oob.c
+++ b/tools/testing/selftests/net/af_unix/msg_oob.c
@@ -525,6 +525,29 @@ TEST_F(msg_oob, ex_oob_drop_2)
 	}
 }
 
+TEST_F(msg_oob, ex_oob_oob)
+{
+	sendpair("x", 1, MSG_OOB);
+	epollpair(true);
+	siocatmarkpair(true);
+
+	recvpair("x", 1, 1, MSG_OOB);
+	epollpair(false);
+	siocatmarkpair(true);
+
+	sendpair("y", 1, MSG_OOB);
+	epollpair(true);
+	siocatmarkpair(true);
+
+	recvpair("", -EAGAIN, 1, 0);
+	epollpair(false);
+	siocatmarkpair(false);
+
+	recvpair("", -EINVAL, 1, MSG_OOB);
+	epollpair(false);
+	siocatmarkpair(false);
+}
+
 TEST_F(msg_oob, ex_oob_ahead_break)
 {
 	sendpair("hello", 5, MSG_OOB);
diff --git a/tools/testing/selftests/net/netfilter/ipvs.sh b/tools/testing/selftests/net/netfilter/ipvs.sh
index 4ceee9fb3949..d3edb16cd4b3 100755
--- a/tools/testing/selftests/net/netfilter/ipvs.sh
+++ b/tools/testing/selftests/net/netfilter/ipvs.sh
@@ -97,7 +97,7 @@ cleanup() {
 }
 
 server_listen() {
-	ip netns exec "$ns2" socat -u -4 TCP-LISTEN:8080,reuseaddr STDOUT > "${outfile}" &
+	ip netns exec "$ns2" timeout 5 socat -u -4 TCP-LISTEN:8080,reuseaddr STDOUT > "${outfile}" &
 	server_pid=$!
 	sleep 0.2
 }
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index 742782438ca3..94cfdba5308d 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -290,12 +290,12 @@ static int cat_run_test(const struct resctrl_test *test, const struct user_param
 
 static bool arch_supports_noncont_cat(const struct resctrl_test *test)
 {
-	unsigned int eax, ebx, ecx, edx;
-
 	/* AMD always supports non-contiguous CBM. */
 	if (get_vendor() == ARCH_AMD)
 		return true;
 
+#if defined(__i386__) || defined(__x86_64__) /* arch */
+	unsigned int eax, ebx, ecx, edx;
+
 	/* Intel support for non-contiguous CBM needs to be discovered. */
 	if (!strcmp(test->resource, "L3"))
 		__cpuid_count(0x10, 1, eax, ebx, ecx, edx);
@@ -305,6 +305,9 @@ static bool arch_supports_noncont_cat(const struct resctrl_test *test)
 		return false;
 
 	return ((ecx >> 3) & 1);
+#endif /* end arch */
+
+	return false;
 }
 
 static int noncont_cat_run_test(const struct resctrl_test *test,
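Both the kselftest.h hunk earlier in this series and the cat_test.c hunk above apply the same fix: __cpuid_count() only exists on x86, so the call is wrapped in an architecture guard and other architectures fall back to a safe default. A small standalone sketch of the guard pattern, using a hypothetical helper purely for illustration:

#include <stdio.h>

#if defined(__i386__) || defined(__x86_64__)
#include <cpuid.h>
#endif

/* Report the maximum basic CPUID leaf on x86; 0 on every other architecture. */
static unsigned int max_cpuid_leaf(void)
{
#if defined(__i386__) || defined(__x86_64__)
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(0, 0, eax, ebx, ecx, edx);
	return eax;
#else
	return 0;
#endif
}

int main(void)
{
	printf("max basic leaf: %u\n", max_cpuid_leaf());
	return 0;
}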
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cb2b78e92910..7164a9ece208 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5575,6 +5575,7 @@ __visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static DEFINE_PER_CPU(bool, hardware_enabled);
+static DEFINE_MUTEX(kvm_usage_lock);
 static int kvm_usage_count;
 
 static int __hardware_enable_nolock(void)
@@ -5607,10 +5608,10 @@ static int kvm_online_cpu(unsigned int cpu)
 	 * be enabled. Otherwise running VMs would encounter unrecoverable
 	 * errors when scheduled to this CPU.
 	 */
-	mutex_lock(&kvm_lock);
+	mutex_lock(&kvm_usage_lock);
 	if (kvm_usage_count)
 		ret = __hardware_enable_nolock();
-	mutex_unlock(&kvm_lock);
+	mutex_unlock(&kvm_usage_lock);
 	return ret;
 }
 
@@ -5630,10 +5631,10 @@ static void hardware_disable_nolock(void *junk)
 
 static int kvm_offline_cpu(unsigned int cpu)
 {
-	mutex_lock(&kvm_lock);
+	mutex_lock(&kvm_usage_lock);
 	if (kvm_usage_count)
 		hardware_disable_nolock(NULL);
-	mutex_unlock(&kvm_lock);
+	mutex_unlock(&kvm_usage_lock);
 	return 0;
 }
 
@@ -5649,9 +5650,9 @@ static void hardware_disable_all_nolock(void)
 static void hardware_disable_all(void)
 {
 	cpus_read_lock();
-	mutex_lock(&kvm_lock);
+	mutex_lock(&kvm_usage_lock);
 	hardware_disable_all_nolock();
-	mutex_unlock(&kvm_lock);
+	mutex_unlock(&kvm_usage_lock);
 	cpus_read_unlock();
 }
 
@@ -5682,7 +5683,7 @@ static int hardware_enable_all(void)
 	 * enable hardware multiple times.
 	 */
 	cpus_read_lock();
-	mutex_lock(&kvm_lock);
+	mutex_lock(&kvm_usage_lock);
 
 	r = 0;
 
@@ -5696,7 +5697,7 @@ static int hardware_enable_all(void)
 		}
 	}
 
-	mutex_unlock(&kvm_lock);
+	mutex_unlock(&kvm_usage_lock);
 	cpus_read_unlock();
 
 	return r;
@@ -5724,13 +5725,13 @@ static int kvm_suspend(void)
 {
 	/*
 	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
-	 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
-	 * is stable. Assert that kvm_lock is not held to ensure the system
-	 * isn't suspended while KVM is enabling hardware. Hardware enabling
-	 * can be preempted, but the task cannot be frozen until it has dropped
-	 * all locks (userspace tasks are frozen via a fake signal).
+	 * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
+	 * count is stable. Assert that kvm_usage_lock is not held to ensure
+	 * the system isn't suspended while KVM is enabling hardware. Hardware
+	 * enabling can be preempted, but the task cannot be frozen until it has
+	 * dropped all locks (userspace tasks are frozen via a fake signal).
 	 */
-	lockdep_assert_not_held(&kvm_lock);
+	lockdep_assert_not_held(&kvm_usage_lock);
 	lockdep_assert_irqs_disabled();
 
 	if (kvm_usage_count)
@@ -5740,7 +5741,7 @@ static int kvm_suspend(void)
 
 static void kvm_resume(void)
 {
-	lockdep_assert_not_held(&kvm_lock);
+	lockdep_assert_not_held(&kvm_usage_lock);
 	lockdep_assert_irqs_disabled();
 
 	if (kvm_usage_count)
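The kvm_main.c hunks above move the usage-count protection from kvm_lock to a dedicated kvm_usage_lock, so the CPU hotplug and suspend paths only ever take the narrow lock that guards the counter. A userspace analogue of that enable/disable refcount pattern with its own small mutex, an illustration only and not the kernel code:

#include <pthread.h>
#include <stdio.h>

/* narrow lock: protects only the usage count, nothing else */
static pthread_mutex_t usage_lock = PTHREAD_MUTEX_INITIALIZER;
static int usage_count;

static void hardware_enable(void)  { printf("enable\n"); }
static void hardware_disable(void) { printf("disable\n"); }

static void enable_all(void)
{
	pthread_mutex_lock(&usage_lock);
	if (usage_count++ == 0)
		hardware_enable();
	pthread_mutex_unlock(&usage_lock);
}

static void disable_all(void)
{
	pthread_mutex_lock(&usage_lock);
	if (--usage_count == 0)
		hardware_disable();
	pthread_mutex_unlock(&usage_lock);
}

int main(void)
{
	enable_all();
	enable_all();
	disable_all();
	disable_all();
	return 0;
}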