I'm announcing the release of the 6.12.31 kernel.
All users of the 6.12 kernel series must upgrade.
The updated 6.12.y git tree can be found at:
        git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.12.y
and can be browsed at the normal kernel.org git web browser:
        https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
thanks,
greg k-h
------------
Documentation/admin-guide/kernel-parameters.txt | 2 Documentation/driver-api/serial/driver.rst | 2 Documentation/hwmon/dell-smm-hwmon.rst | 14 Makefile | 6 arch/arm/boot/dts/nvidia/tegra114.dtsi | 2 arch/arm/mach-at91/pm.c | 21 arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts | 38 - arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts | 14 arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi | 22 arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi | 8 arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi | 2 arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts | 10 arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi | 15 arch/arm64/include/asm/cputype.h | 2 arch/arm64/include/asm/pgtable.h | 27 - arch/arm64/kernel/proton-pack.c | 1 arch/loongarch/kernel/Makefile | 8 arch/loongarch/kvm/Makefile | 2 arch/mips/include/asm/ftrace.h | 16 arch/mips/kernel/pm-cps.c | 30 - arch/powerpc/include/asm/mmzone.h | 1 arch/powerpc/kernel/prom_init.c | 4 arch/powerpc/mm/book3s64/radix_pgtable.c | 3 arch/powerpc/mm/numa.c | 2 arch/powerpc/perf/core-book3s.c | 20 arch/powerpc/perf/isa207-common.c | 4 arch/powerpc/platforms/pseries/iommu.c | 139 ++++-- arch/riscv/include/asm/page.h | 12 arch/riscv/include/asm/pgtable.h | 2 arch/riscv/kernel/Makefile | 4 arch/riscv/mm/tlbflush.c | 37 - arch/s390/hypfs/hypfs_diag_fs.c | 2 arch/s390/include/asm/tlb.h | 2 arch/um/kernel/mem.c | 1 arch/x86/Kconfig | 1 arch/x86/boot/genimage.sh | 5 arch/x86/entry/entry.S | 2 arch/x86/events/amd/ibs.c | 20 arch/x86/events/intel/ds.c | 9 arch/x86/include/asm/bug.h | 5 arch/x86/include/asm/cfi.h | 11 arch/x86/include/asm/ibt.h | 4 arch/x86/include/asm/intel-family.h | 1 arch/x86/include/asm/nmi.h | 2 arch/x86/include/asm/percpu.h | 28 - arch/x86/include/asm/perf_event.h | 1 arch/x86/include/asm/sev-common.h | 2 arch/x86/include/uapi/asm/bootparam.h | 4 arch/x86/include/uapi/asm/e820.h | 4 arch/x86/include/uapi/asm/ldt.h | 4 arch/x86/include/uapi/asm/msr.h | 4 arch/x86/include/uapi/asm/ptrace-abi.h | 6 arch/x86/include/uapi/asm/ptrace.h | 4 arch/x86/include/uapi/asm/setup_data.h | 4 arch/x86/include/uapi/asm/signal.h | 8 arch/x86/kernel/alternative.c | 30 + arch/x86/kernel/cfi.c | 22 arch/x86/kernel/cpu/bugs.c | 10 arch/x86/kernel/cpu/microcode/intel.c | 2 arch/x86/kernel/nmi.c | 42 + arch/x86/kernel/reboot.c | 10 arch/x86/kernel/smpboot.c | 6 arch/x86/kernel/traps.c | 82 ++- arch/x86/mm/init.c | 9 arch/x86/mm/init_64.c | 15 arch/x86/mm/kaslr.c | 10 arch/x86/power/cpu.c | 14 arch/x86/um/os-Linux/mcontext.c | 3 block/badblocks.c | 5 block/bdev.c | 17 block/blk-cgroup.c | 22 block/blk-settings.c | 5 block/blk-throttle.c | 15 block/blk-zoned.c | 5 block/blk.h | 3 block/bounce.c | 2 block/fops.c | 16 block/ioctl.c | 6 crypto/ahash.c | 4 crypto/algif_hash.c | 4 crypto/lzo-rle.c | 2 crypto/lzo.c | 2 crypto/skcipher.c | 1 drivers/accel/qaic/qaic_drv.c | 2 drivers/acpi/Kconfig | 2 drivers/acpi/acpi_pnp.c | 2 drivers/acpi/hed.c | 7 drivers/auxdisplay/charlcd.c | 5 drivers/auxdisplay/charlcd.h | 5 drivers/auxdisplay/hd44780.c | 2 drivers/auxdisplay/lcd2s.c | 2 drivers/auxdisplay/panel.c | 2 drivers/block/loop.c | 5 drivers/block/ublk_drv.c | 53 +- drivers/bluetooth/btmtksdio.c | 15 drivers/bluetooth/btusb.c | 98 +--- drivers/char/tpm/tpm2-sessions.c | 2 drivers/clk/clk-s2mps11.c | 3 drivers/clk/imx/clk-imx8mp.c | 151 ++++++ drivers/clk/qcom/Kconfig | 2 drivers/clk/qcom/camcc-sm8250.c | 56 +- drivers/clk/qcom/clk-alpha-pll.c | 52 +- drivers/clk/qcom/lpassaudiocc-sc7280.c | 23 - drivers/clk/renesas/rzg2l-cpg.c | 102 ++-- 
drivers/clk/sunxi-ng/ccu-sun20i-d1.c | 44 + drivers/clk/sunxi-ng/ccu_mp.h | 22 drivers/clocksource/mips-gic-timer.c | 6 drivers/clocksource/timer-riscv.c | 6 drivers/cpufreq/amd-pstate.c | 1 drivers/cpufreq/cpufreq-dt-platdev.c | 1 drivers/cpufreq/tegra186-cpufreq.c | 7 drivers/cpuidle/governors/menu.c | 13 drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c | 7 drivers/crypto/mxs-dcp.c | 8 drivers/dma/fsl-edma-main.c | 2 drivers/dma/idxd/cdev.c | 9 drivers/dpll/dpll_core.c | 5 drivers/edac/ie31200_edac.c | 28 - drivers/firmware/arm_ffa/bus.c | 1 drivers/firmware/arm_ffa/driver.c | 12 drivers/firmware/arm_scmi/bus.c | 19 drivers/firmware/xilinx/zynqmp.c | 6 drivers/fpga/altera-cvp.c | 2 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 5 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 52 -- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 30 + drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 31 - drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 30 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 14 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 3 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 38 + drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 7 drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 42 + drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 47 +- drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 48 +- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2 drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 25 + drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 27 + drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 31 + drivers/gpu/drm/amd/amdgpu/nv.c | 16 drivers/gpu/drm/amd/amdgpu/soc21.c | 10 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 39 - drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 69 ++- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c | 41 + drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c | 41 + drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c | 41 + drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 35 + drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 71 +-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 16 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 23 - drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 41 + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6 drivers/gpu/drm/amd/display/dc/basics/dc_common.c | 3 drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 9 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 3 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 22 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 15 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 13 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 2 drivers/gpu/drm/amd/display/dc/core/dc.c | 22 drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 21 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4 drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 12 drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 5 drivers/gpu/drm/amd/display/dc/dc_types.h | 7 drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c | 3 drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 3 drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 4 drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c | 3 drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c | 3 
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 6 drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 1 drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 8 drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 5 drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 1 drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 11 drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c | 3 drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 10 drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 7 drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 8 drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 4 drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 3 drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 139 +++++- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 7 drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c | 4 drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 6 drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 1 drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 6 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 42 + drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c | 8 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 5 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c | 7 drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 25 - drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 42 - drivers/gpu/drm/amd/display/dc/spl/dc_spl.c | 4 drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h | 2 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 6 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 17 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 4 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c | 47 +- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h | 3 drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 4 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h | 32 + drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h | 48 ++ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 5 drivers/gpu/drm/ast/ast_mode.c | 10 drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 2 drivers/gpu/drm/drm_atomic_helper.c | 28 + drivers/gpu/drm/drm_buddy.c | 5 drivers/gpu/drm/drm_edid.c | 1 drivers/gpu/drm/drm_gem.c | 4 drivers/gpu/drm/mediatek/mtk_dpi.c | 5 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 2 drivers/gpu/drm/panel/panel-edp.c | 1 drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 40 + drivers/gpu/drm/v3d/v3d_drv.c | 25 - drivers/gpu/drm/xe/xe_bo.c | 22 drivers/gpu/drm/xe/xe_debugfs.c | 3 drivers/gpu/drm/xe/xe_device.c | 10 drivers/gpu/drm/xe/xe_gen_wa_oob.c | 6 drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 37 + drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 12 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 22 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 2 drivers/gpu/drm/xe/xe_guc_relay.c | 2 drivers/gpu/drm/xe/xe_oa.c | 1 drivers/gpu/drm/xe/xe_pci_sriov.c | 51 ++ drivers/gpu/drm/xe/xe_pt.c | 14 drivers/gpu/drm/xe/xe_pt.h | 3 drivers/gpu/drm/xe/xe_sa.c | 3 drivers/gpu/drm/xe/xe_tile.c | 12 drivers/gpu/drm/xe/xe_tile.h | 1 drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 17 drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h | 2 drivers/gpu/drm/xe/xe_vm.c | 32 + drivers/hid/usbhid/usbkbd.c | 2 drivers/hwmon/dell-smm-hwmon.c | 5 drivers/hwmon/gpio-fan.c | 16 
drivers/hwmon/xgene-hwmon.c | 2 drivers/hwtracing/coresight/coresight-etb10.c | 26 - drivers/hwtracing/intel_th/Kconfig | 1 drivers/hwtracing/intel_th/msu.c | 31 - drivers/i2c/busses/i2c-designware-pcidrv.c | 33 - drivers/i2c/busses/i2c-designware-platdrv.c | 48 +- drivers/i2c/busses/i2c-pxa.c | 5 drivers/i2c/busses/i2c-qup.c | 36 + drivers/i3c/master/svc-i3c-master.c | 4 drivers/iio/adc/ad7944.c | 16 drivers/infiniband/core/umem.c | 36 + drivers/infiniband/core/uverbs_cmd.c | 144 +++--- drivers/infiniband/core/verbs.c | 11 drivers/input/joystick/xpad.c | 3 drivers/iommu/amd/io_pgtable_v2.c | 2 drivers/iommu/dma-iommu.c | 28 - drivers/iommu/iommu-priv.h | 2 drivers/iommu/iommu.c | 2 drivers/iommu/iommufd/device.c | 34 + drivers/iommu/iommufd/hw_pagetable.c | 3 drivers/iommu/of_iommu.c | 6 drivers/irqchip/irq-riscv-aplic-direct.c | 24 - drivers/irqchip/irq-riscv-imsic-early.c | 8 drivers/irqchip/irq-riscv-imsic-platform.c | 16 drivers/irqchip/irq-riscv-imsic-state.c | 96 ++-- drivers/irqchip/irq-riscv-imsic-state.h | 7 drivers/leds/rgb/leds-pwm-multicolor.c | 5 drivers/leds/trigger/ledtrig-netdev.c | 16 drivers/mailbox/mailbox.c | 7 drivers/mailbox/pcc.c | 8 drivers/md/dm-cache-target.c | 24 + drivers/md/dm-table.c | 4 drivers/md/dm-vdo/indexer/index-layout.c | 5 drivers/md/dm-vdo/vdo.c | 11 drivers/md/dm.c | 8 drivers/media/i2c/adv7180.c | 34 - drivers/media/i2c/imx219.c | 2 drivers/media/i2c/imx335.c | 21 drivers/media/i2c/tc358746.c | 19 drivers/media/platform/qcom/camss/camss-csid.c | 60 +- drivers/media/platform/qcom/camss/camss-vfe.c | 4 drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c | 3 drivers/media/test-drivers/vivid/vivid-kthread-cap.c | 11 drivers/media/test-drivers/vivid/vivid-kthread-out.c | 11 drivers/media/test-drivers/vivid/vivid-kthread-touch.c | 11 drivers/media/test-drivers/vivid/vivid-sdr-cap.c | 11 drivers/media/usb/cx231xx/cx231xx-417.c | 2 drivers/media/usb/uvc/uvc_ctrl.c | 77 +-- drivers/media/usb/uvc/uvc_v4l2.c | 6 drivers/media/v4l2-core/v4l2-subdev.c | 2 drivers/mfd/axp20x.c | 1 drivers/mfd/tps65219.c | 7 drivers/misc/eeprom/ee1004.c | 4 drivers/misc/mei/vsc-tp.c | 14 drivers/misc/pci_endpoint_test.c | 6 drivers/mmc/host/dw_mmc-exynos.c | 41 + drivers/mmc/host/sdhci-pci-core.c | 6 drivers/mmc/host/sdhci.c | 9 drivers/net/bonding/bond_main.c | 2 drivers/net/can/c_can/c_can_platform.c | 2 drivers/net/can/kvaser_pciefd.c | 101 ++-- drivers/net/can/slcan/slcan-core.c | 26 - drivers/net/ethernet/apm/xgene-v2/main.c | 4 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 drivers/net/ethernet/freescale/enetc/enetc.c | 16 drivers/net/ethernet/freescale/fec_main.c | 52 +- drivers/net/ethernet/intel/ice/ice_ethtool.c | 3 drivers/net/ethernet/intel/ice/ice_irq.c | 25 - drivers/net/ethernet/intel/ice/ice_lag.c | 6 drivers/net/ethernet/intel/ice/ice_lib.c | 2 drivers/net/ethernet/intel/ice/ice_main.c | 6 drivers/net/ethernet/intel/ice/ice_virtchnl.c | 1 drivers/net/ethernet/intel/idpf/idpf.h | 2 drivers/net/ethernet/intel/idpf/idpf_lib.c | 10 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 18 drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 14 drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 2 drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 24 - drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 11 drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 8 drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 22 drivers/net/ethernet/mellanox/mlx4/alloc.c | 6 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2 drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 
drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 16 drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 1 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 1 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 49 -- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 28 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 36 - drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 3 drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c | 13 drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h | 5 drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c | 2 drivers/net/ethernet/mellanox/mlx5/core/events.c | 11 drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c | 6 drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h | 2 drivers/net/ethernet/mellanox/mlx5/core/health.c | 1 drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 3 drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 2 drivers/net/ethernet/microchip/lan743x_main.c | 19 drivers/net/ethernet/microsoft/mana/gdma_main.c | 2 drivers/net/ethernet/realtek/r8169_main.c | 28 + drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 3 drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 21 drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2 drivers/net/ethernet/tehuti/tn40.c | 9 drivers/net/ethernet/tehuti/tn40.h | 33 + drivers/net/ethernet/tehuti/tn40_mdio.c | 82 +++ drivers/net/ethernet/ti/cpsw_new.c | 1 drivers/net/ieee802154/ca8210.c | 9 drivers/net/mctp/mctp-i2c.c | 2 drivers/net/phy/nxp-c45-tja11xx.c | 54 ++ drivers/net/phy/phylink.c | 2 drivers/net/usb/r8152.c | 1 drivers/net/vmxnet3/vmxnet3_drv.c | 5 drivers/net/vxlan/vxlan_core.c | 36 + drivers/net/wireless/ath/ath11k/dp.h | 6 drivers/net/wireless/ath/ath11k/dp_rx.c | 117 ++--- drivers/net/wireless/ath/ath12k/core.c | 12 drivers/net/wireless/ath/ath12k/core.h | 1 drivers/net/wireless/ath/ath12k/dp_mon.c | 16 drivers/net/wireless/ath/ath12k/dp_tx.c | 6 drivers/net/wireless/ath/ath12k/hal_desc.h | 2 drivers/net/wireless/ath/ath12k/hal_rx.h | 3 drivers/net/wireless/ath/ath12k/pci.c | 13 drivers/net/wireless/ath/ath12k/wmi.c | 4 drivers/net/wireless/ath/ath9k/init.c | 4 drivers/net/wireless/intel/iwlwifi/cfg/dr.c | 2 drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 4 drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 8 drivers/net/wireless/intel/iwlwifi/fw/uefi.h | 4 drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 10 drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 8 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 4 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 15 drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 3 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3 drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 2 drivers/net/wireless/marvell/mwifiex/11n.c | 6 drivers/net/wireless/mediatek/mt76/mt76.h | 1 drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h | 3 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 3 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 3 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c | 3 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | 3 drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 64 ++ drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h | 3 drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 4 drivers/net/wireless/mediatek/mt76/mt7996/mcu.h | 3 drivers/net/wireless/mediatek/mt76/mt7996/mmio.c | 2 drivers/net/wireless/mediatek/mt76/tx.c | 3 drivers/net/wireless/realtek/rtl8xxxu/core.c | 17 drivers/net/wireless/realtek/rtw88/mac.c | 6 
drivers/net/wireless/realtek/rtw88/main.c | 40 - drivers/net/wireless/realtek/rtw88/reg.h | 3 drivers/net/wireless/realtek/rtw88/rtw8822b.c | 14 drivers/net/wireless/realtek/rtw88/util.c | 3 drivers/net/wireless/realtek/rtw89/coex.c | 14 drivers/net/wireless/realtek/rtw89/core.c | 23 - drivers/net/wireless/realtek/rtw89/core.h | 2 drivers/net/wireless/realtek/rtw89/fw.c | 101 ++++ drivers/net/wireless/realtek/rtw89/fw.h | 12 drivers/net/wireless/realtek/rtw89/mac.c | 55 ++ drivers/net/wireless/realtek/rtw89/mac.h | 3 drivers/net/wireless/realtek/rtw89/mac80211.c | 1 drivers/net/wireless/realtek/rtw89/reg.h | 4 drivers/net/wireless/realtek/rtw89/regd.c | 2 drivers/net/wireless/realtek/rtw89/rtw8851b.c | 1 drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 drivers/net/wireless/realtek/rtw89/rtw8852b.c | 1 drivers/net/wireless/realtek/rtw89/rtw8852bt.c | 1 drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 drivers/net/wireless/realtek/rtw89/rtw8922a.c | 1 drivers/net/wireless/realtek/rtw89/ser.c | 4 drivers/net/wireless/virtual/mac80211_hwsim.c | 14 drivers/nvdimm/label.c | 3 drivers/nvme/host/pci.c | 6 drivers/nvme/target/tcp.c | 3 drivers/nvmem/core.c | 40 + drivers/nvmem/qfprom.c | 26 - drivers/nvmem/rockchip-otp.c | 17 drivers/pci/Kconfig | 6 drivers/pci/ats.c | 33 + drivers/pci/controller/dwc/pcie-designware-ep.c | 2 drivers/pci/controller/dwc/pcie-designware-host.c | 2 drivers/pci/controller/pcie-brcmstb.c | 5 drivers/pci/controller/vmd.c | 20 drivers/pci/endpoint/functions/pci-epf-mhi.c | 2 drivers/pci/endpoint/functions/pci-epf-test.c | 2 drivers/pci/setup-bus.c | 6 drivers/perf/arm_pmuv3.c | 4 drivers/phy/phy-core.c | 7 drivers/phy/renesas/phy-rcar-gen3-usb2.c | 95 ++-- drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c | 4 drivers/phy/rockchip/phy-rockchip-usbdp.c | 87 ++- drivers/phy/samsung/phy-exynos5-usbdrd.c | 7 drivers/pinctrl/bcm/pinctrl-bcm281xx.c | 44 - drivers/pinctrl/devicetree.c | 10 drivers/pinctrl/meson/pinctrl-meson.c | 2 drivers/pinctrl/qcom/pinctrl-msm.c | 23 - drivers/pinctrl/renesas/pinctrl-rzg2l.c | 19 drivers/pinctrl/sophgo/pinctrl-cv18xx.c | 33 + drivers/pinctrl/tegra/pinctrl-tegra.c | 59 ++ drivers/pinctrl/tegra/pinctrl-tegra.h | 6 drivers/platform/x86/asus-wmi.c | 11 drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c | 2 drivers/platform/x86/ideapad-laptop.c | 16 drivers/platform/x86/intel/hid.c | 21 drivers/platform/x86/think-lmi.c | 26 - drivers/platform/x86/think-lmi.h | 1 drivers/pmdomain/core.c | 2 drivers/pmdomain/imx/gpcv2.c | 2 drivers/pmdomain/renesas/rcar-gen4-sysc.c | 5 drivers/pmdomain/renesas/rcar-sysc.c | 5 drivers/power/supply/axp20x_battery.c | 21 drivers/ptp/ptp_ocp.c | 24 - drivers/regulator/ad5398.c | 12 drivers/remoteproc/qcom_wcnss.c | 34 + drivers/rtc/rtc-ds1307.c | 4 drivers/rtc/rtc-rv3032.c | 2 drivers/s390/crypto/vfio_ap_ops.c | 72 ++- drivers/scsi/lpfc/lpfc_hbadisc.c | 29 - drivers/scsi/lpfc/lpfc_init.c | 2 drivers/scsi/mpi3mr/mpi3mr_fw.c | 8 drivers/scsi/mpt3sas/mpt3sas_ctl.c | 12 drivers/scsi/scsi_debug.c | 55 ++ drivers/scsi/scsi_sysctl.c | 4 drivers/scsi/st.c | 29 + drivers/scsi/st.h | 2 drivers/soc/apple/rtkit-internal.h | 1 drivers/soc/apple/rtkit.c | 58 +- drivers/soc/mediatek/mtk-mutex.c | 6 drivers/soc/samsung/exynos-asv.c | 1 drivers/soc/samsung/exynos-chipid.c | 1 drivers/soc/samsung/exynos-pmu.c | 1 drivers/soc/samsung/exynos-usi.c | 1 drivers/soc/samsung/exynos3250-pmu.c | 1 drivers/soc/samsung/exynos5250-pmu.c | 1 drivers/soc/samsung/exynos5420-pmu.c | 1 drivers/soc/ti/k3-socinfo.c | 13 
drivers/soundwire/amd_manager.c | 2 drivers/soundwire/bus.c | 9 drivers/soundwire/cadence_master.c | 22 drivers/spi/spi-fsl-dspi.c | 46 +- drivers/spi/spi-mux.c | 4 drivers/spi/spi-rockchip.c | 2 drivers/spi/spi-zynqmp-gqspi.c | 20 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 69 +-- drivers/target/iscsi/iscsi_target.c | 2 drivers/target/target_core_spc.c | 14 drivers/thermal/intel/x86_pkg_temp_thermal.c | 1 drivers/thermal/mediatek/lvts_thermal.c | 3 drivers/thermal/qoriq_thermal.c | 13 drivers/thunderbolt/retimer.c | 8 drivers/tty/serial/8250/8250_port.c | 2 drivers/tty/serial/atmel_serial.c | 2 drivers/tty/serial/imx.c | 2 drivers/tty/serial/serial_mctrl_gpio.c | 34 + drivers/tty/serial/serial_mctrl_gpio.h | 17 drivers/tty/serial/sh-sci.c | 98 ++++ drivers/tty/serial/stm32-usart.c | 2 drivers/ufs/core/ufshcd.c | 29 + drivers/usb/host/xhci-mem.c | 34 - drivers/usb/host/xhci-ring.c | 12 drivers/usb/host/xhci.h | 8 drivers/vdpa/mlx5/net/mlx5_vnet.c | 3 drivers/vfio/pci/vfio_pci_config.c | 3 drivers/vfio/pci/vfio_pci_core.c | 10 drivers/vfio/pci/vfio_pci_intrs.c | 2 drivers/vhost/scsi.c | 23 - drivers/video/fbdev/core/bitblit.c | 5 drivers/video/fbdev/core/fbcon.c | 10 drivers/video/fbdev/core/fbcon.h | 38 - drivers/video/fbdev/core/fbcon_ccw.c | 5 drivers/video/fbdev/core/fbcon_cw.c | 5 drivers/video/fbdev/core/fbcon_ud.c | 5 drivers/video/fbdev/core/tileblit.c | 45 + drivers/video/fbdev/fsl-diu-fb.c | 1 drivers/virtio/virtio_ring.c | 2 drivers/watchdog/aspeed_wdt.c | 81 +++ drivers/xen/pci.c | 32 + drivers/xen/platform-pci.c | 4 drivers/xen/xenbus/xenbus_probe.c | 14 fs/btrfs/block-group.c | 18 fs/btrfs/compression.c | 2 fs/btrfs/discard.c | 34 - fs/btrfs/disk-io.c | 28 - fs/btrfs/extent_io.c | 7 fs/btrfs/extent_io.h | 2 fs/btrfs/scrub.c | 4 fs/btrfs/send.c | 6 fs/buffer.c | 61 +- fs/dlm/lowcomms.c | 4 fs/erofs/internal.h | 4 fs/erofs/super.c | 46 -- fs/erofs/zdata.c | 4 fs/exfat/inode.c | 159 +++--- fs/ext4/balloc.c | 4 fs/ext4/ext4.h | 5 fs/ext4/extents.c | 19 fs/ext4/inode.c | 81 ++- fs/ext4/mballoc.c | 3 fs/ext4/page-io.c | 16 fs/ext4/super.c | 19 fs/f2fs/sysfs.c | 74 ++- fs/fuse/dir.c | 2 fs/gfs2/glock.c | 11 fs/jbd2/recovery.c | 11 fs/jbd2/revoke.c | 15 fs/namespace.c | 6 fs/nfs/delegation.c | 3 fs/nfs/flexfilelayout/flexfilelayout.c | 1 fs/nfs/inode.c | 2 fs/nfs/internal.h | 5 fs/nfs/nfs3proc.c | 2 fs/nfs/nfs4proc.c | 9 fs/nfs/nfs4state.c | 10 fs/nilfs2/the_nilfs.c | 3 fs/ocfs2/journal.c | 2 fs/orangefs/inode.c | 7 fs/pidfs.c | 9 fs/pstore/inode.c | 2 fs/pstore/internal.h | 4 fs/pstore/platform.c | 11 fs/smb/client/cifsacl.c | 17 fs/smb/client/cifspdu.h | 5 fs/smb/client/cifsproto.h | 7 fs/smb/client/cifssmb.c | 57 ++ fs/smb/client/connect.c | 30 + fs/smb/client/fs_context.c | 13 fs/smb/client/fs_context.h | 3 fs/smb/client/link.c | 8 fs/smb/client/readdir.c | 7 fs/smb/client/smb1ops.c | 228 ++++++++-- fs/smb/client/smb2file.c | 11 fs/smb/client/smb2ops.c | 30 - fs/smb/client/transport.c | 2 fs/smb/common/smb2pdu.h | 3 fs/smb/server/smb2pdu.c | 9 fs/smb/server/vfs.c | 14 include/crypto/hash.h | 3 include/drm/drm_atomic.h | 23 - include/drm/drm_gem.h | 13 include/linux/bpf-cgroup.h | 1 include/linux/buffer_head.h | 8 include/linux/device.h | 119 ----- include/linux/device/devres.h | 129 +++++ include/linux/dma-mapping.h | 12 include/linux/err.h | 3 include/linux/highmem.h | 10 include/linux/io.h | 2 include/linux/ipv6.h | 1 include/linux/lzo.h | 8 include/linux/mfd/axp20x.h | 1 include/linux/mlx4/device.h | 2 include/linux/mlx5/eswitch.h | 2 include/linux/mlx5/fs.h 
| 2 include/linux/mman.h | 2 include/linux/msi.h | 33 - include/linux/page-flags.h | 7 include/linux/pci-ats.h | 3 include/linux/perf_event.h | 8 include/linux/pnp.h | 2 include/linux/rcupdate.h | 2 include/linux/rcutree.h | 2 include/linux/spi/spi.h | 5 include/linux/trace.h | 4 include/linux/trace_seq.h | 8 include/linux/usb/r8152.h | 1 include/media/v4l2-subdev.h | 4 include/net/cfg80211.h | 3 include/net/mac80211.h | 4 include/net/xfrm.h | 1 include/rdma/uverbs_std_types.h | 2 include/sound/hda_codec.h | 1 include/sound/pcm.h | 2 include/trace/events/btrfs.h | 2 include/uapi/linux/bpf.h | 1 include/uapi/linux/iommufd.h | 14 include/uapi/linux/nl80211.h | 52 +- include/ufs/ufs_quirks.h | 6 io_uring/fdinfo.c | 4 io_uring/io_uring.c | 12 io_uring/msg_ring.c | 1 kernel/bpf/bpf_struct_ops.c | 98 +--- kernel/bpf/cgroup.c | 33 + kernel/bpf/hashtab.c | 2 kernel/bpf/syscall.c | 7 kernel/bpf/verifier.c | 34 + kernel/cgroup/cgroup.c | 2 kernel/cgroup/rstat.c | 12 kernel/dma/mapping.c | 25 - kernel/events/core.c | 98 ++-- kernel/events/hw_breakpoint.c | 5 kernel/events/ring_buffer.c | 1 kernel/exit.c | 6 kernel/fork.c | 9 kernel/padata.c | 3 kernel/printk/printk.c | 14 kernel/rcu/rcu.h | 2 kernel/rcu/tree.c | 15 kernel/rcu/tree_plugin.h | 22 kernel/sched/fair.c | 6 kernel/signal.c | 3 kernel/softirq.c | 18 kernel/time/hrtimer.c | 29 - kernel/time/posix-timers.c | 22 kernel/time/timer_list.c | 4 kernel/trace/trace.c | 11 kernel/trace/trace.h | 16 kernel/vhost_task.c | 2 lib/dynamic_queue_limits.c | 2 lib/lzo/Makefile | 2 lib/lzo/lzo1x_compress.c | 102 +++- lib/lzo/lzo1x_compress_safe.c | 18 mm/memcontrol.c | 6 mm/page_alloc.c | 8 mm/vmalloc.c | 13 net/bluetooth/hci_event.c | 3 net/bluetooth/l2cap_core.c | 15 net/bridge/br_mdb.c | 2 net/bridge/br_nf_core.c | 7 net/bridge/br_private.h | 1 net/can/bcm.c | 79 ++- net/core/dev.c | 12 net/core/dev.h | 12 net/core/page_pool.c | 7 net/core/pktgen.c | 13 net/dsa/tag_ksz.c | 19 net/hsr/hsr_device.c | 2 net/hsr/hsr_forward.c | 4 net/hsr/hsr_framereg.c | 95 ++++ net/hsr/hsr_framereg.h | 8 net/hsr/hsr_main.h | 2 net/ipv4/esp4.c | 53 -- net/ipv4/fib_frontend.c | 18 net/ipv4/fib_rules.c | 4 net/ipv4/fib_trie.c | 22 net/ipv4/inet_hashtables.c | 37 + net/ipv4/ip_gre.c | 16 net/ipv4/tcp_input.c | 56 +- net/ipv4/xfrm4_input.c | 18 net/ipv6/esp6.c | 53 -- net/ipv6/fib6_rules.c | 4 net/ipv6/ip6_gre.c | 2 net/ipv6/ip6_output.c | 9 net/ipv6/ip6_tunnel.c | 3 net/ipv6/ip6_vti.c | 3 net/ipv6/sit.c | 8 net/ipv6/xfrm6_input.c | 18 net/llc/af_llc.c | 8 net/mac80211/driver-ops.h | 3 net/mac80211/mlme.c | 10 net/mptcp/pm_userspace.c | 8 net/netfilter/nf_conntrack_standalone.c | 12 net/sched/sch_hfsc.c | 6 net/smc/smc_pnet.c | 8 net/sunrpc/clnt.c | 3 net/sunrpc/rpcb_clnt.c | 5 net/sunrpc/sched.c | 2 net/tipc/crypto.c | 5 net/wireless/chan.c | 8 net/wireless/nl80211.c | 4 net/wireless/reg.c | 4 net/xfrm/espintcp.c | 4 net/xfrm/xfrm_policy.c | 3 net/xfrm/xfrm_state.c | 6 net/xfrm/xfrm_user.c | 12 samples/bpf/Makefile | 2 scripts/Makefile.extrawarn | 11 scripts/config | 26 - scripts/kconfig/confdata.c | 19 scripts/kconfig/merge_config.sh | 4 security/integrity/ima/ima_main.c | 4 security/smack/smackfs.c | 21 sound/core/oss/pcm_oss.c | 3 sound/core/pcm_native.c | 11 sound/core/seq/seq_clientmgr.c | 5 sound/core/seq/seq_memory.c | 1 sound/pci/hda/hda_beep.c | 15 sound/pci/hda/patch_realtek.c | 77 +++ sound/soc/codecs/cs42l43-jack.c | 7 sound/soc/codecs/mt6359-accdet.h | 9 sound/soc/codecs/pcm3168a.c | 6 sound/soc/codecs/pcm6240.c | 28 - sound/soc/codecs/pcm6240.h | 7 
sound/soc/codecs/rt722-sdca-sdw.c | 49 ++ sound/soc/codecs/tas2764.c | 53 +- sound/soc/codecs/wsa883x.c | 2 sound/soc/codecs/wsa884x.c | 2 sound/soc/fsl/imx-card.c | 2 sound/soc/intel/boards/bytcr_rt5640.c | 13 sound/soc/mediatek/mt8188/mt8188-afe-clk.c | 8 sound/soc/mediatek/mt8188/mt8188-afe-clk.h | 8 sound/soc/mediatek/mt8188/mt8188-afe-pcm.c | 4 sound/soc/qcom/sm8250.c | 3 sound/soc/sdw_utils/soc_sdw_cs42l43.c | 10 sound/soc/soc-dai.c | 8 sound/soc/soc-ops.c | 29 + sound/soc/sof/intel/hda-bus.c | 2 sound/soc/sof/intel/hda.c | 16 sound/soc/sof/ipc4-control.c | 11 sound/soc/sof/ipc4-pcm.c | 3 sound/soc/sof/topology.c | 18 sound/soc/sunxi/sun4i-codec.c | 53 ++ sound/usb/midi.c | 16 tools/bpf/bpftool/common.c | 3 tools/build/Makefile.build | 6 tools/include/uapi/linux/bpf.h | 1 tools/lib/bpf/libbpf.c | 2 tools/net/ynl/lib/ynl.c | 2 tools/net/ynl/ynl-gen-c.py | 3 tools/objtool/check.c | 21 tools/power/x86/turbostat/turbostat.8 | 1 tools/power/x86/turbostat/turbostat.c | 13 tools/testing/kunit/qemu_configs/x86_64.py | 4 tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c | 1 tools/testing/selftests/iommu/iommufd.c | 4 tools/testing/selftests/net/forwarding/bridge_mdb.sh | 2 tools/testing/selftests/net/gro.sh | 3 767 files changed, 8223 insertions(+), 3618 deletions(-)
Aaradhana Sahu (1): wifi: ath12k: Fetch regdb.bin file from board-2.bin
Aaron Kling (1): cpufreq: tegra186: Share policy per cluster
Adrian Hunter (1): perf/x86/intel: Fix segfault with PEBS-via-PT with sample_freq
Ahmad Fatoum (2): clk: imx8mp: inform CCF of maximum frequency of clocks pmdomain: imx: gpcv2: use proper helper for property detection
Al Viro (2): hypfs_create_cpu_files(): add missing check for hypfs_mkdir() failure __legitimize_mnt(): check for MNT_SYNC_UMOUNT should be under mount_lock
Aleksander Jan Bajkowski (1): r8152: add vendor/device ID pair for Dell Alienware AW1022z
Alex Deucher (5):
      drm/amdgpu: adjust drm_firmware_drivers_only() handling
      drm/amdgpu/gfx12: don't read registers in mqd init
      drm/amdgpu/gfx11: don't read registers in mqd init
      drm/amdgpu/mes11: fix set_hw_resources_1 calculation
      drm/amd/display/dm: drop hw_support check in amdgpu_dm_i2c_xfer()
Alex Williamson (1): vfio/pci: Handle INTx IRQ_NOTCONNECTED
Alexander Duyck (1): eth: fbnic: set IFF_UNICAST_FLT to avoid enabling promiscuous mode when adding unicast addrs
Alexander Stein (1): hwmon: (gpio-fan) Add missing mutex locks
Alexander Sverdlin (1): net: ethernet: ti: cpsw_new: populate netdev of_node
Alexandre Belloni (2): rtc: rv3032: fix EERD location rtc: ds1307: stop disabling alarms on probe
Alexandre Ghiti (1): riscv: Call secondary mmu notifier when flushing the tlb
Alexei Lazar (2): net/mlx5: XDP, Enable TX side XDP multi-buffer support net/mlx5: Extend Ethtool loopback selftest to support non-linear SKB
Alexey Klimov (1): ASoC: qcom: sm8250: explicitly set format in sm8250_be_hw_params_fixup()
Alexis Lothoré (1): serial: mctrl_gpio: split disable_ms into sync and no_sync APIs
Alice Guo (1): thermal/drivers/qoriq: Power down TMU on system suspend
Alistair Francis (1): nvmet-tcp: don't restore null sk_state_change
Amber Lin (1): drm/amdkfd: Correct F8_MODE for gfx950
Amery Hung (1): bpf: Search and add kfuncs in struct_ops prologue and epilogue
Andre Przywara (1): clk: sunxi-ng: d1: Add missing divider for MMC mod clocks
Andreas Gruenbacher (1): gfs2: Check for empty queue in run_queue
Andreas Schwab (1): powerpc/prom_init: Fixup missing #size-cells on PowerBook6,7
Andrei Botila (1): net: phy: nxp-c45-tja11xx: add match_phy_device to TJA1103/TJA1104
Andrew Bresticker (1): irqchip/riscv-imsic: Start local sync timer on correct CPU
Andrew Davis (1): soc: ti: k3-socinfo: Do not use syscon helper to build regmap
Andrew Jones (1): irqchip/riscv-imsic: Set irq_set_affinity() for IMSIC base
Andrey Vatoropin (1): hwmon: (xgene-hwmon) use appropriate type for the latency value
André Draszik (2): phy: exynos5-usbdrd: fix EDS distribution tuning (gs101) clk: s2mps11: initialise clk_hw_onecell_data::num before accessing ::hws[] in probe()
Andy Shevchenko (6):
      i2c: designware: Use temporary variable for struct device
      tracing: Mark binary printing functions with __printf() attribute
      auxdisplay: charlcd: Partially revert "Move hwidth and bwidth to struct hd44780_common"
      ieee802154: ca8210: Use proper setters and getters for bitwise types
      hrtimers: Replace hrtimer_clock_to_base_table with switch-case
      driver core: Split devres APIs to device/devres.h
Andy Yan (2): phy: rockchip: usbdp: Only verify link rates/lanes/voltage when the corresponding set flags are set drm/rockchip: vop2: Add uv swap for cluster window
AngeloGioacchino Del Regno (2): soc: mediatek: mtk-mutex: Add DPI1 SOF/EOF to MT8188 mutex tables drm/mediatek: mtk_dpi: Add checks for reg_h_fre_con existence
Anjaneyulu (1): wifi: cfg80211: allow IR in 20 MHz configurations
Ankur Arora (3):
      rcu: handle quiescent states for PREEMPT_RCU=n, PREEMPT_COUNT=y
      rcu: handle unstable rdp in rcu_read_unlock_strict()
      rcu: fix header guard for rcu_all_qs()
Anthony Krowiak (1): s390/vfio-ap: Fix no AP queue sharing allowed message written to kernel log
Anup Patel (1): irqchip/riscv-imsic: Separate next and previous pointers in IMSIC vector
Aric Cyr (1): drm/amd/display: Request HW cursor on DCN3.2 with SubVP
Arnd Bergmann (4):
      soc: samsung: include linux/array_size.h where needed
      net: xgene-v2: remove incorrect ACPI_PTR annotation
      EDAC/ie31200: work around false positive build warning
      watchdog: aspeed: fix 64-bit division
Artur Weber (1): pinctrl: bcm281xx: Use "unsigned int" instead of bare "unsigned"
Asad Kamal (1): drm/amd/pm: Skip P2S load for SMU v13.0.12
Assadian, Navid (1): drm/amd/display: Fix mismatch type comparison
Athira Rajeev (1): arch/powerpc/perf: Check the instruction type before creating sample with perf_mem_data_src
Austin Zheng (2): drm/amd/display: Use Nominal vBlank If Provided Instead Of Capping It drm/amd/display: Call FP Protect Before Mode Programming/Mode Support
Avraham Stern (1): wifi: iwlwifi: mvm: fix setting the TK when associated
Avula Sri Charan (1): wifi: ath12k: Avoid napi_sync() before napi_enable()
Axel Forsman (2): can: kvaser_pciefd: Continue parsing DMA buf after dropped RX can: kvaser_pciefd: Fix echo_skb race
Balbir Singh (4):
      dma/mapping.c: dev_dbg support for dma_addressing_limited
      dma-mapping: Fix warning reported for missing prototype
      x86/kaslr: Reduce KASLR entropy on most x86 systems
      x86/mm/init: Handle the special case of device private pages in add_pages(), to not increase max_pfn and trigger dma_addressing_limited() bounce buffers
Baokun Li (2): ext4: reject the 'data_err=abort' option in nojournal mode ext4: do not convert the unwritten extents if data writeback fails
Bard Liao (1): soundwire: cadence_master: set frame shape and divider based on actual clk freq
Benjamin Berg (1): um: Store full CSGSFS and SS register from mcontext
Benjamin Lin (1): wifi: mt76: mt7996: revise TXS size
Bibo Mao (1): MIPS: Use arch specific syscall name match function
Bitterblue Smith (6):
      wifi: rtw88: Fix rtw_init_vht_cap() for RTL8814AU
      wifi: rtw88: Fix rtw_init_ht_cap() for RTL8814AU
      wifi: rtw88: Fix rtw_desc_to_mcsrate() to handle MCS16-31
      wifi: rtw88: Fix download_firmware_validate() for RTL8814AU
      wifi: rtw88: Fix __rtw_download_firmware() for RTL8814AU
      wifi: rtw88: Don't use static local variable in rtw8822b_set_tx_power_index_by_rate
Bogdan-Gabriel Roman (1): spi: spi-fsl-dspi: Halt the module after a new message transfer
Boris Burkov (2): btrfs: make btrfs_discard_workfn() block_group ref explicit btrfs: handle empty eb->folios in num_extent_folios()
Brandon Kammerdiener (1): bpf: fix possible endless loop in BPF map iteration
Brandon Syu (1): Revert "drm/amd/display: Exit idle optimizations before attempt to access PHY"
Brendan Jackman (1): kunit: tool: Use qboot on QEMU x86_64
Breno Leitao (2): x86/bugs: Make spectre user default depend on MITIGATION_SPECTRE_V2 memcg: always call cond_resched() after fn()
Caleb Sander Mateos (1): ublk: complete command synchronously on error
Carlos Sanchez (1): can: slcan: allow reception of short error messages
Carolina Jubran (1): net/mlx5e: Avoid WARN_ON when configuring MQPRIO with HTB offload enabled
Cezary Rojewski (1): ASoC: codecs: pcm3168a: Allow for 24-bit in provider mode
Chaohai Chen (1): scsi: target: spc: Fix loop traversal in spc_rsoc_get_descr()
Charlene Liu (3):
      drm/amd/display: remove minimum Dispclk and apply oem panel timing.
      drm/amd/display: fix dcn4x init failed
      drm/amd/display: pass calculated dram_speed_mts to dml2
Charles Keepax (3):
      ASoC: rt722-sdca: Add some missing readable registers
      ASoC: cs42l43: Disable headphone clamps during type detection
      soundwire: bus: Fix race on the creation of the IRQ domain
Chen Linxuan (1): blk-cgroup: improve policy registration error handling
Chenyuan Yang (1): ASoC: imx-card: Adjust over allocation of memory in imx_card_parse_of()
Chin-Ting Kuo (1): watchdog: aspeed: Update bootstatus handling
Ching-Te Ku (2): wifi: rtw89: coex: Assign value over than 0 to avoid firmware timer hang wifi: rtw89: coex: Separated Wi-Fi connecting event from Wi-Fi scan event
Choong Yong Liang (1): net: phylink: use pl->link_interface in phylink_expects_phy()
Chris Lu (2): Bluetooth: btmtksdio: Check function enabled before doing close Bluetooth: btmtksdio: Do close if SDIO card removed without close
Chris Morgan (2): power: supply: axp20x_battery: Update temp sensor for AXP717 from device tree mfd: axp20x: AXP717: Add AXP717_TS_PIN_CFG to writeable regs
Christian Brauner (1): pidfs: improve multi-threaded exec and premature thread-group leader exit polling
Christian Bruel (1): PCI: endpoint: pci-epf-test: Fix double free that causes kernel to oops
Christian Göttsche (1): ext4: reorder capability check last
Christian König (1): drm/amdgpu: remove all KFD fences from the BO on release
Christoph Hellwig (3):
      block: mark bounce buffering as incompatible with integrity
      loop: check in LO_FLAGS_DIRECT_IO in loop_default_blocksize
      loop: don't require ->write_iter for writable files in loop_configure
Christophe JAILLET (1): i2c: designware: Fix an error handling path in i2c_dw_pci_probe()
ChunHao Lin (1): r8169: disable RTL8126 ZRX-DC timeout
Chung Chung (1): dm vdo indexer: prevent unterminated string warning
Claudiu Beznea (5):
      phy: renesas: rcar-gen3-usb2: Move IRQ request in probe
      phy: renesas: rcar-gen3-usb2: Lock around hardware registers and driver data
      phy: renesas: rcar-gen3-usb2: Assert PLL reset on PHY power off
      serial: sh-sci: Update the suspend/resume support
      pinctrl: renesas: rzg2l: Add suspend/resume support for pull up/down
Coly Li (1): badblocks: Fix a nonsense WARN_ON() which checks whether a u64 variable < 0
Cong Wang (1): sch_hfsc: Fix qlen accounting bug when using peek in hfsc_enqueue()
Cristian Ciocaltea (1): drm/rockchip: vop2: Improve display modes handling on RK3588 HDMI0
Csókás, Bence (1): net: fec: Refactor MAC reset to function
Damon Ding (1): phy: phy-rockchip-samsung-hdptx: Swap the definitions of LCPLL_REF and ROPLL_REF
Dan Carpenter (2): pmdomain: core: Fix error checking in genpd_dev_pm_attach_by_id() pinctrl: tegra: Fix off by one in tegra_pinctrl_get_group()
Daniel Gabay (1): wifi: iwlwifi: w/a FW SMPS mode selection
Daniel Gomez (2): kconfig: merge_config: use an empty file as initfile drm/xe: xe_gen_wa_oob: replace program_invocation_short_name
Daniel Hsu (1): mctp: Fix incorrect tx flow invalidation condition in mctp-i2c
Danny Wang (1): drm/amd/display: Do not enable replay when vtotal update is pending.
Darrick J. Wong (1): block: fix race between set_blocksize and read paths
Dave Ertman (1): ice: Fix LACP bonds without SRIOV environment
Dave Jiang (1): dmaengine: idxd: Fix ->poll() return value
David Hildenbrand (1): kernel/fork: only call untrack_pfn_clear() on VMAs duplicated for fork()
David Lechner (1): iio: adc: ad7944: don't use storagebits for sizing
David Plowman (1): media: i2c: imx219: Correct the minimum vblanking value
David Rosca (1): drm/amdgpu: Update SRIOV video codec caps
David Wei (1): tools: ynl-gen: validate 0 len strings from kernel
Davidlohr Bueso (6):
      fs/buffer: split locking for pagecache lookups
      fs/buffer: introduce sleeping flavors for pagecache lookups
      fs/buffer: use sleeping version of __find_get_block()
      fs/ocfs2: use sleeping version of __find_get_block()
      fs/jbd2: use sleeping version of __find_get_block()
      fs/ext4: use sleeping version of sb_find_get_block()
Depeng Shao (2): media: qcom: camss: csid: Only add TPG v4l2 ctrl if TPG hardware is available media: qcom: camss: Add default case in vfe_src_pad_code
Dhananjay Ugwekar (1): cpufreq: amd-pstate: Remove unnecessary driver_lock in set_boost
Dian-Syuan Yang (1): wifi: rtw89: set force HE TB mode when connecting to 11ax AP
Dillon Varone (4):
      drm/amd/display: Configure DTBCLK_P with OPTC only for dcn401
      drm/amd/display: Fix DMUB reset sequence for DCN401
      drm/amd/display: Fix p-state type when p-state is unsupported
      drm/amd/display: Populate register address for dentist for dcn401
Diogo Ivo (2): ACPI: PNP: Add Intel OC Watchdog IDs to non-PNP device list arm64: tegra: p2597: Fix gpio for vdd-1v8-dis regulator
Dmitry Baryshkov (6):
      nvmem: core: fix bit offsets of more than one byte
      nvmem: core: verify cell's raw_len
      nvmem: core: update raw_len if the bit reading is required
      nvmem: qfprom: switch to 4-byte aligned reads
      phy: core: don't require set_mode() callback for phy_get_mode() to work
      pinctrl: qcom: switch to devm_register_sys_off_handler()
Dmitry Bogdanov (1): scsi: target: iscsi: Fix timeout on deleted connection
Dominik Grzegorzek (1): padata: do not leak refcount in reorder_work
Dongli Zhang (1): vhost-scsi: protect vq->log_used with vq->mutex
Douglas Anderson (1): drm/panel-edp: Add Starry 116KHD024006
Ed Burcher (1): ALSA: hda/realtek: Add quirk for Lenovo Yoga Pro 7 14ASP10
Eddie James (1): eeprom: ee1004: Check chip before probing
Eduard Zingerman (3):
      bpf: don't do clean_live_states when state->loop_entry->branches > 0
      bpf: copy_verifier_state() should copy 'loop_entry' field
      bpf: abort verification if env->cur_state->loop_entry != NULL
Emily Deng (1): drm/amdgpu: Fix missing drain retry fault the last entry
Emmanuel Grumbach (2): wifi: iwlwifi: fix the ECKV UEFI variable name wifi: mac80211: set ieee80211_prep_tx_info::link_id upon Auth Rx
En-Wei Wu (1): Bluetooth: btusb: use skb_pull to avoid unsafe access in QCA dump handling
Eric Dumazet (5):
      cgroup/rstat: avoid disabling irqs for O(num_cpu)
      posix-timers: Add cond_resched() to posix_timer_add() search loop
      tcp: bring back NUMA dispersion in inet_ehash_locks_alloc()
      net: flush_backlog() small changes
      idpf: fix idpf_vport_splitq_napi_poll()
Eric Huang (1): drm/amdkfd: fix missing L2 cache info in topology
Eric Woudstra (1): net: ethernet: mtk_ppe_offload: Allow QinQ, double ETH_P_8021Q only
Erick Shepherd (2): mmc: host: Wait for Vdd to settle on card power off mmc: sdhci: Disable SD card clock before changing parameters
Felix Fietkau (1): wifi: mt76: only mark tx-status-failed frames as ACKed on mt76x0/2
Felix Kuehling (1): drm/amdgpu: Allow P2P access through XGMI
Filipe Manana (3):
      btrfs: fix non-empty delayed iputs list on unmount due to async workers
      btrfs: get zone unusable bytes while holding lock at btrfs_reclaim_bgs_work()
      btrfs: send: return -ENAMETOOLONG when attempting a path that is too long
Flora Cui (2): drm/amdgpu/discovery: check ip_discovery fw file available drm/amdgpu: release xcp_mgr on exit
Frank Li (3):
      PCI: dwc: ep: Ensure proper iteration over outbound map windows
      PCI: dwc: Use resource start as ioremap() input in dw_pcie_pme_turn_off()
      i3c: master: svc: Flush FIFO before sending Dynamic Address Assignment(DAA)
Frederick Lawler (1): ima: process_measurement() needlessly takes inode_lock() on MAY_READ
Frediano Ziglio (1): xen: Add support for XenServer 6.1 platform device
Gabor Juhos (1): arm64: dts: marvell: uDPU: define pinctrl state for alarm LEDs
Gao Xiang (1): erofs: initialize decompression early
Gaurav Batra (2): powerpc/pseries/iommu: memory notifier incorrectly adds TCEs for pmemory powerpc/pseries/iommu: create DDW for devices with DMA mask less than 64-bits
Gašper Nemgar (1): platform/x86: ideapad-laptop: add support for some new buttons
Geert Uytterhoeven (3):
      ipv4: ip_gre: Fix set but not used warning in ipgre_err() if IPv4-only
      pmdomain: renesas: rcar: Remove obsolete nullify checks
      serial: sh-sci: Save and restore more registers
Geetha sowjanya (1): octeontx2-af: Fix APR entry mapping based on APR_LMT_CFG
George Shen (3):
      drm/amd/display: Skip checking FRL_MODE bit for PCON BW determination
      drm/amd/display: Read LTTPR ALPM caps during link cap retrieval
      drm/amd/display: Update CR AUX RD interval interpretation
Goldwyn Rodrigues (1): btrfs: correct the order of prelim_ref arguments in btrfs__prelim_ref
Greg Kroah-Hartman (2):
      spi: use container_of_cont() for to_spi_device()
      Linux 6.12.31
Guangguan Wang (1): net/smc: use the correct ndev to find pnetid by pnetid table
Hangbin Liu (1): bonding: report duplicate MAC address in all situations
Hans Verkuil (2): media: cx231xx: set device_caps for 417 media: test-drivers: vivid: don't call schedule in loop
Hans de Goede (1): mei: vsc: Use struct vsc_tp_packet as vsc-tp tx_buf and rx_buf type
Hans-Frieder Vogt (2): net: tn40xx: add pci-id of the aqr105-based Tehuti TN4010 cards net: tn40xx: create swnode for mdio and aqr105 phy and add to mdiobus
Haoran Jiang (1): samples/bpf: Fix compilation failure for samples/bpf on LoongArch Fedora
Hariprasad Kelam (1): Octeontx2-af: RPM: Register driver with PCI subsys IDs
Harish Kasiviswanathan (3):
      drm/amdkfd: Set per-process flags only once for gfx9/10/11/12
      drm/amdkfd: Set per-process flags only once cik/vi
      drm/amdgpu: Set snoop bit for SDMA for MI series
Harry VanZyllDeJong (1): drm/amd/display: Add support for disconnected eDP streams
Harry Wentland (1): drm/amd/display: Don't treat wb connector as physical in create_validate_stream_for_sink
Hector Martin (4):
      soc: apple: rtkit: Implement OSLog buffers properly
      ASoC: tas2764: Add reg defaults for TAS2764_INT_CLK_CFG
      ASoC: tas2764: Mark SW_RESET as volatile
      ASoC: tas2764: Power up/down amp on mute ops
Heiko Carstens (1): s390/tlb: Use mm_has_pgste() instead of mm_alloc_pgste()
Heiko Stuebner (2): nvmem: rockchip-otp: Move read-offset into variant-data nvmem: rockchip-otp: add rk3576 variant data
Heiner Kallweit (1): r8169: don't scan PHY addresses > 0
Heming Zhao (1): dlm: make tcp still work in multi-link env
Herbert Xu (3):
      crypto: lzo - Fix compression buffer overrun
      crypto: ahash - Set default reqsize from ahash_alg
      crypto: skcipher - Zap type in crypto_alloc_sync_skcipher
Huacai Chen (1): net: stmmac: dwmac-loongson: Set correct {tx,rx}_fifo_size
Ian Rogers (1): tools/build: Don't pass test log files to linker
Ido Schimmel (2): vxlan: Annotate FDB data races bridge: netfilter: Fix forwarding of fragmented packets
Ignacio Moreno Gonzalez (1): mm: mmap: map MAP_STACK to VM_NOHUGEPAGE only if THP is enabled
Ihor Solodrai (1): selftests/bpf: Mitigate sockmap_ktls disconnect_after_delete failure
Ilan Peer (1): wifi: mac80211_hwsim: Fix MLD address translation
Ilia Gavrilov (1): llc: fix data loss when reading from a socket in llc_ui_recvmsg()
Ilpo Järvinen (2): tcp: reorganize tcp_in_ack_event() and tcp_count_delivered() PCI: Fix old_size lower bound in calculate_iosize() too
Ilya Bakoulin (2): drm/amd/display: Fix BT2020 YCbCr limited/full range input drm/amd/display: Don't try AUX transactions on disconnected link
Ingo Molnar (1): x86/stackprotector/64: Only export __ref_stack_chk_guard on CONFIG_SMP
Inochi Amaoto (1): pinctrl: sophgo: avoid to modify untouched bit when setting cv1800 pinconf
Isaac Scott (1): regulator: ad5398: Add device tree support
Ivan Pravdin (1): crypto: algif_hash - fix double free in hash_accept
Jaakko Karrenpalo (1): net: hsr: Fix PRP duplicate detection
Jacob Keller (1): ice: fix vf->num_mac count with port representors
Jaegeuk Kim (1): f2fs: introduce f2fs_base_attr for global sysfs entries
Jakob Unterwurzacher (1): net: dsa: microchip: linearize skb for tail-tagging switches
Jakub Kicinski (3):
      eth: mlx4: don't try to complete XDP frames in netpoll
      net: page_pool: avoid false positive warning if NAPI was never added
      tools: ynl-gen: don't output external constants
Jan Kara (1): jbd2: do not try to recover wiped journal
Janne Grunau (1): soc: apple: rtkit: Use high prio work queue
Jason Andryuk (1): xenbus: Allow PVH dom0 a non-local xenstore
Jason Gunthorpe (1): genirq/msi: Store the IOMMU IOVA directly in msi_desc instead of iommu_cookie
Jeff Chen (1): wifi: mwifiex: Fix HT40 bandwidth issue.
Jens Axboe (1): io_uring/fdinfo: annotate racy sq/cq head/tail reads
Jernej Skrabec (1): Revert "arm64: dts: allwinner: h6: Use RSB for AXP805 PMIC connection"
Jessica Zhang (1): drm: Add valid clones check
Jianbo Liu (1): net/mlx5e: Add correct match to check IPSec syndromes for switchdev mode
Jiang Liu (1): drm/amdgpu: reset psp->cmd to NULL after releasing the buffer
Jiasheng Jiang (1): dpll: Add an assertion to check freq_supported_num
Jing Su (1): dql: Fix dql->limit value when reset.
Jing Zhou (1): drm/amd/display: Guard against setting dispclk low for dcn31x
Jinliang Zheng (1): dm: fix unconditional IO throttle caused by REQ_PREFLUSH
Jinqian Yang (1): arm64: Add support for HIP09 Spectre-BHB mitigation
Johannes Berg (7):
      wifi: iwlwifi: fix debug actions order
      wifi: iwlwifi: mark Br device not integrated
      wifi: mac80211: fix warning on disconnect during failed ML reconf
      wifi: iwlwifi: use correct IMR dump variable
      wifi: mac80211: don't unconditionally call drv_mgd_complete_tx()
      wifi: mac80211: remove misplaced drv_mgd_complete_tx() call
      wifi: iwlwifi: add support for Killer on MTL
Johannes Thumshirn (1): block: only update request sector if needed
John Olender (1): drm/amd/display: Defer BW-optimization-blocked DRR adjustments
Jon Hunter (1): arm64: tegra: Resize aperture for the IGX PCIe C5 slot
Jonas Karlman (1): net: stmmac: dwmac-rk: Validate GRF and peripheral GRF during probe
Jonathan Kim (1): drm/amdkfd: set precise mem ops caps to disabled for gfx 11 and 12
Jonathan McDowell (1): tpm: Convert warn to dbg in tpm2_start_auth_session()
Jordan Crouse (1): clk: qcom: camcc-sm8250: Use clk_rcg2_shared_ops for some RCGs
Josh Poimboeuf (2): objtool: Properly disable uaccess validation objtool: Fix error handling inconsistencies in check()
Joshua Aberback (1): drm/amd/display: Increase block_sequence array size
Justin Tee (3):
      scsi: lpfc: Handle duplicate D_IDs in ndlp search-by D_ID routine
      scsi: lpfc: Ignore ndlp rport mismatch in dev_loss_tmo callbk
      scsi: lpfc: Free phba irq in lpfc_sli4_enable_msi() when pci_irq_vector() fails
Kai Mäkisara (4):
      scsi: st: Tighten the page format heuristics with MODE SELECT
      scsi: st: ERASE does not change tape location
      scsi: scsi_debug: First fixes for tapes
      scsi: st: Restore some drive settings after reset
Kai Vehmanen (1): ASoc: SOF: topology: connect DAI to a single DAI link
Karl Chan (1): clk: qcom: ipq5018: allow it to be bulid on arm32
Kaustabh Chakraborty (1): mmc: dw_mmc: add exynos7870 DW MMC support
Kees Cook (6):
      PNP: Expand length of fixup id string
      net/mlx4_core: Avoid impossible mlx4_db_alloc() order value
      pstore: Change kmsg_bytes storage size to u32
      btrfs: compression: adjust cb->compressed_folios allocation type
      mm: vmalloc: actually use the in-place vrealloc region
      mm: vmalloc: only zero-init on vrealloc shrink
Kevin Krakauer (1): selftests/net: have `gro.sh -t` return a correct exit code
Konstantin Andreev (2): smack: recognize ipv4 CIPSO w/o categories smack: Revert "smackfs: Added check catlen"
Konstantin Shkolnyy (1): vdpa/mlx5: Fix mlx5_vdpa_get_config() endianness on big-endian machines
Konstantin Taranov (1): net/mana: fix warning in the writer of client oob
Krzysztof Kozlowski (4):
      ASoC: codecs: wsa884x: Correct VI sense channel mask
      ASoC: codecs: wsa883x: Correct VI sense channel mask
      can: c_can: Use of_property_present() to test existence of DT property
      clk: qcom: clk-alpha-pll: Do not use random stack value for recalc rate
Kuan-Chung Chen (1): wifi: rtw89: 8922a: fix incorrect STA-ID in EHT MU PPDU
Kuhanh Murugasen Krishnan (1): fpga: altera-cvp: Increase credit timeout
Kuninori Morimoto (1): ASoC: soc-dai: check return value at snd_soc_dai_set_tdm_slot()
Kuniyuki Iwashima (2): ipv4: fib: Move fib_valid_key_len() to rtm_to_fib_config(). ip: fib_rules: Fetch net from fib_rule in fib[46]_rule_configure().
Kurt Borja (1): hwmon: (dell-smm) Increment the number of fans
Lad Prabhakar (1): clk: renesas: rzg2l-cpg: Refactor Runtime PM clock validation
Larisa Grigore (2): spi: spi-fsl-dspi: restrict register range for regmap access spi: spi-fsl-dspi: Reset SR flags before sending a new message
Len Brown (1): tools/power turbostat: Clustered Uncore MHz counters should honor show/hide options
Leo Zeng (1): Revert "drm/amd/display: Request HW cursor on DCN3.2 with SubVP"
Leon Huang (1): drm/amd/display: Fix incorrect DPCD configs while Replay/PSR switch
Leon Romanovsky (1): xfrm: prevent high SEQ input in non-ESN mode
Li Bin (1): ARM: at91: pm: fix at91_suspend_finish for ZQ calibration
Lijo Lazar (2): drm/amd/pm: Fetch current power limit from PMFW drm/amdgpu: Use active umc info from discovery
Lin.Cao (1): drm/buddy: fix issue that force_merge cannot free all roots
Linus Torvalds (3):
      gcc-15: make 'unterminated string initialization' just a warning
      gcc-15: disable '-Wunterminated-string-initialization' entirely for now
      Fix mis-uses of 'cc-option' for warning disablement
Linus Walleij (1): ASoC: pcm6240: Drop bogus code handling IRQ as GPIO
Lorenzo Stoakes (1): intel_th: avoid using deprecated page->mapping, index fields
Lucas De Marchi (2): drm/xe: Stop ignoring errors from xe_ttm_stolen_mgr_init() drm/xe: Fix xe_tile_init_noalloc() error propagation
Luis de Arquer (1): spi-rockchip: Fix register out of bounds access
Luiz Augusto von Dentz (1): Bluetooth: L2CAP: Fix not checking l2cap_chan security level
Maarten Lankhorst (2): drm/xe: Move suballocator init to after display init drm/xe: Do not attempt to bootstrap VF in execlists mode
Maciej S. Szmigiero (1): ALSA: hda/realtek: Enable PC beep passthrough for HP EliteBook 855 G7
Maher Sanalla (1): RDMA/uverbs: Propagate errors from rdma_lookup_get_uobject()
Manish Pandey (1): scsi: ufs: Introduce quirk to extend PA_HIBERN8TIME for UFS devices
Marcos Paulo de Souza (1): printk: Check CON_SUSPEND when unblanking a console
Marek Szyprowski (1): dma-mapping: avoid potential unused data compilation warning
Marek Vasut (1): leds: trigger: netdev: Configure LED blink interval for HW offload
Mario Limonciello (1): Revert "drm/amd: Keep display off while going into S4"
Mark Harmstone (1): btrfs: avoid linker error in btrfs_find_create_tree_block()
Mark Pearson (1): platform/x86: think-lmi: Fix attribute name usage for non-compliant items
Markus Elfring (1): media: c8sectpfe: Call of_node_put(i2c_bus) only once in c8sectpfe_probe()
Martin Blumenstingl (1): pinctrl: meson: define the pull up/down resistor value as 60 kOhm
Martin KaFai Lau (1): bpf: Use kallsyms to find the function name of a struct_ops's stub function
Martin Povišer (1): ASoC: ops: Enforce platform maximum on initial value
Martin Tsai (1): drm/amd/display: Support multiple options during psr entry.
Masahiro Yamada (1): kconfig: do not clear SYMBOL_VALID when reading include/config/auto.conf
Matt Johnston (1): fuse: Return EPERM rather than ENOSYS from link()
Matthew Brost (2): drm/xe: Nuke VM's mapping upon close drm/xe: Retry BO allocation
Matthew Sakai (1): dm vdo: use a short static string for thread name prefix
Matthew Wilcox (Oracle) (2):
      orangefs: Do not truncate file size
      highmem: add folio_test_partial_kmap()
Matthias Fend (1): media: tc358746: improve calculation of the D-PHY timing registers
Matthieu Baerts (NGI0) (1): mptcp: pm: userspace: flags: clearer msg if no remote addr
Matti Lehtimäki (2): remoteproc: qcom_wcnss: Handle platforms with only single power domain remoteproc: qcom_wcnss: Fix on platforms without fallback regulators
Michael Margolin (1): RDMA/core: Fix best page size finding when it can cross SG entries
Michal Pecio (1): usb: xhci: Don't change the status of stalled TDs on failed Stop EP
Michal Swiatkowski (3): ice: init flow director before RDMA ice: treat dyn_allowed only as suggestion ice: count combined queues using Rx/Tx count
Michal Wajdeczko (3): drm/xe/relay: Don't use GFP_KERNEL for new transactions drm/xe/pf: Reset GuC VF config when unprovisioning critical resource drm/xe/sa: Always call drm_suballoc_manager_fini()
Mika Westerberg (1): thunderbolt: Do not add non-active NVM if NVM upgrade is disabled for retimer
Mike Christie (1): vhost-scsi: Return queue full for page alloc failures during copy
Mikulas Patocka (1): dm: restrict dm device size to 2^63-512 bytes
Ming Lei (1): blk-throttle: don't take carryover for prioritized processing of metadata
Ming Yen Hsieh (1): wifi: mt76: mt7925: load the appropriate CLC data based on hardware type
Ming-Hung Tsai (1): dm cache: prevent BUG_ON by blocking retries on failed device resumes
Miri Korenblit (2): wifi: iwlwifi: don't warn when if there is a FW error wifi: iwlwifi: don't warn during reprobe
Moshe Shemesh (1): net/mlx5: Avoid report two health errors on same syndrome
Mrinmay Sarkar (1): PCI: epf-mhi: Update device ID for SA8775P
Mykyta Yatsenko (1): bpf: Return prog btf_id without capable check
Naman Trivedi (1): arm64: zynqmp: add clock-output-names property in clock nodes
Namjae Jeon (2): cifs: add validation check for the fields in smb_aces ksmbd: fix stream write failure
Nandakumar Edamana (1): libbpf: Fix out-of-bound read
Nathan Chancellor (2): kbuild: Properly disable -Wunterminated-string-initialization for clang i3c: master: svc: Fix implicit fallthrough in svc_i3c_master_ibi_work()
Nicholas Kazlauskas (2): drm/amd/display: Ensure DMCUB idle before reset on DCN31/DCN35 drm/amd/display: Guard against setting dispclk low when active
Nicholas Susanto (1): drm/amd/display: Enable urgent latency adjustment on DCN35
Nick Hu (1): clocksource/drivers/timer-riscv: Stop stimecmp when cpu hotplug
Nicolas Bouchinet (2): netfilter: conntrack: Bound nf_conntrack sysctl writes scsi: logging: Fix scsi_logging_level bounds
Nicolas Bretz (1): ext4: on a remount, only log the ro or r/w state when it has changed
Nicolas Escande (1): wifi: ath12k: fix ath12k_hal_tx_cmd_ext_desc_setup() info1 override
Niklas Cassel (1): misc: pci_endpoint_test: Give disabled BARs a distinct error code
Niklas Neronin (1): usb: xhci: set page size to the xHCI-supported size
Niklas Söderlund (1): media: adv7180: Disable test-pattern control on adv7180
Nir Lichtman (1): x86/build: Fix broken copy command in genimage.sh when making isoimage
Nícolas F. R. A. Prado (4): thermal/drivers/mediatek/lvts: Start sensor interrupts disabled ASoC: mediatek: mt6359: Add stub for mt6359_accdet_enable_jack_detect ASoC: mediatek: mt8188: Treat DMIC_GAINx_CUR as non-volatile ASoC: mediatek: mt8188: Add reference for dmic clocks
Oak Zeng (1): drm/xe: Reject BO eviction if BO is bound to current VM
Oliver Hartkopp (2): can: bcm: add locking for bcm_op runtime updates can: bcm: add missing rcu read protection for procfs content
Olivier Moysan (1): drm: bridge: adv7511: fill stream capabilities
Ovidiu Bunea (1): drm/amd/display: Exit idle optimizations before accessing PHY
P Praneesh (3): wifi: ath12k: fix the ampdu id fetch in the HAL_RX_MPDU_START TLV wifi: ath12k: Fix end offset bit definition in monitor ring descriptor wifi: ath11k: Use dma_alloc_noncoherent for rx_tid buffer allocation
Pali Rohár (7): cifs: Add fallback for SMB2 CREATE without FILE_READ_ATTRIBUTES cifs: Fix querying and creating MF symlinks over SMB1 cifs: Fix negotiate retry functionality cifs: Set default Netbios RFC1001 server name to hostname in UNC cifs: Fix establishing NetBIOS session for SMB2+ connection cifs: Fix and improve cifs_query_path_info() and cifs_query_file_info() cifs: Fix changing times and read-only attr over SMB1 smb_set_file_info() function
Patrisious Haddad (1): net/mlx5: Change POOL_NEXT_SIZE define value and make it global
Paul Burton (2): MIPS: pm-cps: Use per-CPU variables as per-CPU, not per-core clocksource: mips-gic-timer: Enable counter when CPUs start
Paul Chaignon (1): xfrm: Sanitize marks before insert
Paul E. McKenney (1): rcu: Fix get_state_synchronize_rcu_full() GP-start detection
Paul Elder (1): media: imx335: Set vblank immediately
Paul Kocialkowski (1): net: dwmac-sun8i: Use parsed internal PHY address instead of 1
Pavan Kumar Linga (1): idpf: fix null-ptr-deref in idpf_features_check
Pavel Begunkov (3): io_uring: don't duplicate flushing in io_req_post_cqe io_uring/msg: initialise msg request opcode io_uring: fix overflow resched cqe reordering
Pavel Nikulin (1): platform/x86: asus-wmi: Disable OOBE state after resume from hibernation
Paweł Anikiel (1): x86/Kconfig: make CFI_AUTO_DEFAULT depend on !RUST or Rust >= 1.88
Pedro Nishiyama (1): Bluetooth: Disable SCO support if READ_VOICE_SETTING is unsupported/broken
Peichen Huang (1): drm/amd/display: not abort link train when bw is low
Pengyu Luo (1): cpufreq: Add SM8650 to cpufreq-dt-platdev blocklist
Peter Seiderer (2): net: pktgen: fix mpls maximum labels list parsing net: pktgen: fix access outside of user given buffer in pktgen_thread_write()
Peter Ujfalusi (3): ASoC: SOF: ipc4-control: Use SOF_CTRL_CMD_BINARY as numid for bytes_ext ASoC: SOF: Intel: hda-bus: Use PIO mode on ACE2+ platforms ASoC: SOF: ipc4-pcm: Delay reporting is only supported for playback direction
Peter Zijlstra (3): perf/core: Clean up perf_try_init_event() x86/ibt: Handle FineIBT in handle_cfi_failure() x86/traps: Cleanup and robustify decode_bug()
Peter Zijlstra (Intel) (1): perf: Avoid the read if the count is already updated
Petr Machata (2): vxlan: Join / leave MC group after remote changes bridge: mdb: Allow replace of a host-joined group
Philip Redkin (1): x86/mm: Check return value from memblock_phys_alloc_range()
Philip Yang (1): drm/amdkfd: KFD release_work possible circular locking
Ping-Ke Shih (7): wifi: rtw89: fw: propagate error code from rtw89_h2c_tx() wifi: rtw89: fw: get sb_sel_ver via get_unaligned_le32() wifi: rtw89: fw: add blacklist to avoid obsolete secure firmware wifi: rtw89: fw: validate multi-firmware header before getting its size wifi: rtw89: fw: validate multi-firmware header before accessing wifi: rtw89: call power_on ahead before selecting firmware wifi: rtw89: add wiphy_lock() to work that isn't held wiphy_lock() yet
Prathamesh Shete (1): pinctrl-tegra: Restore SFSEL bit when freeing pins
Qu Wenruo (2): btrfs: run btrfs_error_commit_super() early btrfs: avoid NULL pointer dereference if no valid csum tree
Quan Zhou (1): wifi: mt76: mt7925: fix fails to enter low power mode in suspend state
Raag Jadav (2): devres: Introduce devm_kmemdup_array() err.h: move IOMEM_ERR_PTR() to err.h
Rafael J. Wysocki (1): cpuidle: menu: Avoid discarding useful information
Ramasamy Kaliappan (1): wifi: ath12k: Improve BSS discovery with hidden SSID in 6 GHz band
Ranjan Kumar (2): scsi: mpi3mr: Add level check to control event logging scsi: mpi3mr: Update timestamp only for supervisor IOCs
Ravi Bangoria (2): perf/amd/ibs: Fix perf_ibs_op.cnt_mask for CurCnt perf/amd/ibs: Fix ->config to sample period calculation for OP PMU
Rex Lu (1): wifi: mt76: mt7996: fix SER reset trigger on WED reset
Ricardo Ribalda (2): media: uvcvideo: Add sanity check to uvc_ioctl_xu_ctrl_map media: uvcvideo: Handle uvc menu translation inside uvc_get_le_value
Ritesh Harjani (IBM) (1): book3s64/radix: Fix compile errors when CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=n
Rob Herring (Arm) (1): perf: arm_pmuv3: Call kvm_vcpu_pmu_resync_el0() before enabling counters
Robert Richter (1): libnvdimm/labels: Fix divide error in nd_label_data_init()
Robin Murphy (1): iommu: Keep dev->iommu state consistent
Roger Pau Monne (2): PCI: vmd: Disable MSI remapping bypass under Xen xen/pci: Do not register devices with segments >= 0x10000
Ronak Doshi (1): vmxnet3: update MTU after device quiesce
Rosen Penev (1): wifi: ath9k: return by of_get_mac_address
Ryan Roberts (2): arm64/mm: Check pmd_table() in pmd_trans_huge() arm64/mm: Check PUD_TYPE_TABLE in pud_bad()
Ryan Walklin (1): ASoC: sun4i-codec: support hp-det-gpios property
Ryo Takakura (1): lockdep: Fix wait context check on softirq for PREEMPT_RT
Ryusuke Konishi (1): nilfs2: fix deadlock warnings caused by lock dependency in init_nilfs()
Sabrina Dubroca (2): espintcp: fix skb leaks espintcp: remove encap socket caching to avoid reference leak
Sagi Maimon (1): ptp: ocp: Limit signal/freq counts in summary output functions
Sakari Ailus (1): media: v4l: Memset argument to 0 before calling get_mbus_config pad op
Saket Kumar Bhaskar (1): perf/hw_breakpoint: Return EOPNOTSUPP for unsupported breakpoint type
Salah Triki (1): smb: server: smb2pdu: check return value of xa_store()
Samuel Holland (1): riscv: Allow NOMMU kernels to access all of RAM
Saranya Gopal (1): platform/x86/intel: hid: Add Pantherlake support
Satyanarayana K V P (2): drm/xe/vf: Retry sending MMIO request to GUC on timeout error drm/xe/pf: Create a link between PF and VF devices
Sean Anderson (1): spi: zynqmp-gqspi: Always acknowledge interrupts
Sean Wang (1): Bluetooth: btmtksdio: Prevent enabling interrupts after IRQ handler removal
Seongman Lee (1): x86/sev: Fix operator precedence in GHCB_MSR_VMPL_REQ_LEVEL macro
Sergio Perez Gonzalez (1): spi: spi-mux: Fix coverity issue, unchecked return value
Seyediman Seyedarab (1): kbuild: fix argument parsing in scripts/config
Shahar Shitrit (2): net/mlx5: Modify LSB bitmask in temperature event to include only the first bit net/mlx5: Apply rate-limiting to high temperature warning
Shashank Gupta (1): crypto: octeontx2 - suppress auth failure screaming due to negative tests
Shivasharan S (1): scsi: mpt3sas: Send a diag reset if target reset fails
Shiwu Zhang (1): drm/amdgpu: enlarge the VBIOS binary size limit
Shixiong Ou (1): fbdev: fsl-diu-fb: add missing device_remove_file()
Shree Ramamoorthy (1): mfd: tps65219: Remove TPS65219_REG_TI_DEV_ID check
Shuicheng Lin (1): drm/xe/debugfs: Add missing xe_pm_runtime_put in wedge_mode_set
Simona Vetter (1): drm/atomic: clarify the rules around drm_atomic_state->allow_modeset
Siva Durga Prasad Paladugu (1): firmware: xilinx: Dont send linux address to get fpga config get status
Soeren Moch (1): wifi: rtl8xxxu: retry firmware download on error
Sohil Mehta (2): x86/smpboot: Fix INIT delay assignment for extended Intel Families x86/microcode: Update the Intel processor flag scan check
Stanimir Varbanov (2): PCI: brcmstb: Expand inbound window size up to 64GB PCI: brcmstb: Add a softdep to MIP MSI-X driver
Stanley Chu (1): i3c: master: svc: Fix missing STOP for master request
Stefan Binding (1): ASoC: intel/sdw_utils: Add volume limit to cs42l43 speakers
Stefan Wahren (3): staging: vchiq_arm: Create keep-alive thread during probe drm/v3d: Add clock handling dmaengine: fsl-edma: Fix return code for unhandled interrupts
Stefano Garzarella (1): vhost_task: fix vhost_task_create() documentation
Stephan Gerhold (1): i2c: qup: Vote for interconnect bandwidth to DRAM
Subbaraya Sundeep (1): octeontx2-af: Set LMT_ENA bit for APR table entries
Sudeep Holla (4): mailbox: pcc: Use acpi_os_ioremap() instead of ioremap() firmware: arm_ffa: Reject higher major version as incompatible firmware: arm_ffa: Handle the presence of host partition in the partition info firmware: arm_scmi: Relax duplicate name constraint across protocol ids
Suman Ghosh (1): octeontx2-pf: Add AF_XDP non-zero copy support
Sungjong Seo (1): exfat: call bh_read in get_block only when necessary
Sven Schwermer (1): crypto: mxs-dcp - Only set OTP_KEY bit for OTP key
Svyatoslav Ryhel (1): ARM: tegra: Switch DSI-B clock parent to PLLD on Tegra114
Takashi Iwai (5): ALSA: seq: Improve data consistency at polling ASoC: Intel: bytcr_rt5640: Add DMI quirk for Acer Aspire SW3-013 ALSA: hda/realtek: Add quirk for HP Spectre x360 15-df1xxx ALSA: usb-audio: Fix duplicated name in MIDI substream names ALSA: pcm: Fix race of buffer access at PCM OSS layer
Taniya Das (1): clk: qcom: lpassaudiocc-sc7280: Add support for LPASS resets for QCM6490
Tavian Barnes (1): ASoC: SOF: Intel: hda: Fix UAF when reloading module
Thangaraj Samynathan (1): net: lan743x: Restore SGMII CTRL register on resume
Thomas Gleixner (1): posix-timers: Ensure that timer initialization is fully visible
Thomas Huth (1): x86/headers: Replace __ASSEMBLY__ with __ASSEMBLER__ in UAPI headers
Thomas Weißschuh (1): timer_list: Don't use %pK through printk()
Thomas Zimmermann (3): drm/gem: Test for imported GEM buffers with helper drm/ast: Find VBIOS mode from regular display size drm/gem: Internally test import_attach for imported objects
Tianyang Zhang (1): mm/page_alloc.c: avoid infinite retries caused by cpuset race
Tiwei Bie (1): um: Update min_low_pfn to match changes in uml_reserved
Tobias Brunner (1): xfrm: Fix UDP GRO handling for some corner cases
Tom Chung (1): drm/amd/display: Initial psr_version with correct setting
Trond Myklebust (7): NFSv4: Check for delegation validity in nfs_start_delegation_return_locked() NFS: Don't allow waiting for exiting tasks SUNRPC: Don't allow waiting for exiting tasks NFSv4: Treat ENETUNREACH errors as fatal for state recovery SUNRPC: rpc_clnt_set_transport() must not change the autobind setting SUNRPC: rpcbind should never reset the port to the value '0' pNFS/flexfiles: Report ENETDOWN as a connection error
Tudor Ambarus (1): mailbox: use error ret code of of_parse_phandle_with_args()
Uday Shankar (1): ublk: enforce ublks_max only for unprivileged devices
Umesh Nerlige Ramappa (1): drm/xe/oa: Ensure that polled read returns latest data
Uros Bizjak (1): x86/locking: Use ALT_OUTPUT_SP() for percpu_{,try_}cmpxchg{64,128}_op()
Valentin Caron (1): pinctrl: devicetree: do not goto err when probing hogs in pinctrl_dt_to_map
Vasant Hegde (1): iommu/amd/pgtbl_v2: Improve error handling
Vicki Pfau (1): Input: xpad - add more controllers
Victor Lu (1): drm/amdgpu: Do not program AGP BAR regs under SRIOV in gfxhub_v1_0.c
Victor Skvortsov (1): drm/amdgpu: Skip pcie_replay_count sysfs creation for VF
Vijendar Mukunda (1): soundwire: amd: change the soundwire wake enable/disable sequence
Viktor Malik (1): bpftool: Fix readlink usage in get_fd_type
Vinicius Costa Gomes (1): dmaengine: idxd: Fix allowing write() from different address spaces
Vinith Kumar R (1): wifi: ath12k: Report proper tx completion status to mac80211
Viresh Kumar (1): firmware: arm_ffa: Set dma_mask for ffa devices
Vitalii Mordan (1): i2c: pxa: fix call balance of i2c->clk handling routines
Vladimir Kondratiev (1): irqchip/riscv-aplic: Add support for hart indexes
Vladimir Moskovkin (1): platform/x86: dell-wmi-sysman: Avoid buffer overflow in current_password_store()
Vladimir Oltean (1): net: enetc: refactor bulk flipping of RX buffers to separate function
Waiman Long (1): x86/nmi: Add an emergency handler in nmi_desc & use it in nmi_shootdown_cpus()
Wang Liang (1): net/tipc: fix slab-use-after-free Read in tipc_aead_encrypt_done
Wang Zhaolong (3): smb: client: Store original IO parameters and prevent zero IO sizes smb: client: Fix use-after-free in cifs_fill_dirent smb: client: Reset all search buffer pointers when releasing buffer
Wentao Guan (2): nvme-pci: add quirks for device 126f:1001 nvme-pci: add quirks for WDC Blue SN550 15b7:5009
Willem de Bruijn (1): ipv6: save dontfrag in cork
William Tu (3): net/mlx5e: set the tx_queue_len for pfifo_fast net/mlx5e: reduce rep rxq depth to 256 for ECPF net/mlx5e: reduce the max log mpwrq sz for ECPF and reps
Xiao Liang (1): net: ipv6: Init tunnel link-netns before registering dev
Xiaofei Tan (1): ACPI: HED: Always initialize before evged
Xin Li (Intel) (1): x86/fred: Fix system hang during S4 resume with FRED enabled
Xin Wang (1): drm/xe/debugfs: fixed the return value of wedged_mode_set
Yeoreum Yun (1): coresight-etb10: change etb_drvdata spinlock's type to raw_spinlock_t
Yi Liu (2): iommufd: Extend IOMMU_GET_HW_INFO to report PASID capability iommufd: Disallow allocating nested parent domain with fault ID
Yihan Zhu (1): drm/amd/display: handle max_downscale_src_width fail check
Yonghong Song (1): bpf: Allow pre-ordering for bpf cgroup progs
Youssef Samir (1): accel/qaic: Mask out SR-IOV PCI resources
Yuanjun Gong (1): leds: pwm-multicolor: Add check for fwnode_property_read_u32
Zhang Rui (1): thermal: intel: x86_pkg_temp_thermal: Fix bogus trip temperature
Zhang Yi (2): ext4: don't write back data before punch hole in nojournal mode ext4: remove writable userspace mappings before truncating page cache
Zhi Wang (1): drm/nouveau: fix the broken marco GSP_MSG_MAX_SIZE
Zhikai Zhai (1): drm/amd/display: calculate the remain segments for all pipes
Zhongqiu Han (1): virtio_ring: Fix data race by tagging event_triggered as racy for KCSAN
Zhongwei Zhang (1): drm/amd/display: Correct timing_adjust_pending flag setting.
Zsolt Kajtar (2): fbcon: Use correct erase colour for clearing in fbcon fbdev: core: tileblit: Implement missing margin clearing for tileblit
feijuan.li (1): drm/edid: fixed the bug that hdr metadata was not reset
gaoxu (1): cgroup: Fix compilation issue due to cgroup_mutex not being exported
junan (1): HID: usbkbd: Fix the bit shift number for LED_KANA
shantiprasad shettar (1): bnxt_en: Query FW parameters when the CAPS_CHANGE bit is set
zihan zhou (1): sched: Reduce the default slice to avoid tasks getting an extra tick
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index e691f75c97e7..b5cb36148554 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6261,6 +6261,8 @@
Selecting 'on' will also enable the mitigation against user space to user space task attacks. + Selecting specific mitigation does not force enable + user mitigations.
Selecting 'off' will disable both the kernel and the user space protections. diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst index 84b43061c11b..60434f2b0286 100644 --- a/Documentation/driver-api/serial/driver.rst +++ b/Documentation/driver-api/serial/driver.rst @@ -103,4 +103,4 @@ Some helpers are provided in order to set/get modem control lines via GPIO. .. kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c :identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms - mctrl_gpio_disable_ms + mctrl_gpio_disable_ms_sync mctrl_gpio_disable_ms_no_sync diff --git a/Documentation/hwmon/dell-smm-hwmon.rst b/Documentation/hwmon/dell-smm-hwmon.rst index 74905675d71f..5a4edb6565cf 100644 --- a/Documentation/hwmon/dell-smm-hwmon.rst +++ b/Documentation/hwmon/dell-smm-hwmon.rst @@ -32,12 +32,12 @@ Temperature sensors and fans can be queried and set via the standard =============================== ======= ======================================= Name Perm Description =============================== ======= ======================================= -fan[1-3]_input RO Fan speed in RPM. -fan[1-3]_label RO Fan label. -fan[1-3]_min RO Minimal Fan speed in RPM -fan[1-3]_max RO Maximal Fan speed in RPM -fan[1-3]_target RO Expected Fan speed in RPM -pwm[1-3] RW Control the fan PWM duty-cycle. +fan[1-4]_input RO Fan speed in RPM. +fan[1-4]_label RO Fan label. +fan[1-4]_min RO Minimal Fan speed in RPM +fan[1-4]_max RO Maximal Fan speed in RPM +fan[1-4]_target RO Expected Fan speed in RPM +pwm[1-4] RW Control the fan PWM duty-cycle. pwm1_enable WO Enable or disable automatic BIOS fan control (not supported on all laptops, see below for details). @@ -93,7 +93,7 @@ Again, when you find new codes, we'd be happy to have your patches! ---------------------------
The driver also exports the fans as thermal cooling devices with -``type`` set to ``dell-smm-fan[1-3]``. This allows for easy fan control +``type`` set to ``dell-smm-fan[1-4]``. This allows for easy fan control using one of the thermal governors.
Module parameters diff --git a/Makefile b/Makefile index 6e8afa78bbef..18c2a7cf9e91 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 12 -SUBLEVEL = 30 +SUBLEVEL = 31 EXTRAVERSION = NAME = Baby Opossum Posse
@@ -997,10 +997,6 @@ NOSTDINC_FLAGS += -nostdinc # perform bounds checking. KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
-#Currently, disable -Wstringop-overflow for GCC 11, globally. -KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow) -KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow) - # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += -fno-strict-overflow
diff --git a/arch/arm/boot/dts/nvidia/tegra114.dtsi b/arch/arm/boot/dts/nvidia/tegra114.dtsi index 86f14e2fd29f..6c057b506951 100644 --- a/arch/arm/boot/dts/nvidia/tegra114.dtsi +++ b/arch/arm/boot/dts/nvidia/tegra114.dtsi @@ -139,7 +139,7 @@ dsib: dsi@54400000 { reg = <0x54400000 0x00040000>; clocks = <&tegra_car TEGRA114_CLK_DSIB>, <&tegra_car TEGRA114_CLK_DSIBLP>, - <&tegra_car TEGRA114_CLK_PLL_D2_OUT0>; + <&tegra_car TEGRA114_CLK_PLL_D_OUT0>; clock-names = "dsi", "lp", "parent"; resets = <&tegra_car 82>; reset-names = "dsi"; diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 05a1547642b6..6c3e6aa22606 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -545,11 +545,12 @@ extern u32 at91_pm_suspend_in_sram_sz;
static int at91_suspend_finish(unsigned long val) { - unsigned char modified_gray_code[] = { - 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d, - 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b, - 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13, - 0x10, 0x11, + /* SYNOPSYS workaround to fix a bug in the calibration logic */ + unsigned char modified_fix_code[] = { + 0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13, + 0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14, + 0x1e, 0x1f, }; unsigned int tmp, index; int i; @@ -560,25 +561,25 @@ static int at91_suspend_finish(unsigned long val) * restore the ZQ0SR0 with the value saved here. But the * calibration is buggy and restoring some values from ZQ0SR0 * is forbidden and risky thus we need to provide processed - * values for these (modified gray code values). + * values for these. */ tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
/* Store pull-down output impedance select. */ index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f; - soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index]; + soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
/* Store pull-up output impedance select. */ index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f; - soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; + soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
/* Store pull-down on-die termination impedance select. */ index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f; - soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; + soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
/* Store pull-up on-die termination impedance select. */ index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f; - soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; + soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
/* * The 1st 8 words of memory might get corrupted in the process diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts index 3be1e8c2fdb9..1012103df25f 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts @@ -151,28 +151,12 @@ &pio { vcc-pg-supply = <&reg_aldo1>; };
-&r_ir { - linux,rc-map-name = "rc-beelink-gs1"; - status = "okay"; -}; - -&r_pio { - /* - * FIXME: We can't add that supply for now since it would - * create a circular dependency between pinctrl, the regulator - * and the RSB Bus. - * - * vcc-pl-supply = <&reg_aldo1>; - */ - vcc-pm-supply = <&reg_aldo1>; -}; - -&r_rsb { +&r_i2c { status = "okay";
- axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; @@ -290,6 +274,22 @@ sw { }; };
+&r_ir { + linux,rc-map-name = "rc-beelink-gs1"; + status = "okay"; +}; + +&r_pio { + /* + * PL0 and PL1 are used for PMIC I2C + * don't enable the pl-supply else + * it will fail at boot + * + * vcc-pl-supply = <&reg_aldo1>; + */ + vcc-pm-supply = <&reg_aldo1>; +}; + &spdif { pinctrl-names = "default"; pinctrl-0 = <&spdif_tx_pin>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts index 6c3bfe3d09d9..14cc99b21622 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts @@ -175,16 +175,12 @@ &pio { vcc-pg-supply = <&reg_vcc_wifi_io>; };
-&r_ir { - status = "okay"; -}; - -&r_rsb { +&r_i2c { status = "okay";
- axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; @@ -295,6 +291,10 @@ sw { }; };
+&r_ir { + status = "okay"; +}; + &rtc { clocks = <&ext_osc32k>; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi index 13b07141c334..bab0f63dcc5b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi @@ -112,20 +112,12 @@ &pio { vcc-pg-supply = <&reg_aldo1>; };
-&r_ir { - status = "okay"; -}; - -&r_pio { - vcc-pm-supply = <&reg_bldo3>; -}; - -&r_rsb { +&r_i2c { status = "okay";
- axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; @@ -240,6 +232,14 @@ sw { }; };
+&r_ir { + status = "okay"; +}; + +&r_pio { + vcc-pm-supply = <&reg_bldo3>; +}; + &rtc { clocks = <&ext_osc32k>; }; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi index 3a9b6907185d..242820845707 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi @@ -26,6 +26,8 @@ memory@0 {
leds { compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&spi_quad_pins>;
led-power1 { label = "udpu:green:power"; @@ -82,8 +84,6 @@ &sdhci0 {
&spi0 { status = "okay"; - pinctrl-names = "default"; - pinctrl-0 = <&spi_quad_pins>;
flash@0 { compatible = "jedec,spi-nor"; @@ -108,6 +108,10 @@ partition@180000 { }; };
+&spi_quad_pins { + function = "gpio"; +}; + &pinctrl_nb { i2c2_recovery_pins: i2c2-recovery-pins { groups = "i2c2"; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi index 63b94a04308e..38d49d612c0c 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi @@ -1686,7 +1686,7 @@ vdd_1v8_dis: regulator-vdd-1v8-dis { regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-always-on; - gpio = <&exp1 14 GPIO_ACTIVE_HIGH>; + gpio = <&exp1 9 GPIO_ACTIVE_HIGH>; enable-active-high; vin-supply = <&vdd_1v8>; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts index 36e888053746..9ce55b4d2de8 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts @@ -302,6 +302,16 @@ pcie@14160000 { };
pcie@141a0000 { + reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */ + 0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */ + 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */ + 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */ + 0x2e 0x20000000 0x0 0x10000000>; /* ECAM (256MB) */ + + ranges = <0x81000000 0x00 0x3a100000 0x00 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */ + 0x82000000 0x00 0x40000000 0x2e 0x30000000 0x0 0x08000000 /* non-prefetchable memory (128MB) */ + 0xc3000000 0x28 0x00000000 0x28 0x00000000 0x6 0x20000000>; /* prefetchable memory (25088MB) */ + status = "okay"; vddio-pex-ctl-supply = <&vdd_1v8_ls>; phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>, diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi index 60d1b1acf9a0..385fed8a852a 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi @@ -10,39 +10,44 @@
#include <dt-bindings/clock/xlnx-zynqmp-clk.h> / { - pss_ref_clk: pss_ref_clk { + pss_ref_clk: pss-ref-clk { bootph-all; compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <33333333>; + clock-output-names = "pss_ref_clk"; };
- video_clk: video_clk { + video_clk: video-clk { bootph-all; compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <27000000>; + clock-output-names = "video_clk"; };
- pss_alt_ref_clk: pss_alt_ref_clk { + pss_alt_ref_clk: pss-alt-ref-clk { bootph-all; compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <0>; + clock-output-names = "pss_alt_ref_clk"; };
- gt_crx_ref_clk: gt_crx_ref_clk { + gt_crx_ref_clk: gt-crx-ref-clk { bootph-all; compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <108000000>; + clock-output-names = "gt_crx_ref_clk"; };
- aux_ref_clk: aux_ref_clk { + aux_ref_clk: aux-ref-clk { bootph-all; compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <27000000>; + clock-output-names = "aux_ref_clk"; }; };
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 8a6b7feca3e4..d92a0203e5a9 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -132,6 +132,7 @@ #define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01 +#define HISI_CPU_PART_HIP09 0xD02
#define APPLE_CPU_PART_M1_ICESTORM 0x022 #define APPLE_CPU_PART_M1_FIRESTORM 0x023 @@ -208,6 +209,7 @@ #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) +#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09) #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) #define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index c329ea061dc9..5ba8376735cb 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -554,18 +554,6 @@ static inline int pmd_protnone(pmd_t pmd) #endif
#define pmd_present(pmd) pte_present(pmd_pte(pmd)) - -/* - * THP definitions. - */ - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline int pmd_trans_huge(pmd_t pmd) -{ - return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); -} -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) #define pmd_valid(pmd) pte_valid(pmd_pte(pmd)) @@ -725,6 +713,18 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE) #define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + /* + * If pmd is present-invalid, pmd_table() won't detect it + * as a table, so force the valid bit for the comparison. + */ + return pmd_val(pmd) && pmd_present(pmd) && + !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID)); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 static inline bool pud_sect(pud_t pud) { return false; } static inline bool pud_table(pud_t pud) { return true; } @@ -806,7 +806,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud)) -#define pud_bad(pud) (!pud_table(pud)) +#define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \ + PUD_TYPE_TABLE) #define pud_present(pud) pte_present(pud_pte(pud)) #ifndef __PAGETABLE_PMD_FOLDED #define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 8ef3335ecff7..31eaf15d2079 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -904,6 +904,7 @@ static u8 spectre_bhb_loop_affected(void) MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD), + MIDR_ALL_VERSIONS(MIDR_HISI_HIP09), {}, }; static const struct midr_range spectre_bhb_k11_list[] = { diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index c9bfeda89e40..66132683f1ed 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
-CFLAGS_module.o += $(call cc-option,-Wno-override-init,) -CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,) -CFLAGS_traps.o += $(call cc-option,-Wno-override-init,) -CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,) +CFLAGS_module.o += $(call cc-disable-warning, override-init) +CFLAGS_syscall.o += $(call cc-disable-warning, override-init) +CFLAGS_traps.o += $(call cc-disable-warning, override-init) +CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_FUNCTION_TRACER ifndef CONFIG_DYNAMIC_FTRACE diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile index b2f4cbe01ae8..2e188e8f1468 100644 --- a/arch/loongarch/kvm/Makefile +++ b/arch/loongarch/kvm/Makefile @@ -19,4 +19,4 @@ kvm-y += tlb.o kvm-y += vcpu.o kvm-y += vm.o
-CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) +CFLAGS_exit.o += $(call cc-disable-warning, override-init) diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h index dc025888f6d2..b41fc1044668 100644 --- a/arch/mips/include/asm/ftrace.h +++ b/arch/mips/include/asm/ftrace.h @@ -91,4 +91,20 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
#endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ + +#ifdef CONFIG_FTRACE_SYSCALLS +#ifndef __ASSEMBLY__ +/* + * Some syscall entry functions on mips start with "__sys_" (fork and clone, + * for instance). We should also match the sys_ variant with those. + */ +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + return !strcmp(sym, name) || + (!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4)); +} +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_FTRACE_SYSCALLS */ #endif /* _ASM_MIPS_FTRACE_H */ diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index d09ca77e624d..9369a8dc385e 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -57,10 +57,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
-/* - * Used to synchronize entry to deep idle states. Actually per-core rather - * than per-CPU. - */ +/* Used to synchronize entry to deep idle states */ static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
/* Saved CPU state across the CPS_PM_POWER_GATED state */ @@ -112,9 +109,10 @@ int cps_pm_enter_state(enum cps_pm_state state) cps_nc_entry_fn entry; struct core_boot_config *core_cfg; struct vpe_boot_config *vpe_cfg; + atomic_t *barrier;
/* Check that there is an entry function for this state */ - entry = per_cpu(nc_asm_enter, core)[state]; + entry = per_cpu(nc_asm_enter, cpu)[state]; if (!entry) return -EINVAL;
@@ -150,7 +148,7 @@ int cps_pm_enter_state(enum cps_pm_state state) smp_mb__after_atomic();
/* Create a non-coherent mapping of the core ready_count */ - core_ready_count = per_cpu(ready_count, core); + core_ready_count = per_cpu(ready_count, cpu); nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), (unsigned long)core_ready_count); nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); @@ -158,7 +156,8 @@ int cps_pm_enter_state(enum cps_pm_state state)
/* Ensure ready_count is zero-initialised before the assembly runs */ WRITE_ONCE(*nc_core_ready_count, 0); - coupled_barrier(&per_cpu(pm_barrier, core), online); + barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu])); + coupled_barrier(barrier, online);
/* Run the generated entry code */ left = entry(online, nc_core_ready_count); @@ -629,12 +628,14 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
static int cps_pm_online_cpu(unsigned int cpu) { - enum cps_pm_state state; - unsigned core = cpu_core(&cpu_data[cpu]); + unsigned int sibling, core; void *entry_fn, *core_rc; + enum cps_pm_state state; + + core = cpu_core(&cpu_data[cpu]);
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { - if (per_cpu(nc_asm_enter, core)[state]) + if (per_cpu(nc_asm_enter, cpu)[state]) continue; if (!test_bit(state, state_support)) continue; @@ -646,16 +647,19 @@ static int cps_pm_online_cpu(unsigned int cpu) clear_bit(state, state_support); }
- per_cpu(nc_asm_enter, core)[state] = entry_fn; + for_each_cpu(sibling, &cpu_sibling_map[cpu]) + per_cpu(nc_asm_enter, sibling)[state] = entry_fn; }
- if (!per_cpu(ready_count, core)) { + if (!per_cpu(ready_count, cpu)) { core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } - per_cpu(ready_count, core) = core_rc; + + for_each_cpu(sibling, &cpu_sibling_map[cpu]) + per_cpu(ready_count, sibling) = core_rc; }
return 0; diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h index d99863cd6cde..049152f8d597 100644 --- a/arch/powerpc/include/asm/mmzone.h +++ b/arch/powerpc/include/asm/mmzone.h @@ -29,6 +29,7 @@ extern cpumask_var_t node_to_cpumask_map[]; #ifdef CONFIG_MEMORY_HOTPLUG extern unsigned long max_pfn; u64 memory_hotplug_max(void); +u64 hot_add_drconf_memory_max(void); #else #define memory_hotplug_max() memblock_end_of_DRAM() #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 935568d68196..b1dc4cb9f78e 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2982,11 +2982,11 @@ static void __init fixup_device_tree_pmac(void) char type[8]; phandle node;
- // Some pmacs are missing #size-cells on escc nodes + // Some pmacs are missing #size-cells on escc or i2s nodes for (node = 0; prom_next_node(&node); ) { type[0] = '\0'; prom_getprop(node, "device_type", type, sizeof(type)); - if (prom_strcmp(type, "escc")) + if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s")) continue;
if (prom_getproplen(node, "#size-cells") != PROM_ERROR) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 04189689c127..0d807bf2328d 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -988,7 +988,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start, return 0; }
- +#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { if (radix_enabled()) @@ -996,6 +996,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
return false; } +#endif
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, unsigned long addr, unsigned long next) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 3c1da08304d0..603a0f652ba6 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1336,7 +1336,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr) return nid; }
-static u64 hot_add_drconf_memory_max(void) +u64 hot_add_drconf_memory_max(void) { struct device_node *memory = NULL; struct device_node *dn = NULL; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 42867469752d..33d726bb99e3 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2222,6 +2222,10 @@ static struct pmu power_pmu = { #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \ PERF_SAMPLE_PHYS_ADDR | \ PERF_SAMPLE_DATA_PAGE_SIZE) + +#define SIER_TYPE_SHIFT 15 +#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT) + /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled @@ -2290,6 +2294,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val, is_kernel_addr(mfspr(SPRN_SIAR))) record = 0;
+ /* + * SIER[46-48] presents instruction type of the sampled instruction. + * In ISA v3.0 and before values "0" and "7" are considered reserved. + * In ISA v3.1, value "7" has been used to indicate "larx/stcx". + * Drop the sample if "type" has reserved values for this field with a + * ISA version check. + */ + if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && + ppmu->get_mem_data_src) { + val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT; + if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) { + record = 0; + atomic64_inc(&event->lost_samples); + } + } + /* * Finally record data if requested. */ diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 56301b2bc8ae..031a2b63c171 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -321,8 +321,10 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
sier = mfspr(SPRN_SIER); val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; - if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) + if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) { + dsrc->val = 0; return; + }
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT; sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index ae6f7a235d8b..d6ebc19fb99c 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -52,7 +52,8 @@ enum { enum { DDW_EXT_SIZE = 0, DDW_EXT_RESET_DMA_WIN = 1, - DDW_EXT_QUERY_OUT_SIZE = 2 + DDW_EXT_QUERY_OUT_SIZE = 2, + DDW_EXT_LIMITED_ADDR_MODE = 3 };
static struct iommu_table *iommu_pseries_alloc_table(int node) @@ -1284,17 +1285,13 @@ static LIST_HEAD(failed_ddw_pdn_list);
static phys_addr_t ddw_memory_hotplug_max(void) { - resource_size_t max_addr = memory_hotplug_max(); - struct device_node *memory; + resource_size_t max_addr;
- for_each_node_by_type(memory, "memory") { - struct resource res; - - if (of_address_to_resource(memory, 0, &res)) - continue; - - max_addr = max_t(resource_size_t, max_addr, res.end + 1); - } +#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) + max_addr = hot_add_drconf_memory_max(); +#else + max_addr = memblock_end_of_DRAM(); +#endif
return max_addr; } @@ -1331,6 +1328,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn) ret); }
+/* + * Platforms support placing PHB in limited address mode starting with LoPAR + * level 2.13 implement. In this mode, the DMA address returned by DDW is over + * 4GB but, less than 64-bits. This benefits IO adapters that don't support + * 64-bits for DMA addresses. + */ +static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn) +{ + int ret; + u32 cfg_addr, reset_dma_win, las_supported; + u64 buid; + struct device_node *dn; + struct pci_dn *pdn; + + ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win); + if (ret) + goto out; + + ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported); + + /* Limited Address Space extension available on the platform but DDW in + * limited addressing mode not supported + */ + if (!ret && !las_supported) + ret = -EPROTO; + + if (ret) { + dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret); + goto out; + } + + dn = pci_device_to_OF_node(dev); + pdn = PCI_DN(dn); + buid = pdn->phb->buid; + cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8); + + ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid), + BUID_LO(buid), 1); + if (ret) + dev_info(&dev->dev, + "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ", + reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid), + ret); + +out: + return ret; +} + /* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */ static int iommu_get_page_shift(u32 query_page_size) { @@ -1398,7 +1443,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64 * * returns true if can map all pages (direct mapping), false otherwise.. */ -static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) +static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask) { int len = 0, ret; int max_ram_len = order_base_2(ddw_memory_hotplug_max()); @@ -1417,6 +1462,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) bool pmem_present; struct pci_dn *pci = PCI_DN(pdn); struct property *default_win = NULL; + bool limited_addr_req = false, limited_addr_enabled = false; + int dev_max_ddw; + int ddw_sz;
dn = of_find_node_by_type(NULL, "ibm,pmemory"); pmem_present = dn != NULL; @@ -1443,7 +1491,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) * the ibm,ddw-applicable property holds the tokens for: * ibm,query-pe-dma-window * ibm,create-pe-dma-window - * ibm,remove-pe-dma-window * for the given node in that order. * the property is actually in the parent, not the PE */ @@ -1463,6 +1510,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (ret != 0) goto out_failed;
+ /* DMA Limited Addressing required? This is when the driver has + * requested to create DDW but supports mask which is less than 64-bits + */ + limited_addr_req = (dma_mask != DMA_BIT_MASK(64)); + + /* place the PHB in Limited Addressing mode */ + if (limited_addr_req) { + if (limited_dma_window(dev, pdn)) + goto out_failed; + + /* PHB is in Limited address mode */ + limited_addr_enabled = true; + } + /* * If there is no window available, remove the default DMA window, * if it's present. This will make all the resources available to the @@ -1509,6 +1570,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_failed; }
+ /* Maximum DMA window size that the device can address (in log2) */ + dev_max_ddw = fls64(dma_mask); + + /* If the device DMA mask is less than 64-bits, make sure the DMA window + * size is not bigger than what the device can access + */ + ddw_sz = min(order_base_2(query.largest_available_block << page_shift), + dev_max_ddw); + /* * The "ibm,pmemory" can appear anywhere in the address space. * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS @@ -1517,23 +1587,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) */ len = max_ram_len; if (pmem_present) { - if (query.largest_available_block >= - (1ULL << (MAX_PHYSMEM_BITS - page_shift))) + if (ddw_sz >= MAX_PHYSMEM_BITS) len = MAX_PHYSMEM_BITS; else dev_info(&dev->dev, "Skipping ibm,pmemory"); }
/* check if the available block * number of ptes will map everything */ - if (query.largest_available_block < (1ULL << (len - page_shift))) { + if (ddw_sz < len) { dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu %llu-sized pages\n", 1ULL << len, query.largest_available_block, 1ULL << page_shift);
- len = order_base_2(query.largest_available_block << page_shift); - + len = ddw_sz; dynamic_mapping = true; } else { direct_mapping = !default_win_removed || @@ -1547,8 +1615,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) */ if (default_win_removed && pmem_present && !direct_mapping) { /* DDW is big enough to be split */ - if ((query.largest_available_block << page_shift) >= - MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) { + if ((1ULL << ddw_sz) >= + MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) { + direct_mapping = true;
/* offset of the Dynamic part of DDW */ @@ -1559,8 +1628,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) dynamic_mapping = true;
/* create max size DDW possible */ - len = order_base_2(query.largest_available_block - << page_shift); + len = ddw_sz; } }
@@ -1600,7 +1668,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (direct_mapping) { /* DDW maps the whole partition, so enable direct DMA mapping */ - ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, + ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT, win64->value, tce_setrange_multi_pSeriesLP_walk); if (ret) { dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n", @@ -1689,7 +1757,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) __remove_dma_window(pdn, ddw_avail, create.liobn);
out_failed: - if (default_win_removed) + if (default_win_removed || limited_addr_enabled) reset_dma_window(dev, pdn);
fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); @@ -1708,6 +1776,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << max_ram_len);
+ dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n", + limited_addr_req, limited_addr_enabled, direct_mapping); + return direct_mapping; }
@@ -1833,8 +1904,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) { struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
- /* only attempt to use a new window if 64-bit DMA is requested */ - if (dma_mask < DMA_BIT_MASK(64)) + /* For DDW, DMA mask should be more than 32-bits. For mask more then + * 32-bits but less then 64-bits, DMA addressing is supported in + * Limited Addressing mode. + */ + if (dma_mask <= DMA_BIT_MASK(32)) return false;
dev_dbg(&pdev->dev, "node is %pOF\n", dn); @@ -1847,7 +1921,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) */ pdn = pci_dma_find(dn, NULL); if (pdn && PCI_DN(pdn)) - return enable_ddw(pdev, pdn); + return enable_ddw(pdev, pdn, dma_mask);
return false; } @@ -2349,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, struct memory_notify *arg = data; int ret = 0;
+ /* This notifier can get called when onlining persistent memory as well. + * TCEs are not pre-mapped for persistent memory. Persistent memory will + * always be above ddw_memory_hotplug_max() + */ + switch (action) { case MEM_GOING_ONLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - if (window->direct) { + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); } @@ -2365,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, case MEM_OFFLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - if (window->direct) { + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); } diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index febf820d5058..e8beadc2bffd 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -26,12 +26,9 @@ * When not using MMU this corresponds to the first free page in * physical memory (aligned on a page boundary). */ -#ifdef CONFIG_64BIT #ifdef CONFIG_MMU +#ifdef CONFIG_64BIT #define PAGE_OFFSET kernel_map.page_offset -#else -#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) -#endif /* * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so * define the PAGE_OFFSET value for SV48 and SV39. @@ -41,6 +38,9 @@ #else #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) #endif /* CONFIG_64BIT */ +#else +#define PAGE_OFFSET ((unsigned long)phys_ram_base) +#endif /* CONFIG_MMU */
#ifndef __ASSEMBLY__
@@ -97,11 +97,7 @@ typedef struct page *pgtable_t; #define MIN_MEMBLOCK_ADDR 0 #endif
-#ifdef CONFIG_MMU #define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base)) -#else -#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) -#endif /* CONFIG_MMU */
struct kernel_mapping { unsigned long page_offset; diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index c0866ada5bbc..479550cdb440 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -12,7 +12,7 @@ #include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU -#define KERNEL_LINK_ADDR PAGE_OFFSET +#define KERNEL_LINK_ADDR _AC(CONFIG_PAGE_OFFSET, UL) #define KERN_VIRT_SIZE (UL(-1)) #else
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 69dc8aaab3fb..6b3b5255c889 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE) endif -CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) -CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,) +CFLAGS_syscall_table.o += $(call cc-disable-warning, override-init) +CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_KEXEC_CORE AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax) diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 9b6e86ce3867..bb77607c87aa 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -4,6 +4,7 @@ #include <linux/smp.h> #include <linux/sched.h> #include <linux/hugetlb.h> +#include <linux/mmu_notifier.h> #include <asm/sbi.h> #include <asm/mmu_context.h>
@@ -78,10 +79,17 @@ static void __ipi_flush_tlb_range_asid(void *info) local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid); }
-static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid, +static inline unsigned long get_mm_asid(struct mm_struct *mm) +{ + return mm ? cntx2asid(atomic_long_read(&mm->context.id)) : FLUSH_TLB_NO_ASID; +} + +static void __flush_tlb_range(struct mm_struct *mm, + const struct cpumask *cmask, unsigned long start, unsigned long size, unsigned long stride) { + unsigned long asid = get_mm_asid(mm); unsigned int cpu;
if (cpumask_empty(cmask)) @@ -105,30 +113,26 @@ static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid, }
put_cpu(); -}
-static inline unsigned long get_mm_asid(struct mm_struct *mm) -{ - return cntx2asid(atomic_long_read(&mm->context.id)); + if (mm) + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + size); }
void flush_tlb_mm(struct mm_struct *mm) { - __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm), - 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE); + __flush_tlb_range(mm, mm_cpumask(mm), 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE); }
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned int page_size) { - __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm), - start, end - start, page_size); + __flush_tlb_range(mm, mm_cpumask(mm), start, end - start, page_size); }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm), + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm), addr, PAGE_SIZE, PAGE_SIZE); }
@@ -161,13 +165,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, } }
- __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm), + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm), start, end - start, stride_size); }
void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID, + __flush_tlb_range(NULL, cpu_online_mask, start, end - start, PAGE_SIZE); }
@@ -175,7 +179,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm), + __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm), start, end - start, PMD_SIZE); } #endif @@ -189,7 +193,10 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm, unsigned long uaddr) { + unsigned long start = uaddr & PAGE_MASK; + cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, start + PAGE_SIZE); }
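The net effect of the tlbflush.c rework above: the ASID lookup moves inside __flush_tlb_range(), a NULL mm now stands for FLUSH_TLB_NO_ASID (kernel and batched flushes), and secondary TLBs are only notified for real address spaces. A minimal caller-side sketch, not part of the patch, using only names visible in the hunks above:

/* user mapping: ASID derived from the mm, MMU notifiers invoked */
__flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),
		  addr, PAGE_SIZE, PAGE_SIZE);

/*
 * kernel mapping: no mm, so get_mm_asid(NULL) yields FLUSH_TLB_NO_ASID
 * and mmu_notifier_arch_invalidate_secondary_tlbs() is skipped
 */
__flush_tlb_range(NULL, cpu_online_mask, start, end - start, PAGE_SIZE);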
void arch_flush_tlb_batched_pending(struct mm_struct *mm) @@ -199,7 +206,7 @@ void arch_flush_tlb_batched_pending(struct mm_struct *mm)
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) { - __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0, - FLUSH_TLB_MAX_SIZE, PAGE_SIZE); + __flush_tlb_range(NULL, &batch->cpumask, + 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE); cpumask_clear(&batch->cpumask); } diff --git a/arch/s390/hypfs/hypfs_diag_fs.c b/arch/s390/hypfs/hypfs_diag_fs.c index 00a6d370a280..280266a74f37 100644 --- a/arch/s390/hypfs/hypfs_diag_fs.c +++ b/arch/s390/hypfs/hypfs_diag_fs.c @@ -208,6 +208,8 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info) snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(), cpu_info)); cpu_dir = hypfs_mkdir(cpus_dir, buffer); + if (IS_ERR(cpu_dir)) + return PTR_ERR(cpu_dir); rc = hypfs_create_u64(cpu_dir, "mgmtime", cpu_info__acc_time(diag204_get_info_type(), cpu_info) - cpu_info__lp_time(diag204_get_info_type(), cpu_info)); diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index e95b2c8081eb..793afe236df0 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -85,7 +85,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_pmds = 1; - if (mm_alloc_pgste(tlb->mm)) + if (mm_has_pgste(tlb->mm)) gmap_unlink(tlb->mm, (unsigned long *)pte, address); tlb_remove_ptdesc(tlb, pte); } diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index a5b4fe2ad931..6ca9ea4a230b 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -70,6 +70,7 @@ void __init mem_init(void) map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0); memblock_free((void *)brk_end, uml_reserved - brk_end); uml_reserved = brk_end; + min_low_pfn = PFN_UP(__pa(uml_reserved));
/* this will put all low memory onto the freelists */ memblock_free_all(); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7b3622ba4c3c..15425c9bdc2b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2420,6 +2420,7 @@ config STRICT_SIGALTSTACK_SIZE config CFI_AUTO_DEFAULT bool "Attempt to use FineIBT by default at boot time" depends on FINEIBT + depends on !RUST || RUSTC_VERSION >= 108800 default y help Attempt to use FineIBT by default at boot time. If enabled, diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh index c9299aeb7333..3882ead513f7 100644 --- a/arch/x86/boot/genimage.sh +++ b/arch/x86/boot/genimage.sh @@ -22,6 +22,7 @@ # This script requires: # bash # syslinux +# genisoimage # mtools (for fdimage* and hdimage) # edk2/OVMF (for hdimage) # @@ -251,7 +252,9 @@ geniso() { cp "$isolinux" "$ldlinux" "$tmp_dir" cp "$FBZIMAGE" "$tmp_dir"/linux echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg - cp "${FDINITRDS[@]}" "$tmp_dir"/ + if [ ${#FDINITRDS[@]} -gt 0 ]; then + cp "${FDINITRDS[@]}" "$tmp_dir"/ + fi genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \ -quiet -o "$FIMAGE" -b isolinux.bin \ -c boot.cat -no-emul-boot -boot-load-size 4 \ diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S index 58e3124ee2b4..5b96249734ad 100644 --- a/arch/x86/entry/entry.S +++ b/arch/x86/entry/entry.S @@ -63,7 +63,7 @@ THUNK warn_thunk_thunk, __warn_thunk * entirely in the C code, and use an alias emitted by the linker script * instead. */ -#ifdef CONFIG_STACKPROTECTOR +#if defined(CONFIG_STACKPROTECTOR) && defined(CONFIG_SMP) EXPORT_SYMBOL(__ref_stack_chk_guard); #endif #endif diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index c3a2f6f57770..d34ee6f04f18 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -272,7 +272,7 @@ static int perf_ibs_init(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; struct perf_ibs *perf_ibs; - u64 max_cnt, config; + u64 config; int ret;
perf_ibs = get_ibs_pmu(event->attr.type); @@ -309,10 +309,19 @@ static int perf_ibs_init(struct perf_event *event) if (!hwc->sample_period) hwc->sample_period = 0x10; } else { - max_cnt = config & perf_ibs->cnt_mask; + u64 period = 0; + + if (perf_ibs == &perf_ibs_op) { + period = (config & IBS_OP_MAX_CNT) << 4; + if (ibs_caps & IBS_CAPS_OPCNTEXT) + period |= config & IBS_OP_MAX_CNT_EXT_MASK; + } else { + period = (config & IBS_FETCH_MAX_CNT) << 4; + } + config &= ~perf_ibs->cnt_mask; - event->attr.sample_period = max_cnt << 4; - hwc->sample_period = event->attr.sample_period; + event->attr.sample_period = period; + hwc->sample_period = period; }
if (!hwc->sample_period) @@ -1222,7 +1231,8 @@ static __init int perf_ibs_op_init(void) if (ibs_caps & IBS_CAPS_OPCNTEXT) { perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK; perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK; - perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK; + perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK | + IBS_OP_CUR_CNT_EXT_MASK); }
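For reference, the perf_ibs_init() hunk above derives the sample period from the raw config rather than from the full cnt_mask: the low 16 MaxCnt bits count in units of 16 (hence the << 4), while the OPCNTEXT extension bits stay at their final position. A stand-alone illustration of that arithmetic, not kernel code, with macro names as in asm/perf_event.h:

static u64 ibs_op_config_to_period(u64 config, bool have_opcntext)
{
	/* low 16 MaxCnt bits count in units of 16 ops */
	u64 period = (config & IBS_OP_MAX_CNT) << 4;

	/* the extended count bits are stored in place, not scaled */
	if (have_opcntext)
		period |= config & IBS_OP_MAX_CNT_EXT_MASK;

	return period;
}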
if (ibs_caps & IBS_CAPS_ZEN4) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 1b82bcc6fa55..54007174c15b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2240,8 +2240,9 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_ setup_pebs_fixed_sample_data); }
-static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size) +static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask) { + u64 pebs_enabled = cpuc->pebs_enabled & mask; struct perf_event *event; int bit;
@@ -2252,7 +2253,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int * It needs to call intel_pmu_save_and_restart_reload() to * update the event->count for this case. */ - for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) { + for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) { event = cpuc->events[bit]; if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_save_and_restart_reload(event, 0); @@ -2287,7 +2288,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d }
if (unlikely(base >= top)) { - intel_pmu_pebs_event_update_no_drain(cpuc, size); + intel_pmu_pebs_event_update_no_drain(cpuc, mask); return; }
@@ -2397,7 +2398,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
if (unlikely(base >= top)) { - intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX); + intel_pmu_pebs_event_update_no_drain(cpuc, mask); return; }
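The drain_pebs changes above pass the PMU's counter mask instead of an integer size because for_each_set_bit() only scans bit positions below its size argument: with a plain size, enabled PEBS bits at or above that cutoff (for example fixed counters at INTEL_PMC_IDX_FIXED, as built into mask in the icl path) would never get their auto-reload bookkeeping updated. A condensed sketch of the masked scan, with the auto-reload flag check omitted:

u64 pebs_enabled = cpuc->pebs_enabled & mask;
int bit;

for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX)
	intel_pmu_save_and_restart_reload(cpuc->events[bit], 0);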
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 806649c7f23d..9a0f29be1a9e 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -22,8 +22,9 @@ #define SECOND_BYTE_OPCODE_UD2 0x0b
#define BUG_NONE 0xffff -#define BUG_UD1 0xfffe -#define BUG_UD2 0xfffd +#define BUG_UD2 0xfffe +#define BUG_UD1 0xfffd +#define BUG_UD1_UBSAN 0xfffc
#ifdef CONFIG_GENERIC_BUG
diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h index 31d19c815f99..7dd5ab239c87 100644 --- a/arch/x86/include/asm/cfi.h +++ b/arch/x86/include/asm/cfi.h @@ -126,6 +126,17 @@ static inline int cfi_get_offset(void)
extern u32 cfi_get_func_hash(void *func);
+#ifdef CONFIG_FINEIBT +extern bool decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type); +#else +static inline bool +decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type) +{ + return false; +} + +#endif + #else static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) { diff --git a/arch/x86/include/asm/ibt.h b/arch/x86/include/asm/ibt.h index 1e59581d500c..b778ae6e67ee 100644 --- a/arch/x86/include/asm/ibt.h +++ b/arch/x86/include/asm/ibt.h @@ -41,7 +41,7 @@ _ASM_PTR fname "\n\t" \ ".popsection\n\t"
-static inline __attribute_const__ u32 gen_endbr(void) +static __always_inline __attribute_const__ u32 gen_endbr(void) { u32 endbr;
@@ -56,7 +56,7 @@ static inline __attribute_const__ u32 gen_endbr(void) return endbr; }
-static inline __attribute_const__ u32 gen_endbr_poison(void) +static __always_inline __attribute_const__ u32 gen_endbr_poison(void) { /* * 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 62d8b9448dc5..c6198fbcc1d7 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -46,6 +46,7 @@ #define INTEL_ANY IFM(X86_FAMILY_ANY, X86_MODEL_ANY)
#define INTEL_PENTIUM_PRO IFM(6, 0x01) +#define INTEL_PENTIUM_III_DESCHUTES IFM(6, 0x05)
#define INTEL_CORE_YONAH IFM(6, 0x0E)
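The new INTEL_PENTIUM_III_DESCHUTES define is consumed further down in cpu/microcode/intel.c, where it turns the open-coded "model >= 5 or family > 6" test into a single ordered comparison on a packed family/model value. A toy illustration of why packing enables range checks, using a simplified stand-in encoding rather than the kernel's actual VFM layout:

/* toy packing: family in the high byte, model in the low byte */
#define TOY_IFM(family, model)		(((family) << 8) | (model))
#define TOY_PENTIUM_III_DESCHUTES	TOY_IFM(6, 0x05)

static int needs_pf_from_msr(unsigned int family, unsigned int model)
{
	/* covers both "family 6, model >= 5" and "family > 6" in one test */
	return TOY_IFM(family, model) >= TOY_PENTIUM_III_DESCHUTES;
}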
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 41a0ebb699ec..f677382093f3 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h @@ -56,6 +56,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *);
void unregister_nmi_handler(unsigned int, const char *);
+void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler); + void stop_nmi(void); void restart_nmi(void); void local_touch_nmi(void); diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index c55a79d5feae..2d9c250b3c8d 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -349,9 +349,9 @@ do { \ \ asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \ "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \ - : [var] "+m" (__my_cpu_var(_var)), \ - "+a" (old__.low), \ - "+d" (old__.high) \ + : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)), \ + "+a" (old__.low), \ + "+d" (old__.high)) \ : "b" (new__.low), \ "c" (new__.high), \ "S" (&(_var)) \ @@ -380,10 +380,10 @@ do { \ asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \ "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \ CC_SET(z) \ - : CC_OUT(z) (success), \ - [var] "+m" (__my_cpu_var(_var)), \ - "+a" (old__.low), \ - "+d" (old__.high) \ + : ALT_OUTPUT_SP(CC_OUT(z) (success), \ + [var] "+m" (__my_cpu_var(_var)), \ + "+a" (old__.low), \ + "+d" (old__.high)) \ : "b" (new__.low), \ "c" (new__.high), \ "S" (&(_var)) \ @@ -420,9 +420,9 @@ do { \ \ asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \ "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \ - : [var] "+m" (__my_cpu_var(_var)), \ - "+a" (old__.low), \ - "+d" (old__.high) \ + : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)), \ + "+a" (old__.low), \ + "+d" (old__.high)) \ : "b" (new__.low), \ "c" (new__.high), \ "S" (&(_var)) \ @@ -451,10 +451,10 @@ do { \ asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \ "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \ CC_SET(z) \ - : CC_OUT(z) (success), \ - [var] "+m" (__my_cpu_var(_var)), \ - "+a" (old__.low), \ - "+d" (old__.high) \ + : ALT_OUTPUT_SP(CC_OUT(z) (success), \ + [var] "+m" (__my_cpu_var(_var)), \ + "+a" (old__.low), \ + "+d" (old__.high)) \ : "b" (new__.low), \ "c" (new__.high), \ "S" (&(_var)) \ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 7505bb5d260a..aa351c4a20ee 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -520,6 +520,7 @@ struct pebs_xmm { */ #define IBS_OP_CUR_CNT (0xFFF80ULL<<32) #define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32) +#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52) #define IBS_OP_CNT_CTL (1ULL<<19) #define IBS_OP_VAL (1ULL<<18) #define IBS_OP_ENABLE (1ULL<<17) diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h index 98726c2b04f8..ddaf3c62efb5 100644 --- a/arch/x86/include/asm/sev-common.h +++ b/arch/x86/include/asm/sev-common.h @@ -116,7 +116,7 @@ enum psc_op { #define GHCB_MSR_VMPL_REQ 0x016 #define GHCB_MSR_VMPL_REQ_LEVEL(v) \ /* GHCBData[39:32] */ \ - (((u64)(v) & GENMASK_ULL(7, 0) << 32) | \ + ((((u64)(v) & GENMASK_ULL(7, 0)) << 32) | \ /* GHCBDdata[11:0] */ \ GHCB_MSR_VMPL_REQ)
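The sev-common.h hunk is purely an operator-precedence fix: in C, << binds tighter than &, so the old macro masked v against GENMASK_ULL(7, 0) << 32 (bits 39:32) instead of masking the low byte first and then shifting it into GHCBData[39:32]. A small user-space demonstration, with GENMASK_ULL spelled out and 0x016 standing in for GHCB_MSR_VMPL_REQ from the context above:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t v = 0x2;	/* e.g. request VMPL2 */

	/* old: '<<' wins over '&', so v is ANDed with bits 39:32 -> 0 */
	uint64_t wrong = ((uint64_t)v & GENMASK_ULL(7, 0) << 32) | 0x016;
	/* new: mask the low byte first, then place it in GHCBData[39:32] */
	uint64_t right = (((uint64_t)v & GENMASK_ULL(7, 0)) << 32) | 0x016;

	printf("wrong=%#llx right=%#llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}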
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 9b82eebd7add..dafbf581c515 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -26,7 +26,7 @@ #define XLF_5LEVEL_ENABLED (1<<6) #define XLF_MEM_ENCRYPTION (1<<7)
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__
#include <linux/types.h> #include <linux/screen_info.h> @@ -210,6 +210,6 @@ enum x86_hardware_subarch { X86_NR_SUBARCHS, };
-#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_BOOTPARAM_H */ diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h index 2f491efe3a12..55bc66867156 100644 --- a/arch/x86/include/uapi/asm/e820.h +++ b/arch/x86/include/uapi/asm/e820.h @@ -54,7 +54,7 @@ */ #define E820_RESERVED_KERN 128
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> struct e820entry { __u64 addr; /* start of memory segment */ @@ -76,7 +76,7 @@ struct e820map { #define BIOS_ROM_BASE 0xffe00000 #define BIOS_ROM_END 0xffffffff
-#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_E820_H */ diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h index d62ac5db093b..a82c039d8e6a 100644 --- a/arch/x86/include/uapi/asm/ldt.h +++ b/arch/x86/include/uapi/asm/ldt.h @@ -12,7 +12,7 @@ /* The size of each LDT entry. */ #define LDT_ENTRY_SIZE 8
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* * Note on 64bit base and limit is ignored and you cannot set DS/ES/CS * not to the default values if you still want to do syscalls. This @@ -44,5 +44,5 @@ struct user_desc { #define MODIFY_LDT_CONTENTS_STACK 1 #define MODIFY_LDT_CONTENTS_CODE 2
-#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* _ASM_X86_LDT_H */ diff --git a/arch/x86/include/uapi/asm/msr.h b/arch/x86/include/uapi/asm/msr.h index e7516b402a00..4b8917ca28fe 100644 --- a/arch/x86/include/uapi/asm/msr.h +++ b/arch/x86/include/uapi/asm/msr.h @@ -2,7 +2,7 @@ #ifndef _UAPI_ASM_X86_MSR_H #define _UAPI_ASM_X86_MSR_H
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__
#include <linux/types.h> #include <linux/ioctl.h> @@ -10,5 +10,5 @@ #define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8]) #define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
-#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _UAPI_ASM_X86_MSR_H */ diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h index 16074b9c93bb..5823584dea13 100644 --- a/arch/x86/include/uapi/asm/ptrace-abi.h +++ b/arch/x86/include/uapi/asm/ptrace-abi.h @@ -25,7 +25,7 @@
#else /* __i386__ */
-#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) +#if defined(__ASSEMBLER__) || defined(__FRAME_OFFSETS) /* * C ABI says these regs are callee-preserved. They aren't saved on kernel entry * unless syscall needs a complete, fully filled "struct pt_regs". @@ -57,7 +57,7 @@ #define EFLAGS 144 #define RSP 152 #define SS 160 -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */
/* top of stack page */ #define FRAME_SIZE 168 @@ -87,7 +87,7 @@
#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #endif
diff --git a/arch/x86/include/uapi/asm/ptrace.h b/arch/x86/include/uapi/asm/ptrace.h index 85165c0edafc..e0b5b4f6226b 100644 --- a/arch/x86/include/uapi/asm/ptrace.h +++ b/arch/x86/include/uapi/asm/ptrace.h @@ -7,7 +7,7 @@ #include <asm/processor-flags.h>
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__
#ifdef __i386__ /* this struct defines the way the registers are stored on the @@ -81,6 +81,6 @@ struct pt_regs {
-#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_PTRACE_H */ diff --git a/arch/x86/include/uapi/asm/setup_data.h b/arch/x86/include/uapi/asm/setup_data.h index b111b0c18544..50c45ead4e7c 100644 --- a/arch/x86/include/uapi/asm/setup_data.h +++ b/arch/x86/include/uapi/asm/setup_data.h @@ -18,7 +18,7 @@ #define SETUP_INDIRECT (1<<31) #define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT)
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -78,6 +78,6 @@ struct ima_setup_data { __u64 size; } __attribute__((packed));
-#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_SETUP_DATA_H */ diff --git a/arch/x86/include/uapi/asm/signal.h b/arch/x86/include/uapi/asm/signal.h index f777346450ec..1067efabf18b 100644 --- a/arch/x86/include/uapi/asm/signal.h +++ b/arch/x86/include/uapi/asm/signal.h @@ -2,7 +2,7 @@ #ifndef _UAPI_ASM_X86_SIGNAL_H #define _UAPI_ASM_X86_SIGNAL_H
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <linux/compiler.h>
@@ -16,7 +16,7 @@ struct siginfo; typedef unsigned long sigset_t;
#endif /* __KERNEL__ */ -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */
#define SIGHUP 1 @@ -68,7 +68,7 @@ typedef unsigned long sigset_t;
#include <asm-generic/signal-defs.h>
-#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__
# ifndef __KERNEL__ @@ -106,6 +106,6 @@ typedef struct sigaltstack { __kernel_size_t ss_size; } stack_t;
-#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_SIGNAL_H */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 66e77bd7d511..6ab96bc764cf 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -1254,6 +1254,7 @@ asm( ".pushsection .rodata \n" " endbr64 \n" " subl $0x12345678, %r10d \n" " je fineibt_preamble_end \n" + "fineibt_preamble_ud2: \n" " ud2 \n" " nop \n" "fineibt_preamble_end: \n" @@ -1261,9 +1262,11 @@ asm( ".pushsection .rodata \n" );
extern u8 fineibt_preamble_start[]; +extern u8 fineibt_preamble_ud2[]; extern u8 fineibt_preamble_end[];
#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start) +#define fineibt_preamble_ud2 (fineibt_preamble_ud2 - fineibt_preamble_start) #define fineibt_preamble_hash 7
asm( ".pushsection .rodata \n" @@ -1568,6 +1571,33 @@ static void poison_cfi(void *addr) } }
+/* + * regs->ip points to a UD2 instruction, return true and fill out target and + * type when this UD2 is from a FineIBT preamble. + * + * We check the preamble by checking for the ENDBR instruction relative to the + * UD2 instruction. + */ +bool decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type) +{ + unsigned long addr = regs->ip - fineibt_preamble_ud2; + u32 endbr, hash; + + __get_kernel_nofault(&endbr, addr, u32, Efault); + if (endbr != gen_endbr()) + return false; + + *target = addr + fineibt_preamble_size; + + __get_kernel_nofault(&hash, addr + fineibt_preamble_hash, u32, Efault); + *type = (u32)regs->r10 + hash; + + return true; + +Efault: + return false; +} + #else
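The decode_fineibt_insn() helper added above works because the FineIBT preamble emitted by the asm block has a fixed layout; the offsets below are a worked illustration using standard x86 instruction lengths, and they line up with the fineibt_preamble_hash (7) define and the new fineibt_preamble_ud2 label:

/*
 *  +0   f3 0f 1e fa             endbr64
 *  +4   41 81 ea 78 56 34 12    subl  $0x12345678, %r10d  (hash imm at +7)
 *  +11  74 xx                   je    fineibt_preamble_end
 *  +13  0f 0b                   ud2                       <- regs->ip traps here
 *  +15  90                      nop
 *  +16                          fineibt_preamble_end
 *
 * so the decoder recovers addr = regs->ip - 13, verifies the endbr64 at
 * addr, reads the expected hash at addr + 7, and reports the call target
 * as addr + fineibt_preamble_size (addr + 16).
 */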
static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, diff --git a/arch/x86/kernel/cfi.c b/arch/x86/kernel/cfi.c index e6bf78fac146..f6905bef0af8 100644 --- a/arch/x86/kernel/cfi.c +++ b/arch/x86/kernel/cfi.c @@ -70,11 +70,25 @@ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) unsigned long target; u32 type;
- if (!is_cfi_trap(regs->ip)) - return BUG_TRAP_TYPE_NONE; + switch (cfi_mode) { + case CFI_KCFI: + if (!is_cfi_trap(regs->ip)) + return BUG_TRAP_TYPE_NONE; + + if (!decode_cfi_insn(regs, &target, &type)) + return report_cfi_failure_noaddr(regs, regs->ip); + + break;
- if (!decode_cfi_insn(regs, &target, &type)) - return report_cfi_failure_noaddr(regs, regs->ip); + case CFI_FINEIBT: + if (!decode_fineibt_insn(regs, &target, &type)) + return BUG_TRAP_TYPE_NONE; + + break; + + default: + return BUG_TRAP_TYPE_NONE; + }
return report_cfi_failure(regs, regs->ip, &target, type); } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c683abd640fd..0e9ab0b9a494 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1442,9 +1442,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void) { + enum spectre_v2_user_cmd mode; char arg[20]; int ret, i;
+ mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? + SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE; + switch (spectre_v2_cmd) { case SPECTRE_V2_CMD_NONE: return SPECTRE_V2_USER_CMD_NONE; @@ -1457,7 +1461,7 @@ spectre_v2_parse_user_cmdline(void) ret = cmdline_find_option(boot_command_line, "spectre_v2_user", arg, sizeof(arg)); if (ret < 0) - return SPECTRE_V2_USER_CMD_AUTO; + return mode;
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { if (match_option(arg, ret, v2_user_options[i].option)) { @@ -1467,8 +1471,8 @@ spectre_v2_parse_user_cmdline(void) } }
- pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); - return SPECTRE_V2_USER_CMD_AUTO; + pr_err("Unknown user space protection option (%s). Switching to default\n", arg); + return mode; }
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index df5650eb3f08..362cc71bbc86 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -74,7 +74,7 @@ void intel_collect_cpu_info(struct cpu_signature *sig) sig->pf = 0; sig->rev = intel_get_microcode_revision();
- if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) { + if (IFM(x86_family(sig->sig), x86_model(sig->sig)) >= INTEL_PENTIUM_III_DESCHUTES) { unsigned int val[2];
/* get processor flags from MSR 0x17 */ diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index ed163c8c8604..9a95d00f1423 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -40,8 +40,12 @@ #define CREATE_TRACE_POINTS #include <trace/events/nmi.h>
+/* + * An emergency handler can be set in any context including NMI + */ struct nmi_desc { raw_spinlock_t lock; + nmi_handler_t emerg_handler; struct list_head head; };
@@ -132,9 +136,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration) static int nmi_handle(unsigned int type, struct pt_regs *regs) { struct nmi_desc *desc = nmi_to_desc(type); + nmi_handler_t ehandler; struct nmiaction *a; int handled=0;
+ /* + * Call the emergency handler, if set + * + * In the case of crash_nmi_callback() emergency handler, it will + * return in the case of the crashing CPU to enable it to complete + * other necessary crashing actions ASAP. Other handlers in the + * linked list won't need to be run. + */ + ehandler = desc->emerg_handler; + if (ehandler) + return ehandler(type, regs); + rcu_read_lock();
/* @@ -224,6 +241,31 @@ void unregister_nmi_handler(unsigned int type, const char *name) } EXPORT_SYMBOL_GPL(unregister_nmi_handler);
+/** + * set_emergency_nmi_handler - Set emergency handler + * @type: NMI type + * @handler: the emergency handler to be stored + * + * Set an emergency NMI handler which, if set, will preempt all the other + * handlers in the linked list. If a NULL handler is passed in, it will clear + * it. It is expected that concurrent calls to this function will not happen + * or the system is screwed beyond repair. + */ +void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler) +{ + struct nmi_desc *desc = nmi_to_desc(type); + + if (WARN_ON_ONCE(desc->emerg_handler == handler)) + return; + desc->emerg_handler = handler; + + /* + * Ensure the emergency handler is visible to other CPUs before + * function return + */ + smp_wmb(); +} + static void pci_serr_error(unsigned char reason, struct pt_regs *regs) { diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index dc1dd3f3e67f..9aaac1f9f45b 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -926,15 +926,11 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) shootdown_callback = callback;
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); - /* Would it be better to replace the trap vector here? */ - if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback, - NMI_FLAG_FIRST, "crash")) - return; /* Return what? */ + /* - * Ensure the new callback function is set before sending - * out the NMI + * Set emergency handler to preempt other handlers. */ - wmb(); + set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);
apic_send_IPI_allbutself(NMI_VECTOR);
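With the emergency hook, the crash path no longer has to register (and possibly fail to register) an ordinary NMI handler before shooting down CPUs: per the set_emergency_nmi_handler() kernel-doc above, installing a handler preempts the whole handler chain and passing NULL clears it again. A minimal usage sketch; only the install call appears in the reboot.c hunk, the clearing call is illustrative:

/* crashing CPU: every other CPU will run crash_nmi_callback() first */
set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);
apic_send_IPI_allbutself(NMI_VECTOR);

/* ... after the shootdown completes, a caller could drop it again */
set_emergency_nmi_handler(NMI_LOCAL, NULL);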
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f1fac08fdef2..2c451de702c8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -681,9 +681,9 @@ static void __init smp_quirk_init_udelay(void) return;
/* if modern processor, use no delay */ - if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || - ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) || - ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) { + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO) || + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && boot_cpu_data.x86 >= 0x18) || + (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86 >= 0xF)) { init_udelay = 0; return; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5e3e036e6e53..b18fc7539b8d 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -94,10 +94,17 @@ __always_inline int is_valid_bugaddr(unsigned long addr)
/* * Check for UD1 or UD2, accounting for Address Size Override Prefixes. - * If it's a UD1, get the ModRM byte to pass along to UBSan. + * If it's a UD1, further decode to determine its use: + * + * UBSan{0}: 67 0f b9 00 ud1 (%eax),%eax + * UBSan{10}: 67 0f b9 40 10 ud1 0x10(%eax),%eax + * static_call: 0f b9 cc ud1 %esp,%ecx + * + * Notably UBSAN uses EAX, static_call uses ECX. */ -__always_inline int decode_bug(unsigned long addr, u32 *imm) +__always_inline int decode_bug(unsigned long addr, s32 *imm, int *len) { + unsigned long start = addr; u8 v;
if (addr < TASK_SIZE_MAX) @@ -110,24 +117,42 @@ __always_inline int decode_bug(unsigned long addr, u32 *imm) return BUG_NONE;
v = *(u8 *)(addr++); - if (v == SECOND_BYTE_OPCODE_UD2) + if (v == SECOND_BYTE_OPCODE_UD2) { + *len = addr - start; return BUG_UD2; + }
- if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1) + if (v != SECOND_BYTE_OPCODE_UD1) return BUG_NONE;
- /* Retrieve the immediate (type value) for the UBSAN UD1 */ - v = *(u8 *)(addr++); - if (X86_MODRM_RM(v) == 4) - addr++; - *imm = 0; - if (X86_MODRM_MOD(v) == 1) - *imm = *(u8 *)addr; - else if (X86_MODRM_MOD(v) == 2) - *imm = *(u32 *)addr; - else - WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v)); + v = *(u8 *)(addr++); /* ModRM */ + + if (X86_MODRM_MOD(v) != 3 && X86_MODRM_RM(v) == 4) + addr++; /* SIB */ + + /* Decode immediate, if present */ + switch (X86_MODRM_MOD(v)) { + case 0: if (X86_MODRM_RM(v) == 5) + addr += 4; /* RIP + disp32 */ + break; + + case 1: *imm = *(s8 *)addr; + addr += 1; + break; + + case 2: *imm = *(s32 *)addr; + addr += 4; + break; + + case 3: break; + } + + /* record instruction length */ + *len = addr - start; + + if (X86_MODRM_REG(v) == 0) /* EAX */ + return BUG_UD1_UBSAN;
return BUG_UD1; } @@ -258,10 +283,10 @@ static inline void handle_invalid_op(struct pt_regs *regs) static noinstr bool handle_bug(struct pt_regs *regs) { bool handled = false; - int ud_type; - u32 imm; + int ud_type, ud_len; + s32 ud_imm;
- ud_type = decode_bug(regs->ip, &imm); + ud_type = decode_bug(regs->ip, &ud_imm, &ud_len); if (ud_type == BUG_NONE) return handled;
@@ -281,15 +306,28 @@ static noinstr bool handle_bug(struct pt_regs *regs) */ if (regs->flags & X86_EFLAGS_IF) raw_local_irq_enable(); - if (ud_type == BUG_UD2) { + + switch (ud_type) { + case BUG_UD2: if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN || handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) { - regs->ip += LEN_UD2; + regs->ip += ud_len; handled = true; } - } else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) { - pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip); + break; + + case BUG_UD1_UBSAN: + if (IS_ENABLED(CONFIG_UBSAN_TRAP)) { + pr_crit("%s at %pS\n", + report_ubsan_failure(regs, ud_imm), + (void *)regs->ip); + } + break; + + default: + break; } + if (regs->flags & X86_EFLAGS_IF) raw_local_irq_disable(); instrumentation_end(); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 101725c149c4..9cbc1e6057d3 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -645,8 +645,13 @@ static void __init memory_map_top_down(unsigned long map_start, */ addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start, map_end); - memblock_phys_free(addr, PMD_SIZE); - real_end = addr + PMD_SIZE; + if (!addr) { + pr_warn("Failed to release memory for alloc_low_pages()"); + real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE)); + } else { + memblock_phys_free(addr, PMD_SIZE); + real_end = addr + PMD_SIZE; + }
/* step_size need to be small so pgt_buf from BRK could cover it */ step_size = PMD_SIZE; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index ff253648706f..d8853afd314b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -967,9 +967,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, ret = __add_pages(nid, start_pfn, nr_pages, params); WARN_ON_ONCE(ret);
- /* update max_pfn, max_low_pfn and high_memory */ - update_end_of_memory_vars(start_pfn << PAGE_SHIFT, - nr_pages << PAGE_SHIFT); + /* + * Special case: add_pages() is called by memremap_pages() for adding device + * private pages. Do not bump up max_pfn in the device private path, + * because max_pfn changes affect dma_addressing_limited(). + * + * dma_addressing_limited() returning true when max_pfn is the device's + * addressable memory can force device drivers to use bounce buffers + * and impact their performance negatively: + */ + if (!params->pgmap) + /* update max_pfn, max_low_pfn and high_memory */ + update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
return ret; } diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 230f1dee4f09..e0b0ec0f8245 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -109,8 +109,14 @@ void __init kernel_randomize_memory(void) memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) + CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
- /* Adapt physical memory region size based on available memory */ - if (memory_tb < kaslr_regions[0].size_tb) + /* + * Adapt physical memory region size based on available memory, + * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the + * device BAR space assuming the direct map space is large enough + * for creating a ZONE_DEVICE mapping in the direct map corresponding + * to the physical BAR address. + */ + if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb)) kaslr_regions[0].size_tb = memory_tb;
/* diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 63230ff8cf4f..08e76a5ca155 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -27,6 +27,7 @@ #include <asm/mmu_context.h> #include <asm/cpu_device_id.h> #include <asm/microcode.h> +#include <asm/fred.h>
#ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -231,6 +232,19 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) */ #ifdef CONFIG_X86_64 wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); + + /* + * Reinitialize FRED to ensure the FRED MSRs contain the same values + * as before hibernation. + * + * Note, the setup of FRED RSPs requires access to percpu data + * structures. Therefore, FRED reinitialization can only occur after + * the percpu access pointer (i.e., MSR_GS_BASE) is restored. + */ + if (ctxt->cr4 & X86_CR4_FRED) { + cpu_init_fred_exceptions(); + cpu_init_fred_rsps(); + } #else loadsegment(fs, __KERNEL_PERCPU); #endif diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c index e80ab7d28117..1b0d95328b2c 100644 --- a/arch/x86/um/os-Linux/mcontext.c +++ b/arch/x86/um/os-Linux/mcontext.c @@ -27,7 +27,6 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc) COPY(RIP); COPY2(EFLAGS, EFL); COPY2(CS, CSGSFS); - regs->gp[CS / sizeof(unsigned long)] &= 0xffff; - regs->gp[CS / sizeof(unsigned long)] |= 3; + regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48; #endif } diff --git a/block/badblocks.c b/block/badblocks.c index db4ec8b9b2a8..a9709771a101 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -1349,14 +1349,15 @@ static int _badblocks_check(struct badblocks *bb, sector_t s, int sectors, len = sectors;
update_sectors: + /* This situation should never happen */ + WARN_ON(sectors < len); + s += len; sectors -= len;
if (sectors > 0) goto re_check;
- WARN_ON(sectors < 0); - if (unacked_badblocks > 0) rv = -1; else if (acked_badblocks > 0) diff --git a/block/bdev.c b/block/bdev.c index 738e3c8457e7..e7daca6565ea 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -168,9 +168,26 @@ int set_blocksize(struct file *file, int size)
/* Don't change the size if it is same as current */ if (inode->i_blkbits != blksize_bits(size)) { + /* + * Flush and truncate the pagecache before we reconfigure the + * mapping geometry because folio sizes are variable now. If a + * reader has already allocated a folio whose size is smaller + * than the new min_order but invokes readahead after the new + * min_order becomes visible, readahead will think there are + * "zero" blocks per folio and crash. Take the inode and + * invalidation locks to avoid racing with + * read/write/fallocate. + */ + inode_lock(inode); + filemap_invalidate_lock(inode->i_mapping); + sync_blockdev(bdev); + kill_bdev(bdev); + inode->i_blkbits = blksize_bits(size); kill_bdev(bdev); + filemap_invalidate_unlock(inode->i_mapping); + inode_unlock(inode); } return 0; } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index f1cf7f2909f3..643d6bf66522 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1725,27 +1725,27 @@ int blkcg_policy_register(struct blkcg_policy *pol) struct blkcg *blkcg; int i, ret;
+ /* + * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy + * without pd_alloc_fn/pd_free_fn can't be activated. + */ + if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || + (!pol->pd_alloc_fn ^ !pol->pd_free_fn)) + return -EINVAL; + mutex_lock(&blkcg_pol_register_mutex); mutex_lock(&blkcg_pol_mutex);
/* find an empty slot */ - ret = -ENOSPC; for (i = 0; i < BLKCG_MAX_POLS; i++) if (!blkcg_policy[i]) break; if (i >= BLKCG_MAX_POLS) { pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n"); + ret = -ENOSPC; goto err_unlock; }
- /* - * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy - * without pd_alloc_fn/pd_free_fn can't be activated. - */ - if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || - (!pol->pd_alloc_fn ^ !pol->pd_free_fn)) - goto err_unlock; - /* register @pol */ pol->plid = i; blkcg_policy[pol->plid] = pol; @@ -1756,8 +1756,10 @@ int blkcg_policy_register(struct blkcg_policy *pol) struct blkcg_policy_data *cpd;
cpd = pol->cpd_alloc_fn(GFP_KERNEL); - if (!cpd) + if (!cpd) { + ret = -ENOMEM; goto err_free_cpds; + }
blkcg->cpd[pol->plid] = cpd; cpd->blkcg = blkcg; diff --git a/block/blk-settings.c b/block/blk-settings.c index 1e63e3dd5440..7858c92b4483 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -124,6 +124,11 @@ static int blk_validate_integrity_limits(struct queue_limits *lim) return 0; }
+ if (lim->features & BLK_FEAT_BOUNCE_HIGH) { + pr_warn("no bounce buffer support for integrity metadata\n"); + return -EINVAL; + } + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { pr_warn("integrity support disabled.\n"); return -EINVAL; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 2c4192e12efa..6b82fcbd7e77 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1593,13 +1593,6 @@ static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw) return tg_may_dispatch(tg, bio, NULL); }
-static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw) -{ - if (!bio_flagged(bio, BIO_BPS_THROTTLED)) - tg->carryover_bytes[rw] -= throtl_bio_data_size(bio); - tg->carryover_ios[rw]--; -} - bool __blk_throtl_bio(struct bio *bio) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); @@ -1636,10 +1629,12 @@ bool __blk_throtl_bio(struct bio *bio) /* * IOs which may cause priority inversions are * dispatched directly, even if they're over limit. - * Debts are handled by carryover_bytes/ios while - * calculating wait time. + * + * Charge and dispatch directly, and our throttle + * control algorithm is adaptive, and extra IO bytes + * will be throttled for paying the debt */ - tg_dispatch_in_debt(tg, bio, rw); + throtl_charge_bio(tg, bio); } else { /* if above limits, break to queue */ break; diff --git a/block/blk-zoned.c b/block/blk-zoned.c index c11db5be2532..414118435240 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -347,6 +347,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */ + inode_lock(bdev->bd_mapping->host); filemap_invalidate_lock(bdev->bd_mapping); ret = blkdev_truncate_zone_range(bdev, mode, &zrange); if (ret) @@ -368,8 +369,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
fail: - if (cmd == BLKRESETZONE) + if (cmd == BLKRESETZONE) { filemap_invalidate_unlock(bdev->bd_mapping); + inode_unlock(bdev->bd_mapping->host); + }
return ret; } diff --git a/block/blk.h b/block/blk.h index 1426f9c28197..e91012247ff2 100644 --- a/block/blk.h +++ b/block/blk.h @@ -482,7 +482,8 @@ static inline void blk_zone_update_request_bio(struct request *rq, * the original BIO sector so that blk_zone_write_plug_bio_endio() can * lookup the zone write plug. */ - if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio)) + if (req_op(rq) == REQ_OP_ZONE_APPEND || + bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) bio->bi_iter.bi_sector = rq->__sector; } void blk_zone_write_plug_bio_endio(struct bio *bio); diff --git a/block/bounce.c b/block/bounce.c index 0d898cd5ec49..09a9616cf209 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -41,8 +41,6 @@ static void init_bounce_bioset(void)
ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); BUG_ON(ret); - if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE)) - BUG_ON(1);
ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0); BUG_ON(ret); diff --git a/block/fops.c b/block/fops.c index 43983be5a2b3..d4b1d942f270 100644 --- a/block/fops.c +++ b/block/fops.c @@ -721,7 +721,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) ret = direct_write_fallback(iocb, from, ret, blkdev_buffered_write(iocb, from)); } else { + /* + * Take i_rwsem and invalidate_lock to avoid racing with + * set_blocksize changing i_blkbits/folio order and punching + * out the pagecache. + */ + inode_lock_shared(bd_inode); ret = blkdev_buffered_write(iocb, from); + inode_unlock_shared(bd_inode); }
if (ret > 0) @@ -732,6 +739,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { + struct inode *bd_inode = bdev_file_inode(iocb->ki_filp); struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host); loff_t size = bdev_nr_bytes(bdev); loff_t pos = iocb->ki_pos; @@ -768,7 +776,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) goto reexpand; }
+ /* + * Take i_rwsem and invalidate_lock to avoid racing with set_blocksize + * changing i_blkbits/folio order and punching out the pagecache. + */ + inode_lock_shared(bd_inode); ret = filemap_read(iocb, to, ret); + inode_unlock_shared(bd_inode);
reexpand: if (unlikely(shorted)) @@ -811,6 +825,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, if ((start | len) & (bdev_logical_block_size(bdev) - 1)) return -EINVAL;
+ inode_lock(inode); filemap_invalidate_lock(inode->i_mapping);
/* @@ -843,6 +858,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
fail: filemap_invalidate_unlock(inode->i_mapping); + inode_unlock(inode); return error; }
diff --git a/block/ioctl.c b/block/ioctl.c index 6554b728bae6..919066b4bb49 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -141,6 +141,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, if (err) return err;
+ inode_lock(bdev->bd_mapping->host); filemap_invalidate_lock(bdev->bd_mapping); err = truncate_bdev_range(bdev, mode, start, start + len - 1); if (err) @@ -173,6 +174,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, blk_finish_plug(&plug); fail: filemap_invalidate_unlock(bdev->bd_mapping); + inode_unlock(bdev->bd_mapping->host); return err; }
@@ -198,12 +200,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode, end > bdev_nr_bytes(bdev)) return -EINVAL;
+ inode_lock(bdev->bd_mapping->host); filemap_invalidate_lock(bdev->bd_mapping); err = truncate_bdev_range(bdev, mode, start, end - 1); if (!err) err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9, GFP_KERNEL); filemap_invalidate_unlock(bdev->bd_mapping); + inode_unlock(bdev->bd_mapping->host); return err; }
@@ -235,6 +239,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode, return -EINVAL;
/* Invalidate the page cache, including dirty pages */ + inode_lock(bdev->bd_mapping->host); filemap_invalidate_lock(bdev->bd_mapping); err = truncate_bdev_range(bdev, mode, start, end); if (err) @@ -245,6 +250,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
fail: filemap_invalidate_unlock(bdev->bd_mapping); + inode_unlock(bdev->bd_mapping->host); return err; }
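All of the bdev paths touched above now follow one lock order: the bdev inode's i_rwsem first, then the mapping's invalidate_lock, then the truncate and range operation, released in reverse. set_blocksize() takes both exclusively while blkdev_read_iter()/blkdev_write_iter() take i_rwsem shared, so a block-size change cannot race a buffered I/O path that has already sampled i_blkbits. A condensed sketch of the ioctl-side pattern; do_range_operation() is a placeholder for the discard/secure-erase/zeroout work:

inode_lock(bdev->bd_mapping->host);
filemap_invalidate_lock(bdev->bd_mapping);

err = truncate_bdev_range(bdev, mode, start, end);
if (!err)
	err = do_range_operation(bdev, start, end);

filemap_invalidate_unlock(bdev->bd_mapping);
inode_unlock(bdev->bd_mapping->host);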
diff --git a/crypto/ahash.c b/crypto/ahash.c index bcd9de009a91..fe19bf7f15eb 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -473,6 +473,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct ahash_alg *alg = crypto_ahash_alg(hash);
crypto_ahash_set_statesize(hash, alg->halg.statesize); + crypto_ahash_set_reqsize(hash, alg->reqsize);
if (tfm->__crt_alg->cra_type == &crypto_shash_type) return crypto_init_ahash_using_shash(tfm); @@ -638,6 +639,9 @@ static int ahash_prepare_alg(struct ahash_alg *alg) if (alg->halg.statesize == 0) return -EINVAL;
+ if (alg->reqsize && alg->reqsize < alg->halg.statesize) + return -EINVAL; + err = hash_prepare_alg(&alg->halg); if (err) return err; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5498a87249d3..e3f1a4852737 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state); - if (err) { - sock_orphan(sk2); - sock_put(sk2); - }
out_free_state: kfree_sensitive(state); diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c index 0631d975bfac..0abc2d87f042 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c @@ -55,7 +55,7 @@ static int __lzorle_compress(const u8 *src, unsigned int slen, size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err;
- err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK) return -EINVAL; diff --git a/crypto/lzo.c b/crypto/lzo.c index ebda132dd22b..8338851c7406 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -55,7 +55,7 @@ static int __lzo_compress(const u8 *src, unsigned int slen, size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err;
- err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK) return -EINVAL; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index ceed7f33a67b..fd3273b519dc 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -844,6 +844,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
/* Only sync algorithms allowed. */ mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE; + type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);
tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index f139c564eadf..10e711c96a67 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -432,7 +432,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev) int bars; int ret;
- bars = pci_select_bars(pdev, IORESOURCE_MEM); + bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;
/* make sure the device has the expected BARs */ if (bars != (BIT(0) | BIT(2) | BIT(4))) { diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index d67f63d93b2a..89fa569d6370 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -443,7 +443,7 @@ config ACPI_SBS the modules will be called sbs and sbshc.
config ACPI_HED - tristate "Hardware Error Device" + bool "Hardware Error Device" help This driver supports the Hardware Error Device (PNP0C33), which is used to report some hardware errors notified via diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 01abf26764b0..3f5a1840f573 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -355,8 +355,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc * device represented by it. */ static const struct acpi_device_id acpi_nonpnp_device_ids[] = { + {"INT3F0D"}, {"INTC1080"}, {"INTC1081"}, + {"INTC1099"}, {""}, };
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index 7652515a6be1..3499f86c411e 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c @@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = { .remove = acpi_hed_remove, }, }; -module_acpi_driver(acpi_hed_driver); + +static int __init acpi_hed_driver_init(void) +{ + return acpi_bus_register_driver(&acpi_hed_driver); +} +subsys_initcall(acpi_hed_driver_init);
MODULE_AUTHOR("Huang Ying"); MODULE_DESCRIPTION("ACPI Hardware Error Device Driver"); diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 19b619376d48..09020bb8ad15 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -595,18 +595,19 @@ static int charlcd_init(struct charlcd *lcd) return 0; }
-struct charlcd *charlcd_alloc(void) +struct charlcd *charlcd_alloc(unsigned int drvdata_size) { struct charlcd_priv *priv; struct charlcd *lcd;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL); if (!priv) return NULL;
priv->esc_seq.len = -1;
lcd = &priv->lcd; + lcd->drvdata = priv->drvdata;
return lcd; } diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h index 4d4287209d04..d10b89740bca 100644 --- a/drivers/auxdisplay/charlcd.h +++ b/drivers/auxdisplay/charlcd.h @@ -51,7 +51,7 @@ struct charlcd { unsigned long y; } addr;
- void *drvdata; + void *drvdata; /* Set by charlcd_alloc() */ };
/** @@ -95,7 +95,8 @@ struct charlcd_ops { };
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on); -struct charlcd *charlcd_alloc(void); + +struct charlcd *charlcd_alloc(unsigned int drvdata_size); void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd); diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c index 41807ce36339..9428f951c9bf 100644 --- a/drivers/auxdisplay/hd44780.c +++ b/drivers/auxdisplay/hd44780.c @@ -226,7 +226,7 @@ static int hd44780_probe(struct platform_device *pdev) if (!hdc) return -ENOMEM;
- lcd = charlcd_alloc(); + lcd = charlcd_alloc(0); if (!lcd) goto fail1;
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c index 6422be0dfe20..0ecf6a9469f2 100644 --- a/drivers/auxdisplay/lcd2s.c +++ b/drivers/auxdisplay/lcd2s.c @@ -307,7 +307,7 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c) if (err < 0) return err;
- lcd = charlcd_alloc(); + lcd = charlcd_alloc(0); if (!lcd) return -ENOMEM;
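charlcd_alloc() now takes the size of the driver's private data and hands back lcd->drvdata pointing into the same allocation, so drivers with no per-device state simply pass 0 as in these hunks. A hypothetical driver with private data would look roughly like this; struct mydrv_priv and mydrv_probe are made-up names:

struct mydrv_priv {
	int backlight_gpio;
};

static int mydrv_probe(struct platform_device *pdev)
{
	struct mydrv_priv *priv;
	struct charlcd *lcd;

	lcd = charlcd_alloc(sizeof(*priv));
	if (!lcd)
		return -ENOMEM;

	priv = lcd->drvdata;	/* lives in the same allocation as lcd */
	priv->backlight_gpio = -1;

	return charlcd_register(lcd);
}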
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 6dc8798d01f9..4da142692d55 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -835,7 +835,7 @@ static void lcd_init(void) if (!hdc) return;
- charlcd = charlcd_alloc(); + charlcd = charlcd_alloc(0); if (!charlcd) { kfree(hdc); return; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 6bd44ec2c9b1..0843d229b0f7 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -916,7 +916,7 @@ static unsigned int loop_default_blocksize(struct loop_device *lo, struct block_device *backing_bdev) { /* In case of direct I/O, match underlying block size */ - if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev) + if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && backing_bdev) return bdev_logical_block_size(backing_bdev); return SECTOR_SIZE; } @@ -969,9 +969,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, if (!file) return -EBADF;
- if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter) - return -EINVAL; - error = loop_check_backing_file(file); if (error) return error; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 38b9e485e520..a01a547c562f 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -484,15 +484,17 @@ static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
+ +#define UBLK_MAX_UBLKS UBLK_MINORS + /* - * Max ublk devices allowed to add + * Max unprivileged ublk devices allowed to add * * It can be extended to one per-user limit in future or even controlled * by cgroup. */ -#define UBLK_MAX_UBLKS UBLK_MINORS -static unsigned int ublks_max = 64; -static unsigned int ublks_added; /* protected by ublk_ctl_mutex */ +static unsigned int unprivileged_ublks_max = 64; +static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
static struct miscdevice ublk_misc;
@@ -1879,10 +1881,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, return -EIOCBQUEUED;
out: - io_uring_cmd_done(cmd, ret, 0, issue_flags); pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n", __func__, cmd_op, tag, ret, io->flags); - return -EIOCBQUEUED; + return ret; }
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, @@ -1938,7 +1939,10 @@ static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd, static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd, unsigned int issue_flags) { - ublk_ch_uring_cmd_local(cmd, issue_flags); + int ret = ublk_ch_uring_cmd_local(cmd, issue_flags); + + if (ret != -EIOCBQUEUED) + io_uring_cmd_done(cmd, ret, 0, issue_flags); }
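The ublk changes above move io_uring_cmd_done() out of the low-level helpers: a helper returns -EIOCBQUEUED when it has taken ownership of the command (completion will happen later, elsewhere) and a plain error code otherwise, and only the outermost callback completes the command. A condensed sketch of that contract; do_uring_cmd() stands in for ublk_ch_uring_cmd_local():

static void some_uring_cmd_cb(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	int ret = do_uring_cmd(cmd, issue_flags);

	/* -EIOCBQUEUED means "do not complete here" */
	if (ret != -EIOCBQUEUED)
		io_uring_cmd_done(cmd, ret, 0, issue_flags);
}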
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) @@ -2203,7 +2207,8 @@ static int ublk_add_chdev(struct ublk_device *ub) if (ret) goto fail;
- ublks_added++; + if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) + unprivileged_ublks_added++; return 0; fail: put_device(dev); @@ -2241,12 +2246,17 @@ static int ublk_add_tag_set(struct ublk_device *ub)
static void ublk_remove(struct ublk_device *ub) { + bool unprivileged; + ublk_stop_dev(ub); cancel_work_sync(&ub->stop_work); cancel_work_sync(&ub->quiesce_work); cdev_device_del(&ub->cdev, &ub->cdev_dev); + unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV; ublk_put_device(ub); - ublks_added--; + + if (unprivileged) + unprivileged_ublks_added--; }
static struct ublk_device *ublk_get_device_from_id(int idx) @@ -2495,7 +2505,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) return ret;
ret = -EACCES; - if (ublks_added >= ublks_max) + if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) && + unprivileged_ublks_added >= unprivileged_ublks_max) goto out_unlock;
ret = -ENOMEM; @@ -3056,10 +3067,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, if (ub) ublk_put_device(ub); out: - io_uring_cmd_done(cmd, ret, 0, issue_flags); pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n", __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id); - return -EIOCBQUEUED; + return ret; }
static const struct file_operations ublk_ctl_fops = { @@ -3123,23 +3133,26 @@ static void __exit ublk_exit(void) module_init(ublk_init); module_exit(ublk_exit);
-static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp) +static int ublk_set_max_unprivileged_ublks(const char *buf, + const struct kernel_param *kp) { return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS); }
-static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp) +static int ublk_get_max_unprivileged_ublks(char *buf, + const struct kernel_param *kp) { - return sysfs_emit(buf, "%u\n", ublks_max); + return sysfs_emit(buf, "%u\n", unprivileged_ublks_max); }
-static const struct kernel_param_ops ublk_max_ublks_ops = { - .set = ublk_set_max_ublks, - .get = ublk_get_max_ublks, +static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = { + .set = ublk_set_max_unprivileged_ublks, + .get = ublk_get_max_unprivileged_ublks, };
-module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644); -MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)"); +module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops, + &unprivileged_ublks_max, 0644); +MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add(default: 64)");
MODULE_AUTHOR("Ming Lei ming.lei@redhat.com"); MODULE_DESCRIPTION("Userspace block device"); diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 11d33cd7b08f..13dcc0077732 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -610,7 +610,8 @@ static void btmtksdio_txrx_work(struct work_struct *work) } while (int_status || time_is_before_jiffies(txrx_timeout));
/* Enable interrupt */ - sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL); + if (bdev->func->irq_handler) + sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
sdio_release_host(bdev->func);
@@ -722,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */ + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) + return 0; + sdio_claim_host(bdev->func);
/* Disable interrupt */ @@ -1429,11 +1434,15 @@ static void btmtksdio_remove(struct sdio_func *func) if (!bdev) return;
+ hdev = bdev->hdev; + + /* Make sure to call btmtksdio_close before removing sdio card */ + if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) + btmtksdio_close(hdev); + /* Be consistent the state in btmtksdio_probe */ pm_runtime_get_noresume(bdev->dev);
- hdev = bdev->hdev; - sdio_set_drvdata(func, NULL); hci_unregister_dev(hdev); hci_free_dev(hdev); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7e1f03231b4c..af2be0271806 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2979,9 +2979,8 @@ static void btusb_coredump_qca(struct hci_dev *hdev) static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) { int ret = 0; + unsigned int skip = 0; u8 pkt_type; - u8 *sk_ptr; - unsigned int sk_len; u16 seqno; u32 dump_size;
@@ -2990,18 +2989,13 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) struct usb_device *udev = btdata->udev;
pkt_type = hci_skb_pkt_type(skb); - sk_ptr = skb->data; - sk_len = skb->len; + skip = sizeof(struct hci_event_hdr); + if (pkt_type == HCI_ACLDATA_PKT) + skip += sizeof(struct hci_acl_hdr);
- if (pkt_type == HCI_ACLDATA_PKT) { - sk_ptr += HCI_ACL_HDR_SIZE; - sk_len -= HCI_ACL_HDR_SIZE; - } - - sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + skb_pull(skb, skip); + dump_hdr = (struct qca_dump_hdr *)skb->data;
- dump_hdr = (struct qca_dump_hdr *)sk_ptr; seqno = le16_to_cpu(dump_hdr->seqno); if (seqno == 0) { set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); @@ -3021,16 +3015,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
btdata->qca_dump.ram_dump_size = dump_size; btdata->qca_dump.ram_dump_seqno = 0; - sk_ptr += offsetof(struct qca_dump_hdr, data0); - sk_len -= offsetof(struct qca_dump_hdr, data0); + + skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
usb_disable_autosuspend(udev); bt_dev_info(hdev, "%s memdump size(%u)\n", (pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event", dump_size); } else { - sk_ptr += offsetof(struct qca_dump_hdr, data); - sk_len -= offsetof(struct qca_dump_hdr, data); + skb_pull(skb, offsetof(struct qca_dump_hdr, data)); }
if (!btdata->qca_dump.ram_dump_size) { @@ -3050,7 +3043,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) return ret; }
- skb_pull(skb, skb->len - sk_len); hci_devcd_append(hdev, skb); btdata->qca_dump.ram_dump_seqno++; if (seqno == QCA_LAST_SEQUENCE_NUM) { @@ -3078,68 +3070,58 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) /* Return: true if the ACL packet is a dump packet, false otherwise. */ static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) { - u8 *sk_ptr; - unsigned int sk_len; - struct hci_event_hdr *event_hdr; struct hci_acl_hdr *acl_hdr; struct qca_dump_hdr *dump_hdr; + struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); + bool is_dump = false;
- sk_ptr = skb->data; - sk_len = skb->len; - - acl_hdr = hci_acl_hdr(skb); - if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) + if (!clone) return false;
- sk_ptr += HCI_ACL_HDR_SIZE; - sk_len -= HCI_ACL_HDR_SIZE; - event_hdr = (struct hci_event_hdr *)sk_ptr; - - if ((event_hdr->evt != HCI_VENDOR_PKT) || - (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) - return false; + acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr)); + if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)) + goto out;
- sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); + if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) + goto out;
- dump_hdr = (struct qca_dump_hdr *)sk_ptr; - if ((sk_len < offsetof(struct qca_dump_hdr, data)) || - (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || - (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) - return false; + dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); + if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + goto out;
- return true; + is_dump = true; +out: + consume_skb(clone); + return is_dump; }
/* Return: true if the event packet is a dump packet, false otherwise. */ static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) { - u8 *sk_ptr; - unsigned int sk_len; - struct hci_event_hdr *event_hdr; struct qca_dump_hdr *dump_hdr; + struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); + bool is_dump = false;
- sk_ptr = skb->data; - sk_len = skb->len; - - event_hdr = hci_event_hdr(skb); - - if ((event_hdr->evt != HCI_VENDOR_PKT) - || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) + if (!clone) return false;
- sk_ptr += HCI_EVENT_HDR_SIZE; - sk_len -= HCI_EVENT_HDR_SIZE; + event_hdr = skb_pull_data(clone, sizeof(*event_hdr)); + if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT)) + goto out;
- dump_hdr = (struct qca_dump_hdr *)sk_ptr; - if ((sk_len < offsetof(struct qca_dump_hdr, data)) || - (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || - (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) - return false; + dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr)); + if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + goto out;
- return true; + is_dump = true; +out: + consume_skb(clone); + return is_dump; }
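Both dump-detection helpers above follow the same pattern: parse a throwaway clone so the caller's skb offsets stay untouched, and let skb_pull_data() double as the length check, since it returns NULL when fewer bytes remain than requested. A generic sketch of that idiom (foo_hdr is a placeholder type, not something from this driver):

	struct foo_hdr *hdr;

	hdr = skb_pull_data(skb, sizeof(*hdr));	/* NULL if the skb is too short */
	if (!hdr)
		return false;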
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index ecea08915730..cf0b83154044 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -974,7 +974,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip) int rc;
if (chip->auth) { - dev_warn_once(&chip->dev, "auth session is active\n"); + dev_dbg_once(&chip->dev, "auth session is active\n"); return 0; }
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 014db6386624..8ddf3a9a53df 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev) if (!clk_data) return -ENOMEM;
+ clk_data->num = S2MPS11_CLKS_NUM; + switch (hwid) { case S2MPS11X: s2mps11_reg = S2MPS11_REG_RTC_CTRL; @@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev) clk_data->hws[i] = &s2mps11_clks[i].hw; }
- clk_data->num = S2MPS11_CLKS_NUM; of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index fb18f507f121..fe6dac70f1a1 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -8,6 +8,7 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/units.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -406,11 +407,151 @@ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_ static struct clk_hw **hws; static struct clk_hw_onecell_data *clk_hw_data;
+struct imx8mp_clock_constraints { + unsigned int clkid; + u32 maxrate; +}; + +/* + * Below tables are taken from IMX8MPCEC Rev. 2.1, 07/2023 + * Table 13. Maximum frequency of modules. + * Probable typos fixed are marked with a comment. + */ +static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = { + { IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */ + { IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */ + { IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */ + { IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_AHB, 133333333 }, + { IMX8MP_CLK_IPG_ROOT, 66666667 }, + { IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ }, + { IMX8MP_CLK_DRAM_ALT, 666666667 }, + { IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ }, + { IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ }, + { IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ }, + { IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */ + { 
IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ }, + { /* Sentinel */ } +}; + +static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = { + { IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ }, + { IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */ + { IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ }, + { /* Sentinel */ } +}; + +static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = { + { IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ}, + { IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */ + { IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ }, + { /* Sentinel */ } +}; + +static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[]) +{ + const struct imx8mp_clock_constraints *constr; + + for (constr = constraints; constr->clkid; constr++) + clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate); +} + static int imx8mp_clocks_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np; void __iomem *anatop_base, *ccm_base; + const char *opmode; int err;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop"); @@ -715,6 +856,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX8MP_CLK_END);
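The hunk below ties the constraint tables above into probe: the common caps are always installed, and the board's fsl,operating-mode property ("nominal" or "overdrive") selects the additional set. The practical effect is on consumers, whose rate requests get clamped by the clock framework to the installed ceiling; a hypothetical consumer-side illustration (the clock name is made up):

	gpu = devm_clk_get(dev, "gpu3d_core");
	/* with the nominal table applied, the framework limits this request
	 * to the 800 MHz ceiling installed via clk_hw_set_rate_range() */
	clk_set_rate(gpu, 1000000000);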
+ imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints); + + err = of_property_read_string(np, "fsl,operating-mode", &opmode); + if (!err) { + if (!strcmp(opmode, "nominal")) + imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints); + else if (!strcmp(opmode, "overdrive")) + imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints); + } + err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); if (err < 0) { dev_err(dev, "failed to register hws for i.MX8MP\n"); diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 16145f74bbc8..fd605bccf48d 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -199,7 +199,7 @@ config IPQ_GCC_4019
config IPQ_GCC_5018 tristate "IPQ5018 Global Clock Controller" - depends on ARM64 || COMPILE_TEST + depends on ARM || ARM64 || COMPILE_TEST help Support for global clock controller on ipq5018 devices. Say Y if you want to use peripheral devices such as UART, SPI, diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c index 34d2f17520dc..450ddbebd35f 100644 --- a/drivers/clk/qcom/camcc-sm8250.c +++ b/drivers/clk/qcom/camcc-sm8250.c @@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = { .parent_data = cam_cc_parent_data_2, .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = { .parent_data = cam_cc_parent_data_3, .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = { .parent_data = cam_cc_parent_data_4, .num_parents = ARRAY_SIZE(cam_cc_parent_data_4), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
@@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, };
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 10e276dabff9..e76ecc466351 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -670,14 +670,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; + + if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) + return 0;
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); if (ctl & PLL_ALPHA_EN) { - regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low); + if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low)) + return 0; if (alpha_width > 32) { - regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), - &high); + if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), + &high)) + return 0; a = (u64)high << 32 | low; } else { a = low & GENMASK(alpha_width - 1, 0); @@ -903,8 +908,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l, alpha = 0, ctl, alpha_m, alpha_n;
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); - regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; + + if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) + return 0;
if (ctl & PLL_ALPHA_EN) { regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha); @@ -1098,8 +1106,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l, frac, alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); - regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; + + if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac)) + return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); } @@ -1157,7 +1168,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); u32 ctl;
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); + if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) + return 0;
ctl >>= PLL_POST_DIV_SHIFT; ctl &= PLL_POST_DIV_MASK(pll); @@ -1373,8 +1385,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw, struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l, frac, alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); - regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; + + if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac)) + return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); } @@ -1524,7 +1539,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct regmap *regmap = pll->clkr.regmap; u32 i, div = 1, val;
- regmap_read(regmap, PLL_USER_CTL(pll), &val); + if (regmap_read(regmap, PLL_USER_CTL(pll), &val)) + return 0;
val >>= pll->post_div_shift; val &= PLL_POST_DIV_MASK(pll); @@ -2451,9 +2467,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw, struct regmap *regmap = pll->clkr.regmap; u32 l, frac;
- regmap_read(regmap, PLL_L_VAL(pll), &l); + if (regmap_read(regmap, PLL_L_VAL(pll), &l)) + return 0; l &= LUCID_EVO_PLL_L_VAL_MASK; - regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac); + + if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac)) + return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll)); } @@ -2528,7 +2547,8 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw, struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l;
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0;
return parent_rate * l; } diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c index 45e726477086..22169da08a51 100644 --- a/drivers/clk/qcom/lpassaudiocc-sc7280.c +++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved. */
#include <linux/clk-provider.h> @@ -713,14 +714,24 @@ static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = { [LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 }, };
+static const struct regmap_config lpass_audio_cc_sc7280_reset_regmap_config = { + .name = "lpassaudio_cc_reset", + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + .max_register = 0xc8, +}; + static const struct qcom_cc_desc lpass_audio_cc_reset_sc7280_desc = { - .config = &lpass_audio_cc_sc7280_regmap_config, + .config = &lpass_audio_cc_sc7280_reset_regmap_config, .resets = lpass_audio_cc_sc7280_resets, .num_resets = ARRAY_SIZE(lpass_audio_cc_sc7280_resets), };
static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = { - { .compatible = "qcom,sc7280-lpassaudiocc" }, + { .compatible = "qcom,qcm6490-lpassaudiocc", .data = &lpass_audio_cc_reset_sc7280_desc }, + { .compatible = "qcom,sc7280-lpassaudiocc", .data = &lpass_audio_cc_sc7280_desc }, { } }; MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table); @@ -752,13 +763,17 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev) struct regmap *regmap; int ret;
+ desc = device_get_match_data(&pdev->dev); + + if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcm6490-lpassaudiocc")) + return qcom_cc_probe_by_index(pdev, 1, desc); + ret = lpass_audio_setup_runtime_pm(pdev); if (ret) return ret;
lpass_audio_cc_sc7280_regmap_config.name = "lpassaudio_cc"; lpass_audio_cc_sc7280_regmap_config.max_register = 0x2f000; - desc = &lpass_audio_cc_sc7280_desc;
regmap = qcom_cc_map(pdev, desc); if (IS_ERR(regmap)) { @@ -772,7 +787,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev) regmap_write(regmap, 0x4, 0x3b); regmap_write(regmap, 0x8, 0xff05);
- ret = qcom_cc_really_probe(&pdev->dev, &lpass_audio_cc_sc7280_desc, regmap); + ret = qcom_cc_really_probe(&pdev->dev, desc, regmap); if (ret) { dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n"); goto exit; diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index 229f4540b219..97d42328fa81 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -1549,28 +1549,6 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv) return devm_reset_controller_register(priv->dev, &priv->rcdev); }
-static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv, - const struct of_phandle_args *clkspec) -{ - const struct rzg2l_cpg_info *info = priv->info; - unsigned int id; - unsigned int i; - - if (clkspec->args_count != 2) - return false; - - if (clkspec->args[0] != CPG_MOD) - return false; - - id = clkspec->args[1] + info->num_total_core_clks; - for (i = 0; i < info->num_no_pm_mod_clks; i++) { - if (info->no_pm_mod_clks[i] == id) - return false; - } - - return true; -} - /** * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure * @onecell_data: cell data @@ -1595,45 +1573,73 @@ struct rzg2l_cpg_pd { u16 id; };
+static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd, + const struct of_phandle_args *clkspec) +{ + if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2) + return false; + + switch (clkspec->args[0]) { + case CPG_MOD: { + struct rzg2l_cpg_priv *priv = pd->priv; + const struct rzg2l_cpg_info *info = priv->info; + unsigned int id = clkspec->args[1]; + + if (id >= priv->num_mod_clks) + return false; + + id += info->num_total_core_clks; + + for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) { + if (info->no_pm_mod_clks[i] == id) + return false; + } + + return true; + } + + case CPG_CORE: + default: + return false; + } +} + static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev) { struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd); - struct rzg2l_cpg_priv *priv = pd->priv; struct device_node *np = dev->of_node; struct of_phandle_args clkspec; bool once = true; struct clk *clk; + unsigned int i; int error; - int i = 0; - - while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, - &clkspec)) { - if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) { - if (once) { - once = false; - error = pm_clk_create(dev); - if (error) { - of_node_put(clkspec.np); - goto err; - } - } - clk = of_clk_get_from_provider(&clkspec); + + for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) { + if (!rzg2l_cpg_is_pm_clk(pd, &clkspec)) { of_node_put(clkspec.np); - if (IS_ERR(clk)) { - error = PTR_ERR(clk); - goto fail_destroy; - } + continue; + }
- error = pm_clk_add_clk(dev, clk); + if (once) { + once = false; + error = pm_clk_create(dev); if (error) { - dev_err(dev, "pm_clk_add_clk failed %d\n", - error); - goto fail_put; + of_node_put(clkspec.np); + goto err; } - } else { - of_node_put(clkspec.np); } - i++; + clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); + if (IS_ERR(clk)) { + error = PTR_ERR(clk); + goto fail_destroy; + } + + error = pm_clk_add_clk(dev, clk); + if (error) { + dev_err(dev, "pm_clk_add_clk failed %d\n", error); + goto fail_put; + } }
return 0; diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c index 3f095515f54f..54d2c7f0ed63 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c @@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = { { .hw = &pll_periph0_2x_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); - -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", + mmc0_mmc1_parents, 0x830, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); + +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", + mmc0_mmc1_parents, 0x834, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0);
static const struct clk_parent_data mmc2_parents[] = { { .fw_name = "hosc" }, @@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = { { .hw = &pll_periph0_800M_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents, + 0x838, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0);
static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws, 0x84c, BIT(0), 0); diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h index 6e50f3728fb5..7d836a9fb3db 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.h +++ b/drivers/clk/sunxi-ng/ccu_mp.h @@ -52,6 +52,28 @@ struct ccu_mp { } \ }
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \ + _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _muxshift, _muxwidth, \ + _gate, _postdiv, _flags)\ + struct ccu_mp _struct = { \ + .enable = _gate, \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ + .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \ + .fixed_post_div = _postdiv, \ + .common = { \ + .reg = _reg, \ + .features = CCU_FEATURE_FIXED_POSTDIV, \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \ + _parents, \ + &ccu_mp_ops, \ + _flags), \ + } \ + } + #define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ _mshift, _mwidth, \ _pshift, _pwidth, \ diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 110347707ff9..8592910710d1 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -115,6 +115,9 @@ static void gic_update_frequency(void *data)
static int gic_starting_cpu(unsigned int cpu) { + /* Ensure the GIC counter is running */ + clear_gic_config(GIC_CONFIG_COUNTSTOP); + gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device)); return 0; } @@ -252,9 +255,6 @@ static int __init gic_clocksource_of_init(struct device_node *node) pr_warn("Unable to register clock notifier\n"); }
- /* And finally start the counter */ - clear_gic_config(GIC_CONFIG_COUNTSTOP); - /* * It's safe to use the MIPS GIC timer as a sched clock source only if * its ticks are stable, which is true on either the platforms with diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 48ce50c5f5e6..4d7cf338824a 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -126,7 +126,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
static int riscv_timer_dying_cpu(unsigned int cpu) { + /* + * Stop the timer when the cpu is going to be offline otherwise + * the timer interrupt may be pending while performing power-down. + */ + riscv_clock_event_stop(); disable_percpu_irq(riscv_clock_event_irq); + return 0; }
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 9db5354fdb02..7a16d1932228 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -696,7 +696,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) pr_err("Boost mode is not supported by this processor or SBIOS\n"); return -EOPNOTSUPP; } - guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state); WRITE_ONCE(cpudata->boost_state, !ret ? state : false); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 78ad3221fe07..67bac12d4d55 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -172,6 +172,7 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,sm8350", }, { .compatible = "qcom,sm8450", }, { .compatible = "qcom,sm8550", }, + { .compatible = "qcom,sm8650", },
{ .compatible = "st,stih407", }, { .compatible = "st,stih410", }, diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 7b8fcfa55038..4e5b6f9a56d1 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy) { struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id; + u32 cpu;
policy->freq_table = data->clusters[cluster].table; policy->cpuinfo.transition_latency = 300 * 1000; policy->driver_data = NULL;
+ /* set same policy for all cpus in a cluster */ + for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) { + if (data->cpus[cpu].bpmp_cluster_id == cluster) + cpumask_set_cpu(cpu, policy->cpus); + } + return 0; }
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index f3c9d49f0f2a..97ffadc7e57a 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -239,8 +239,19 @@ static unsigned int get_typical_interval(struct menu_device *data) * This can deal with workloads that have long pauses interspersed * with sporadic activity with a bunch of short pauses. */ - if ((divisor * 4) <= INTERVALS * 3) + if (divisor * 4 <= INTERVALS * 3) { + /* + * If there are sufficiently many data points still under + * consideration after the outliers have been eliminated, + * returning without a prediction would be a mistake because it + * is likely that the next interval will not exceed the current + * maximum, so return the latter in that case. + */ + if (divisor >= INTERVALS / 2) + return max; + return UINT_MAX; + }
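Worked through with INTERVALS equal to 8 (its value in menu.c), the outer condition divisor * 4 <= 24 means at most six samples survived outlier elimination; the new inner check divisor >= 8 / 2 then returns the observed maximum when four, five or six samples remain, and only three or fewer surviving samples still produce UINT_MAX, i.e. no prediction.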
thresh = max - 1; goto again; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c index 5387c68f3c9d..426244107037 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c @@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs, break; }
- dev_err(&pdev->dev, - "Request failed with software error code 0x%x\n", - cpt_status->s.uc_compcode); + pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n", + cpt_status->s.uc_compcode, + info->req->areq->tfm->__crt_alg->cra_name, + info->req->areq->tfm->__crt_alg->cra_driver_name); otx2_cpt_dump_sg_list(pdev, info->req); break; } diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index 77a6301f37f0..29c0c69d5905 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -265,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, MXS_DCP_CONTROL0_INTERRUPT | MXS_DCP_CONTROL0_ENABLE_CIPHER;
- if (key_referenced) - /* Set OTP key bit to select the key via KEY_SELECT. */ - desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY; - else + if (!key_referenced) /* Payload contains the key. */ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; + else if (actx->key[0] == DCP_PAES_KEY_OTP) + /* Set OTP key bit to select the key via KEY_SELECT. */ + desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
if (rctx->enc) desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c index 27645606f900..4794d58dab55 100644 --- a/drivers/dma/fsl-edma-main.c +++ b/drivers/dma/fsl-edma-main.c @@ -56,7 +56,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
intr = edma_readl_chreg(fsl_chan, ch_int); if (!intr) - return IRQ_HANDLED; + return IRQ_NONE;
edma_writel_chreg(fsl_chan, 1, ch_int);
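Returning IRQ_NONE rather than IRQ_HANDLED when no channel interrupt bit is set tells the genirq core that the handler serviced nothing, so its spurious-interrupt accounting can catch a misbehaving or wrongly routed line instead of the event being silently acknowledged. The general shape of the pattern, as a sketch rather than the driver's code (the helpers are placeholders):

	static irqreturn_t foo_tx_handler(int irq, void *dev_id)
	{
		u32 status = foo_read_status(dev_id);	/* placeholder accessor */

		if (!status)
			return IRQ_NONE;	/* nothing pending, not ours */

		foo_ack_and_handle(dev_id, status);	/* placeholder */
		return IRQ_HANDLED;
	}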
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 57f1bf2ab20b..22aa2bab3693 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -412,6 +412,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) return -EPERM;
+ if (current->mm != ctx->mm) + return -EPERM; + rc = check_vma(wq, vma, __func__); if (rc < 0) return rc; @@ -478,6 +481,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t ssize_t written = 0; int i;
+ if (current->mm != ctx->mm) + return -EPERM; + for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) { int rc = idxd_submit_user_descriptor(ctx, udesc + i);
@@ -498,6 +504,9 @@ static __poll_t idxd_cdev_poll(struct file *filp, struct idxd_device *idxd = wq->idxd; __poll_t out = 0;
+ if (current->mm != ctx->mm) + return POLLNVAL; + poll_wait(filp, &wq->err_queue, wait); spin_lock(&idxd->dev_lock); if (idxd->sw_err.valid) diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c index 1877201d1aa9..20bdc52f63a5 100644 --- a/drivers/dpll/dpll_core.c +++ b/drivers/dpll/dpll_core.c @@ -443,8 +443,11 @@ static void dpll_pin_prop_free(struct dpll_pin_properties *prop) static int dpll_pin_prop_dup(const struct dpll_pin_properties *src, struct dpll_pin_properties *dst) { + if (WARN_ON(src->freq_supported && !src->freq_supported_num)) + return -EINVAL; + memcpy(dst, src, sizeof(*dst)); - if (src->freq_supported && src->freq_supported_num) { + if (src->freq_supported) { size_t freq_size = src->freq_supported_num * sizeof(*src->freq_supported); dst->freq_supported = kmemdup(src->freq_supported, diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 56be8ef40f37..e3635fba63b4 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -405,10 +405,9 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) int i, j, ret; struct mem_ctl_info *mci = NULL; struct edac_mc_layer layers[2]; - struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL]; void __iomem *window; struct ie31200_priv *priv; - u32 addr_decode, mad_offset; + u32 addr_decode[IE31200_CHANNELS], mad_offset;
/* * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit @@ -466,19 +465,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) mad_offset = IE31200_MAD_DIMM_0_OFFSET; }
- /* populate DIMM info */ for (i = 0; i < IE31200_CHANNELS; i++) { - addr_decode = readl(window + mad_offset + + addr_decode[i] = readl(window + mad_offset + (i * 4)); - edac_dbg(0, "addr_decode: 0x%x\n", addr_decode); - for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) { - populate_dimm_info(&dimm_info[i][j], addr_decode, j, - skl); - edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n", - dimm_info[i][j].size, - dimm_info[i][j].dual_rank, - dimm_info[i][j].x16_width); - } + edac_dbg(0, "addr_decode: 0x%x\n", addr_decode[i]); }
/* @@ -489,14 +479,22 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) */ for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) { for (j = 0; j < IE31200_CHANNELS; j++) { + struct dimm_data dimm_info; struct dimm_info *dimm; unsigned long nr_pages;
- nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl); + populate_dimm_info(&dimm_info, addr_decode[j], i, + skl); + edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n", + dimm_info.size, + dimm_info.dual_rank, + dimm_info.x16_width); + + nr_pages = IE31200_PAGES(dimm_info.size, skl); if (nr_pages == 0) continue;
- if (dimm_info[j][i].dual_rank) { + if (dimm_info.dual_rank) { nr_pages = nr_pages / 2; dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0); dimm->nr_pages = nr_pages; diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index dfda5ffc14db..dea3eb741d95 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -212,6 +212,7 @@ ffa_device_register(const struct ffa_partition_info *part_info, dev = &ffa_dev->dev; dev->bus = &ffa_bus_type; dev->release = ffa_release_device; + dev->dma_mask = &dev->coherent_dma_mask; dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id; diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index dce448687e28..47751b2c057a 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -150,6 +150,14 @@ static int ffa_version_check(u32 *version) return -EOPNOTSUPP; }
+ if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) { + pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n", + FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0), + FFA_MAJOR_VERSION(FFA_DRIVER_VERSION), + FFA_MINOR_VERSION(FFA_DRIVER_VERSION)); + return -EINVAL; + } + if (ver.a0 < FFA_MIN_VERSION) { pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n", FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0), @@ -1441,6 +1449,10 @@ static int ffa_setup_partitions(void)
kfree(pbuf);
+ /* Check if the host is already added as part of partition info */ + if (xa_load(&drv_info->partition_info, drv_info->vm_id)) + return 0; + /* Allocate for the host */ info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 782c9bec8361..73a6ab4a224d 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -42,7 +42,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0); * This helper let an SCMI driver request specific devices identified by the * @id_table to be created for each active SCMI instance. * - * The requested device name MUST NOT be already existent for any protocol; + * The requested device name MUST NOT be already existent for this protocol; * at first the freshly requested @id_table is annotated in the IDR table * @scmi_requested_devices and then the requested device is advertised to any * registered party via the @scmi_requested_devices_nh notification chain. @@ -52,7 +52,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0); static int scmi_protocol_device_request(const struct scmi_device_id *id_table) { int ret = 0; - unsigned int id = 0; struct list_head *head, *phead = NULL; struct scmi_requested_dev *rdev;
@@ -67,19 +66,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table) }
/* - * Search for the matching protocol rdev list and then search - * of any existent equally named device...fails if any duplicate found. + * Find the matching protocol rdev list and then search of any + * existent equally named device...fails if any duplicate found. */ mutex_lock(&scmi_requested_devices_mtx); - idr_for_each_entry(&scmi_requested_devices, head, id) { - if (!phead) { - /* A list found registered in the IDR is never empty */ - rdev = list_first_entry(head, struct scmi_requested_dev, - node); - if (rdev->id_table->protocol_id == - id_table->protocol_id) - phead = head; - } + phead = idr_find(&scmi_requested_devices, id_table->protocol_id); + if (phead) { + head = phead; list_for_each_entry(rdev, head, node) { if (!strcmp(rdev->id_table->name, id_table->name)) { pr_err("Ignoring duplicate request [%d] %s\n", diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index add8acf66a9c..5578158f1375 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -1012,17 +1012,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status); int zynqmp_pm_fpga_get_config_status(u32 *value) { u32 ret_payload[PAYLOAD_ARG_CNT]; - u32 buf, lower_addr, upper_addr; int ret;
if (!value) return -EINVAL;
- lower_addr = lower_32_bits((u64)&buf); - upper_addr = upper_32_bits((u64)&buf); - ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4, - XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr, + XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0, XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1]; diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 6b0914432445..5af0bd33890c 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -52,7 +52,7 @@ /* V2 Defines */ #define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
-#define V2_CREDIT_TIMEOUT_US 20000 +#define V2_CREDIT_TIMEOUT_US 40000 #define V2_CHECK_CREDIT_US 10 #define V2_POLL_TIMEOUT_US 1000000 #define V2_USER_TIMEOUT_US 500000 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index f9d119448442..581fe1a48f37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -192,7 +192,7 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data); #if IS_ENABLED(CONFIG_HSA_AMD) bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f); -int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo); +void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo); int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, unsigned long cur_seq, struct kgd_mem *mem); int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, @@ -212,9 +212,8 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f) }
static inline -int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) +void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo) { - return 0; }
static inline diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index fa572ba7f9fc..1465b3adacb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -370,40 +370,32 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, return 0; }
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) +/** + * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences + * @bo: the BO where to remove the eviction fences from. + * + * This function should only be used on release when all references to the BO + * are already dropped. We remove the eviction fence from the private copy of + * the dma_resv object here since that is what is used during release to + * determine if the BO is idle or not. + */ +void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo) { - struct amdgpu_bo *root = bo; - struct amdgpu_vm_bo_base *vm_bo; - struct amdgpu_vm *vm; - struct amdkfd_process_info *info; - struct amdgpu_amdkfd_fence *ef; - int ret; - - /* we can always get vm_bo from root PD bo.*/ - while (root->parent) - root = root->parent; + struct dma_resv *resv = &bo->tbo.base._resv; + struct dma_fence *fence, *stub; + struct dma_resv_iter cursor;
- vm_bo = root->vm_bo; - if (!vm_bo) - return 0; + dma_resv_assert_held(resv);
- vm = vm_bo->vm; - if (!vm) - return 0; - - info = vm->process_info; - if (!info || !info->eviction_fence) - return 0; - - ef = container_of(dma_fence_get(&info->eviction_fence->base), - struct amdgpu_amdkfd_fence, base); - - BUG_ON(!dma_resv_trylock(bo->tbo.base.resv)); - ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef); - dma_resv_unlock(bo->tbo.base.resv); + stub = dma_fence_get_stub(); + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) { + if (!to_amdgpu_amdkfd_fence(fence)) + continue;
- dma_fence_put(&ef->base); - return ret; + dma_resv_replace_fences(resv, fence->context, stub, + DMA_RESV_USAGE_BOOKKEEP); + } + dma_fence_put(stub); }
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index cb102ee71d04..ca0411c9500e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -170,6 +170,24 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, static DEVICE_ATTR(pcie_replay_count, 0444, amdgpu_device_get_pcie_replay_count, NULL);
+static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev) +{ + int ret = 0; + + if (!amdgpu_sriov_vf(adev)) + ret = sysfs_create_file(&adev->dev->kobj, + &dev_attr_pcie_replay_count.attr); + + return ret; +} + +static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_sriov_vf(adev)) + sysfs_remove_file(&adev->dev->kobj, + &dev_attr_pcie_replay_count.attr); +} + static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t ppos, size_t count) @@ -4030,11 +4048,6 @@ static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev) } #endif
-static const struct attribute *amdgpu_dev_attributes[] = { - &dev_attr_pcie_replay_count.attr, - NULL -}; - static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) { if (amdgpu_mcbp == 1) @@ -4477,7 +4490,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, } else adev->ucode_sysfs_en = true;
- r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); + r = amdgpu_device_attr_sysfs_init(adev); if (r) dev_err(adev->dev, "Could not create amdgpu device attr\n");
@@ -4614,7 +4627,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_pm_sysfs_fini(adev); if (adev->ucode_sysfs_en) amdgpu_ucode_sysfs_fini(adev); - sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); + amdgpu_device_attr_sysfs_fini(adev); amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev); @@ -4664,6 +4677,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) kfree(adev->fru_info); adev->fru_info = NULL;
+ kfree(adev->xcp_mgr); + adev->xcp_mgr = NULL; + px = amdgpu_device_supports_px(adev_to_drm(adev));
if (px || (!dev_is_removable(&adev->pdev->dev) && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index ca8091fd3a24..018240a2ab96 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -111,8 +111,7 @@ #include "amdgpu_isp.h" #endif
-#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin" -MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY); +MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00 #define mmRCC_CONFIG_MEMSIZE 0xde3 @@ -295,21 +294,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, return ret; }
-static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary) +static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, + uint8_t *binary, + const char *fw_name) { const struct firmware *fw; - const char *fw_name; int r;
- switch (amdgpu_discovery) { - case 2: - fw_name = FIRMWARE_IP_DISCOVERY; - break; - default: - dev_warn(adev->dev, "amdgpu_discovery is not set properly\n"); - return -EINVAL; - } - r = request_firmware(&fw, fw_name, adev->dev); if (r) { dev_err(adev->dev, "can't load firmware \"%s\"\n", @@ -402,10 +393,19 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, return 0; }
+static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev) +{ + if (amdgpu_discovery == 2) + return "amdgpu/ip_discovery.bin"; + + return NULL; +} + static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; struct binary_header *bhdr; + const char *fw_name; uint16_t offset; uint16_t size; uint16_t checksum; @@ -417,9 +417,10 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) return -ENOMEM;
/* Read from file if it is the preferred option */ - if (amdgpu_discovery == 2) { + fw_name = amdgpu_discovery_get_fw_name(adev); + if (fw_name != NULL) { dev_info(adev->dev, "use ip discovery information from file"); - r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin); + r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
if (r) { dev_err(adev->dev, "failed to read ip discovery binary from file\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 2f90fff1b9dd..e63a32c21447 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -42,6 +42,29 @@ #include <linux/dma-fence-array.h> #include <linux/pci-p2pdma.h>
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops; + +/** + * dma_buf_attach_adev - Helper to get adev of an attachment + * + * @attach: attachment + * + * Returns: + * A struct amdgpu_device * if the attaching device is an amdgpu device or + * partition, NULL otherwise. + */ +static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach) +{ + if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) { + struct drm_gem_object *obj = attach->importer_priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + return amdgpu_ttm_adev(bo->tbo.bdev); + } + + return NULL; +} + /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation * @@ -53,11 +76,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { + struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach); struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) + if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) && + pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) attach->peer2peer = false;
return 0; @@ -456,6 +481,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, struct drm_gem_object *obj = &bo->tbo.base; struct drm_gem_object *gobj;
+ if (!adev) + return false; + if (obj->import_attach) { struct dma_buf *dma_buf = obj->import_attach->dmabuf;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 1b479bd85135..93c3de2d27d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -172,6 +172,7 @@ uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu; char *amdgpu_virtual_display; bool enforce_isolation; +int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer * migration and restoration of backing memory when handling @@ -1037,6 +1038,13 @@ module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444); module_param(enforce_isolation, bool, 0444); MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
+/** + * DOC: modeset (int) + * Override nomodeset (1 = override, -1 = auto). The default is -1 (auto). + */ +MODULE_PARM_DESC(modeset, "Override nomodeset (1 = enable, -1 = auto)"); +module_param_named(modeset, amdgpu_modeset, int, 0444); + /** * DOC: seamless (int) * Seamless boot will keep the image on the screen during the boot process. @@ -2248,6 +2256,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, int ret, retry = 0, i; bool supports_atomic = false;
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA || + (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) { + if (drm_firmware_drivers_only() && amdgpu_modeset == -1) + return -EINVAL; + } + /* skip devices which are owned by radeon */ for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { if (amdgpu_unsupported_pciidlist[i] == pdev->device) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index 508f02eb0cf8..7de10208e8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -78,6 +78,9 @@ struct amdgpu_ih_ring { #define amdgpu_ih_ts_after(t1, t2) \ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
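A quick worked example of why the shift-by-16 comparison in amdgpu_ih_ts_after() is wraparound safe, assuming the IH timestamps are 48-bit counters (which is what packing them into the top bits of an int64_t implies): take t1 = 0xFFFFFFFFFFFF just before the counter wraps and t2 = 0x5 just after it; then (int64_t)(t2 << 16) = 0x50000 and (int64_t)(t1 << 16) = -0x10000, so the difference is 0x60000 > 0 and t2 is correctly reported as later. The _or_equal variant added just below only relaxes the comparison to >= so that an identical timestamp also counts.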
+#define amdgpu_ih_ts_after_or_equal(t1, t2) \
+	(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)
+
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4c4bdc4f51b2..fc588ef598c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1246,28 +1246,36 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (abo->kfd_bo)
 		amdgpu_amdkfd_release_notify(abo);
-	/* We only remove the fence if the resv has individualized. */
-	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
-			&& bo->base.resv != &bo->base._resv);
-	if (bo->base.resv == &bo->base._resv)
-		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+	/*
+	 * We lock the private dma_resv object here and since the BO is about to
+	 * be released nobody else should have a pointer to it.
+	 * So when this locking here fails something is wrong with the reference
+	 * counting.
+	 */
+	if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
+		return;
+
+	amdgpu_amdkfd_remove_all_eviction_fences(abo);
 
 	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
 	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
-		return;
+		goto out;
 
-	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
-		return;
+	r = dma_resv_reserve_fences(&bo->base._resv, 1);
+	if (r)
+		goto out;
 
-	r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
-	if (!WARN_ON(r)) {
-		amdgpu_vram_mgr_set_cleared(bo->resource);
-		amdgpu_bo_fence(abo, fence, false);
-		dma_fence_put(fence);
-	}
+	r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+	if (WARN_ON(r))
+		goto out;
+
+	amdgpu_vram_mgr_set_cleared(bo->resource);
+	dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
+	dma_fence_put(fence);
 
-	dma_resv_unlock(bo->base.resv);
+out:
+	dma_resv_unlock(&bo->base._resv);
 }
/** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index d70855d7c61c..48e30e5f8338 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -44,7 +44,7 @@ #include "amdgpu_securedisplay.h" #include "amdgpu_atomfirmware.h"
-#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
+#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
static int psp_load_smu_fw(struct psp_context *psp); static int psp_rap_terminate(struct psp_context *psp); @@ -531,7 +531,6 @@ static int psp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - struct psp_gfx_cmd_resp *cmd = psp->cmd;
psp_memory_training_fini(psp);
@@ -541,8 +540,8 @@ static int psp_sw_fini(void *handle) amdgpu_ucode_release(&psp->cap_fw); amdgpu_ucode_release(&psp->toc_fw);
-	kfree(cmd);
-	cmd = NULL;
+	kfree(psp->cmd);
+	psp->cmd = NULL;
psp_free_shared_bufs(psp);
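The psp_sw_fini() change above is a "free through the owner" fix: the old code freed a local copy of the pointer and then cleared only that local, leaving psp->cmd dangling. A minimal stand-alone illustration of the difference (hypothetical struct, plain C):

	#include <stdlib.h>

	struct ctx {
		void *cmd;
	};

	static void fini_buggy(struct ctx *c)
	{
		void *cmd = c->cmd;

		free(cmd);
		cmd = NULL;     /* clears only the local; c->cmd still dangles */
	}

	static void fini_fixed(struct ctx *c)
	{
		free(c->cmd);
		c->cmd = NULL;  /* the owner's pointer is cleared */
	}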
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index bb7b9b2eaac1..8da0bddab3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -383,6 +383,45 @@ int amdgpu_umc_fill_error_record(struct ras_err_data *err_data, return 0; }
+static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
+				   void *data)
+{
+	uint32_t umc_node_inst;
+	uint32_t node_inst;
+	uint32_t umc_inst;
+	uint32_t ch_inst;
+	int ret;
+
+	/*
+	 * This loop is done based on the following -
+	 * umc.active mask = mask of active umc instances across all nodes
+	 * umc.umc_inst_num = maximum number of umc instances per node
+	 * umc.node_inst_num = maximum number of node instances
+	 * Channel instances are not assumed to be harvested.
+	 */
+	dev_dbg(adev->dev, "active umcs :%lx umc_inst per node: %d",
+		adev->umc.active_mask, adev->umc.umc_inst_num);
+	for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
+			 adev->umc.node_inst_num * adev->umc.umc_inst_num) {
+		node_inst = umc_node_inst / adev->umc.umc_inst_num;
+		umc_inst = umc_node_inst % adev->umc.umc_inst_num;
+		LOOP_UMC_CH_INST(ch_inst) {
+			dev_dbg(adev->dev,
+				"node_inst :%d umc_inst: %d ch_inst: %d",
+				node_inst, umc_inst, ch_inst);
+			ret = func(adev, node_inst, umc_inst, ch_inst, data);
+			if (ret) {
+				dev_err(adev->dev,
+					"Node %d umc %d ch %d func returns %d\n",
+					node_inst, umc_inst, ch_inst, ret);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
 int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
 			umc_func func, void *data)
 {
@@ -391,6 +430,9 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
 	uint32_t ch_inst = 0;
 	int ret = 0;
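The loop above packs node and UMC instance into a single bit index of active_mask; division and remainder by umc_inst_num recover the pair. A quick worked example, assuming four UMC instances per node (illustrative only):

	/* umc_inst_num == 4 (assumed) */
	unsigned int umc_node_inst = 9;              /* bit 9 set in active_mask */
	unsigned int node_inst = umc_node_inst / 4;  /* node 2 */
	unsigned int umc_inst  = umc_node_inst % 4;  /* UMC instance 1 */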
+ if (adev->aid_mask) + return amdgpu_umc_loop_all_aid(adev, func, data); + if (adev->umc.node_inst_num) { LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) { ret = func(adev, node_inst, umc_inst, ch_inst, data); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 0357fea8ae1d..1f06b22dbe7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -63,6 +63,23 @@ #define regPC_CONFIG_CNTL_1 0x194d #define regPC_CONFIG_CNTL_1_BASE_IDX 1
+#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100 +#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000 +#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000 +#define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01 +#define regCP_GFX_HQD_CNTL_DEFAULT 0x00a00000 +#define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000 +#define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000 + +#define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006 +#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000 +#define regCP_MQD_CONTROL_DEFAULT 0x00000100 +#define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509 +#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000 +#define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000 +#define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501 +#define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000 + MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); @@ -3896,7 +3913,7 @@ static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev, if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH) priority = 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY); + tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority); mqd->cp_gfx_hqd_queue_priority = tmp; } @@ -3918,14 +3935,14 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set up mqd control */ - tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL); + tmp = regCP_GFX_MQD_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */ - tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID); + tmp = regCP_GFX_HQD_VMID_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); mqd->cp_gfx_hqd_vmid = 0;
@@ -3933,7 +3950,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
/* set up time quantum */ - tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM); + tmp = regCP_GFX_HQD_QUANTUM_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); mqd->cp_gfx_hqd_quantum = tmp;
@@ -3955,7 +3972,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */ rb_bufsz = order_base_2(prop->queue_size / 4) - 1; - tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL); + tmp = regCP_GFX_HQD_CNTL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); #ifdef __BIG_ENDIAN @@ -3964,7 +3981,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */ - tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); + tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT; if (prop->use_doorbell) { tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_OFFSET, prop->doorbell_index); @@ -3976,7 +3993,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_rb_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ - mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR); + mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
/* active the queue */ mqd->cp_gfx_hqd_active = 1; @@ -4062,14 +4079,14 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); + tmp = regCP_HQD_EOP_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) { tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, @@ -4098,7 +4115,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set MQD vmid to 0 */ - tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); + tmp = regCP_MQD_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); mqd->cp_mqd_control = tmp;
@@ -4108,7 +4125,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); + tmp = regCP_HQD_PQ_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, (order_base_2(prop->queue_size / 4) - 1)); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, @@ -4134,7 +4151,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, tmp = 0; /* enable the doorbell if requested */ if (prop->use_doorbell) { - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_OFFSET, prop->doorbell_index);
@@ -4149,17 +4166,17 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ - mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); + mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
/* set the vmid for the queue */ mqd->cp_hqd_vmid = 0;
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); + tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); + tmp = regCP_HQD_IB_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp;
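In the MQD init hunks above, seeding tmp with a compile-time *_DEFAULT value (instead of reading the register back) keeps the MQD contents independent of whatever the hardware happens to hold at init time; REG_SET_FIELD then only rewrites the named field. A hedged sketch of the usual mask/shift pattern behind such helpers (MY_FIELD_* are made-up values, not the real register layout):

	#include <stdint.h>

	#define MY_FIELD_MASK   0x0000000fu
	#define MY_FIELD_SHIFT  0

	uint32_t tmp = 0x00000100;  /* compile-time default, as above */

	/* clear the field, then insert the new value; other bits keep the default */
	tmp = (tmp & ~MY_FIELD_MASK) | ((7u << MY_FIELD_SHIFT) & MY_FIELD_MASK);
	/* tmp == 0x00000107 */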
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 241619ee10e4..adcfcf594286 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -52,6 +52,24 @@
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100 +#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000 +#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000 +#define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01 +#define regCP_GFX_HQD_CNTL_DEFAULT 0x00f00000 +#define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000 +#define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000 + +#define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006 +#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000 +#define regCP_MQD_CONTROL_DEFAULT 0x00000100 +#define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509 +#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000 +#define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000 +#define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501 +#define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000 + + MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin"); @@ -2851,25 +2869,25 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set up mqd control */ - tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL); + tmp = regCP_GFX_MQD_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0); tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1); tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0); mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vimd with 0x0 to indicate the ring buffer's vmid */ - tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID); + tmp = regCP_GFX_HQD_VMID_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0); mqd->cp_gfx_hqd_vmid = 0;
/* set up default queue priority level * 0x0 = low priority, 0x1 = high priority */ - tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY); + tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0); mqd->cp_gfx_hqd_queue_priority = tmp;
/* set up time quantum */ - tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM); + tmp = regCP_GFX_HQD_QUANTUM_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1); mqd->cp_gfx_hqd_quantum = tmp;
@@ -2891,7 +2909,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */ rb_bufsz = order_base_2(prop->queue_size / 4) - 1; - tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL); + tmp = regCP_GFX_HQD_CNTL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz); tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2); #ifdef __BIG_ENDIAN @@ -2900,7 +2918,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */ - tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL); + tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT; if (prop->use_doorbell) { tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_OFFSET, prop->doorbell_index); @@ -2912,7 +2930,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_rb_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ - mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR); + mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
/* active the queue */ mqd->cp_gfx_hqd_active = 1; @@ -3007,14 +3025,14 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL); + tmp = regCP_HQD_EOP_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, (order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) { tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, @@ -3043,7 +3061,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set MQD vmid to 0 */ - tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL); + tmp = regCP_MQD_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); mqd->cp_mqd_control = tmp;
@@ -3053,7 +3071,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL); + tmp = regCP_HQD_PQ_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, (order_base_2(prop->queue_size / 4) - 1)); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, @@ -3078,7 +3096,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, tmp = 0; /* enable the doorbell if requested */ if (prop->use_doorbell) { - tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_OFFSET, prop->doorbell_index);
@@ -3093,17 +3111,17 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ - mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR); + mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
/* set the vmid for the queue */ mqd->cp_hqd_vmid = 0;
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE); + tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55); mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */ - tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL); + tmp = regCP_HQD_IB_CONTROL_DEFAULT; tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 0e3ddea7b8e0..a7bfc9f41d0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value;
- /* Program the AGP BAR */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0); - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); - if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { + /* Program the AGP BAR */ + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0); + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + /* Program the system aperture low logical page number. */ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 9a212413c6d3..78c527b56f7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1461,7 +1461,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM; adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM; adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET; - adev->umc.active_mask = adev->aid_mask; adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) adev->umc.ras = &umc_v12_0_ras; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 7a773fcd7752..49113df8baef 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -690,7 +690,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes) { - int size = 128 * PAGE_SIZE; + int size = 128 * AMDGPU_GPU_PAGE_SIZE; int ret = 0; struct amdgpu_device *adev = mes->adev; union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index 9689e2b5d4e5..2adee2b94c37 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -172,6 +172,30 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp); }
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */ +static void mmhub_v1_7_init_snoop_override_regs(struct amdgpu_device *adev) +{ + uint32_t tmp; + int i; + uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE - + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE; + + for (i = 0; i < 5; i++) { /* DAGB instances */ + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance); + tmp |= (1 << 15); /* SDMA client is BIT15 */ + WREG32_SOC15_OFFSET(MMHUB, 0, + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance, tmp); + + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance); + tmp |= (1 << 15); + WREG32_SOC15_OFFSET(MMHUB, 0, + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance, tmp); + } + +} + static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; @@ -337,6 +361,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev) mmhub_v1_7_init_system_aperture_regs(adev); mmhub_v1_7_init_tlb_regs(adev); mmhub_v1_7_init_cache_regs(adev); + mmhub_v1_7_init_snoop_override_regs(adev);
mmhub_v1_7_enable_system_domain(adev); mmhub_v1_7_disable_identity_aperture(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index b01bb759d0f4..2276c644a697 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -214,6 +214,32 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) } }
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */ +static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev) +{ + uint32_t tmp, inst_mask; + int i, j; + uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE - + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE; + + inst_mask = adev->aid_mask; + for_each_inst(i, inst_mask) { + for (j = 0; j < 5; j++) { /* DAGB instances */ + tmp = RREG32_SOC15_OFFSET(MMHUB, i, + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance); + tmp |= (1 << 15); /* SDMA client is BIT15 */ + WREG32_SOC15_OFFSET(MMHUB, i, + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp); + + tmp = RREG32_SOC15_OFFSET(MMHUB, i, + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance); + tmp |= (1 << 15); + WREG32_SOC15_OFFSET(MMHUB, i, + regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp); + } + } +} + static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp, inst_mask; @@ -419,6 +445,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) mmhub_v1_8_init_system_aperture_regs(adev); mmhub_v1_8_init_tlb_regs(adev); mmhub_v1_8_init_cache_regs(adev); + mmhub_v1_8_init_snoop_override_regs(adev);
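The snoop-override helpers added in this patch all follow the same addressing scheme: the per-instance stride is the distance between the DAGB1 and DAGB0 register offsets, and bit 15 (the SDMA client) is OR-ed into both the OVERRIDE and OVERRIDE_VALUE registers of each instance. A small sketch of the arithmetic with made-up offsets:

	uint32_t dagb0 = 0x1000, dagb1 = 0x1080;  /* illustrative offsets only */
	uint32_t distance = dagb1 - dagb0;        /* stride between DAGB instances */

	for (int i = 0; i < 5; i++) {
		uint32_t reg = dagb0 + i * distance;  /* DAGB0 .. DAGB4 */
		/* read reg, OR in (1 << 15), write it back */
	}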
mmhub_v1_8_enable_system_domain(adev); mmhub_v1_8_disable_identity_aperture(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index ff1b58e44689..fe0710b55c3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -198,6 +198,36 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); }
+/* Set snoop bit for SDMA so that SDMA writes probe-invalidates RW lines */ +static void mmhub_v9_4_init_snoop_override_regs(struct amdgpu_device *adev, int hubid) +{ + uint32_t tmp; + int i; + uint32_t distance = mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE - + mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE; + uint32_t huboffset = hubid * MMHUB_INSTANCE_REGISTER_OFFSET; + + for (i = 0; i < 5 - (2 * hubid); i++) { + /* DAGB instances 0 to 4 are in hub0 and 5 to 7 are in hub1 */ + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, + mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, + huboffset + i * distance); + tmp |= (1 << 15); /* SDMA client is BIT15 */ + WREG32_SOC15_OFFSET(MMHUB, 0, + mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, + huboffset + i * distance, tmp); + + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, + mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, + huboffset + i * distance); + tmp |= (1 << 15); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, + huboffset + i * distance, tmp); + } + +} + static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid) { uint32_t tmp; @@ -392,6 +422,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) mmhub_v9_4_init_cache_regs(adev, i);
+ mmhub_v9_4_init_snoop_override_regs(adev, i); mmhub_v9_4_enable_system_domain(adev, i); if (!amdgpu_sriov_vf(adev)) mmhub_v9_4_disable_identity_aperture(adev, i); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 4f94a119d627..ab0eecbab412 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -141,23 +141,23 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = { };
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, };
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, };
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index bba35880badb..04a1b2a46368 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -117,23 +117,17 @@ static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = { };
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, };
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, };
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 951b87e7e3f6..6a58dd8d2130 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2453,14 +2453,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, return retval; }
-/* - * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to - * stay in user mode. - */ -#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL -/* APE1 limit is inclusive and 64K aligned. */ -#define APE1_LIMIT_ALIGNMENT 0xFFFF - static bool set_cache_memory_policy(struct device_queue_manager *dqm, struct qcm_process_device *qpd, enum cache_policy default_policy, @@ -2475,34 +2467,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
dqm_lock(dqm);
- if (alternate_aperture_size == 0) { - /* base > limit disables APE1 */ - qpd->sh_mem_ape1_base = 1; - qpd->sh_mem_ape1_limit = 0; - } else { - /* - * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, - * SH_MEM_APE1_BASE[31:0], 0x0000 } - * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, - * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } - * Verify that the base and size parameters can be - * represented in this format and convert them. - * Additionally restrict APE1 to user-mode addresses. - */ - - uint64_t base = (uintptr_t)alternate_aperture_base; - uint64_t limit = base + alternate_aperture_size - 1; - - if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || - (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { - retval = false; - goto out; - } - - qpd->sh_mem_ape1_base = base >> 16; - qpd->sh_mem_ape1_limit = limit >> 16; - } - retval = dqm->asic_ops.set_cache_memory_policy( dqm, qpd, @@ -2511,6 +2475,9 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm, alternate_aperture_base, alternate_aperture_size);
+ if (retval) + goto out; + if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) program_sh_mem_settings(dqm, qpd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c index d4d95c7f2e5d..32bedef912b3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c @@ -27,6 +27,14 @@ #include "oss/oss_2_4_sh_mask.h" #include "gca/gfx_7_2_sh_mask.h"
+/* + * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to + * stay in user mode. + */ +#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL +/* APE1 limit is inclusive and 64K aligned. */ +#define APE1_LIMIT_ALIGNMENT 0xFFFF + static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, struct qcm_process_device *qpd, enum cache_policy default_policy, @@ -84,6 +92,36 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, { uint32_t default_mtype; uint32_t ape1_mtype; + unsigned int temp; + bool retval = true; + + if (alternate_aperture_size == 0) { + /* base > limit disables APE1 */ + qpd->sh_mem_ape1_base = 1; + qpd->sh_mem_ape1_limit = 0; + } else { + /* + * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, + * SH_MEM_APE1_BASE[31:0], 0x0000 } + * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, + * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } + * Verify that the base and size parameters can be + * represented in this format and convert them. + * Additionally restrict APE1 to user-mode addresses. + */ + + uint64_t base = (uintptr_t)alternate_aperture_base; + uint64_t limit = base + alternate_aperture_size - 1; + + if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || + (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { + retval = false; + goto out; + } + + qpd->sh_mem_ape1_base = base >> 16; + qpd->sh_mem_ape1_limit = limit >> 16; + }
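The validation above enforces 64 KiB granularity: the base must end in 0x0000, the inclusive limit in 0xFFFF, and the high bits must be clear so the aperture stays in user space. A worked example with hypothetical values:

	uint64_t base  = 0x100000000ULL;   /* 4 GiB, 64 KiB aligned */
	uint64_t size  = 0x10000ULL;       /* one 64 KiB page */
	uint64_t limit = base + size - 1;  /* 0x10000FFFF, ends in 0xFFFF */

	/* passes the APE1_FIXED_BITS_MASK checks above, so: */
	uint32_t sh_mem_ape1_base  = base  >> 16;  /* 0x00010000 */
	uint32_t sh_mem_ape1_limit = limit >> 16;  /* 0x00010000 */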
default_mtype = (default_policy == cache_policy_coherent) ? MTYPE_NONCACHED : @@ -97,37 +135,22 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) | DEFAULT_MTYPE(default_mtype) | APE1_MTYPE(ape1_mtype); - - return true; -} - -static int update_qpd_cik(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) -{ - struct kfd_process_device *pdd; - unsigned int temp; - - pdd = qpd_to_pdd(qpd); - - /* check if sh_mem_config register already configured */ - if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) | - DEFAULT_MTYPE(MTYPE_NONCACHED) | - APE1_MTYPE(MTYPE_NONCACHED); - qpd->sh_mem_ape1_limit = 0; - qpd->sh_mem_ape1_base = 0; - } - /* On dGPU we're always in GPUVM64 addressing mode with 64-bit * aperture addresses. */ - temp = get_sh_mem_bases_nybble_64(pdd); + temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd)); qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n", qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+out: + return retval; +} + +static int update_qpd_cik(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ return 0; }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c index 245a90dfc2f6..b5f5f141353b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c @@ -31,10 +31,17 @@ static int update_qpd_v10(struct device_queue_manager *dqm, struct qcm_process_device *qpd); static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); +static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size);
void device_queue_manager_init_v10( struct device_queue_manager_asic_ops *asic_ops) { + asic_ops->set_cache_memory_policy = set_cache_memory_policy_v10; asic_ops->update_qpd = update_qpd_v10; asic_ops->init_sdma_vm = init_sdma_vm_v10; asic_ops->mqd_manager_init = mqd_manager_init_v10; @@ -49,27 +56,27 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd) private_base; }
-static int update_qpd_v10(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) +static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size) { - struct kfd_process_device *pdd; - - pdd = qpd_to_pdd(qpd); - - /* check if sh_mem_config register already configured */ - if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - (SH_MEM_ALIGNMENT_MODE_UNALIGNED << - SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | - (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT); - qpd->sh_mem_ape1_limit = 0; - qpd->sh_mem_ape1_base = 0; - } - - qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); + qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | + (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT); + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases); + return true; +}
+static int update_qpd_v10(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ return 0; }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c index 2e129da7acb4..f436878d0d62 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c @@ -30,10 +30,17 @@ static int update_qpd_v11(struct device_queue_manager *dqm, struct qcm_process_device *qpd); static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); +static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size);
void device_queue_manager_init_v11( struct device_queue_manager_asic_ops *asic_ops) { + asic_ops->set_cache_memory_policy = set_cache_memory_policy_v11; asic_ops->update_qpd = update_qpd_v11; asic_ops->init_sdma_vm = init_sdma_vm_v11; asic_ops->mqd_manager_init = mqd_manager_init_v11; @@ -48,28 +55,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd) private_base; }
-static int update_qpd_v11(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) +static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size) { - struct kfd_process_device *pdd; - - pdd = qpd_to_pdd(qpd); - - /* check if sh_mem_config register already configured */ - if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - (SH_MEM_ALIGNMENT_MODE_UNALIGNED << - SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | - (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT); - - qpd->sh_mem_ape1_limit = 0; - qpd->sh_mem_ape1_base = 0; - } + qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | + (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases); + return true; +}
+static int update_qpd_v11(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ return 0; }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c index 4f3295b29dfb..62ca1c8fcbaf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c @@ -30,10 +30,17 @@ static int update_qpd_v12(struct device_queue_manager *dqm, struct qcm_process_device *qpd); static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); +static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size);
void device_queue_manager_init_v12( struct device_queue_manager_asic_ops *asic_ops) { + asic_ops->set_cache_memory_policy = set_cache_memory_policy_v12; asic_ops->update_qpd = update_qpd_v12; asic_ops->init_sdma_vm = init_sdma_vm_v12; asic_ops->mqd_manager_init = mqd_manager_init_v12; @@ -48,28 +55,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd) private_base; }
-static int update_qpd_v12(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) +static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size) { - struct kfd_process_device *pdd; - - pdd = qpd_to_pdd(qpd); - - /* check if sh_mem_config register already configured */ - if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - (SH_MEM_ALIGNMENT_MODE_UNALIGNED << - SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | - (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT); - - qpd->sh_mem_ape1_limit = 0; - qpd->sh_mem_ape1_base = 0; - } + qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | + (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases); + return true; +}
+static int update_qpd_v12(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ return 0; }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index 210bcc048f4c..d85eadaa1e11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -30,10 +30,17 @@ static int update_qpd_v9(struct device_queue_manager *dqm, struct qcm_process_device *qpd); static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd); +static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size);
void device_queue_manager_init_v9( struct device_queue_manager_asic_ops *asic_ops) { + asic_ops->set_cache_memory_policy = set_cache_memory_policy_v9; asic_ops->update_qpd = update_qpd_v9; asic_ops->init_sdma_vm = init_sdma_vm_v9; asic_ops->mqd_manager_init = mqd_manager_init_v9; @@ -48,10 +55,36 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd) private_base; }
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + enum cache_policy default_policy, + enum cache_policy alternate_policy, + void __user *alternate_aperture_base, + uint64_t alternate_aperture_size) +{ + qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; + + if (dqm->dev->kfd->noretry) + qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; + + if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) || + KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4)) + qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT); + + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd)); + + pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases, + qpd->sh_mem_config); + return true; +} + static int update_qpd_v9(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - struct kfd_process_device *pdd; + struct kfd_process_device *pdd = qpd_to_pdd(qpd);
pdd = qpd_to_pdd(qpd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c index b291ee0fab94..320518f41890 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c @@ -27,6 +27,14 @@ #include "gca/gfx_8_0_sh_mask.h" #include "oss/oss_3_0_sh_mask.h"
+/* + * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to + * stay in user mode. + */ +#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL +/* APE1 limit is inclusive and 64K aligned. */ +#define APE1_LIMIT_ALIGNMENT 0xFFFF + static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, struct qcm_process_device *qpd, enum cache_policy default_policy, @@ -85,6 +93,36 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, { uint32_t default_mtype; uint32_t ape1_mtype; + unsigned int temp; + bool retval = true; + + if (alternate_aperture_size == 0) { + /* base > limit disables APE1 */ + qpd->sh_mem_ape1_base = 1; + qpd->sh_mem_ape1_limit = 0; + } else { + /* + * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, + * SH_MEM_APE1_BASE[31:0], 0x0000 } + * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, + * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } + * Verify that the base and size parameters can be + * represented in this format and convert them. + * Additionally restrict APE1 to user-mode addresses. + */ + + uint64_t base = (uintptr_t)alternate_aperture_base; + uint64_t limit = base + alternate_aperture_size - 1; + + if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || + (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { + retval = false; + goto out; + } + + qpd->sh_mem_ape1_base = base >> 16; + qpd->sh_mem_ape1_limit = limit >> 16; + }
default_mtype = (default_policy == cache_policy_coherent) ? MTYPE_UC : @@ -100,40 +138,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
- return true; -} - -static int update_qpd_vi(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) -{ - struct kfd_process_device *pdd; - unsigned int temp; - - pdd = qpd_to_pdd(qpd); - - /* check if sh_mem_config register already configured */ - if (qpd->sh_mem_config == 0) { - qpd->sh_mem_config = - SH_MEM_ALIGNMENT_MODE_UNALIGNED << - SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | - MTYPE_UC << - SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | - MTYPE_UC << - SH_MEM_CONFIG__APE1_MTYPE__SHIFT; - - qpd->sh_mem_ape1_limit = 0; - qpd->sh_mem_ape1_base = 0; - } - /* On dGPU we're always in GPUVM64 addressing mode with 64-bit * aperture addresses. */ - temp = get_sh_mem_bases_nybble_64(pdd); + temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd)); qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n", temp, qpd->sh_mem_bases); +out: + return retval; +}
+static int update_qpd_vi(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ return 0; }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 0ec8b457494b..45923da7709f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -842,6 +842,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) return ERR_PTR(-EINVAL); }
+ /* If the process just called exec(3), it is possible that the + * cleanup of the kfd_process (following the release of the mm + * of the old process image) is still in the cleanup work queue. + * Make sure to drain any job before trying to recreate any + * resource for this process. + */ + flush_workqueue(kfd_process_wq); + /* * take kfd processes mutex before starting of process creation * so there won't be a case where two threads of the same process @@ -860,14 +868,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) if (process) { pr_debug("Process already found\n"); } else { - /* If the process just called exec(3), it is possible that the - * cleanup of the kfd_process (following the release of the mm - * of the old process image) is still in the cleanup work queue. - * Make sure to drain any job before trying to recreate any - * resource for this process. - */ - flush_workqueue(kfd_process_wq); - process = create_process(thread); if (IS_ERR(process)) goto out; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index b50283864dcd..f00d41be7fca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -3014,7 +3014,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
/* check if this page fault time stamp is before svms->checkpoint_ts */ if (svms->checkpoint_ts[gpuidx] != 0) { - if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) { + if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) { pr_debug("draining retry fault, drop fault 0x%llx\n", addr); r = -EAGAIN; goto out_unlock_svms; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 3871591c9aec..82da568604b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1683,17 +1683,32 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, int cache_type, unsigned int cu_processor_id, struct kfd_node *knode) { - unsigned int cu_sibling_map_mask; + unsigned int cu_sibling_map_mask = 0; int first_active_cu; int i, j, k, xcc, start, end; int num_xcc = NUM_XCC(knode->xcc_mask); struct kfd_cache_properties *pcache = NULL; enum amdgpu_memory_partition mode; struct amdgpu_device *adev = knode->adev; + bool found = false;
 	start = ffs(knode->xcc_mask) - 1;
 	end = start + num_xcc;
-	cu_sibling_map_mask = cu_info->bitmap[start][0][0];
+
+	/* To find the bitmap in the first active cu in the first
+	 * xcc, it is based on the assumption that every xcc must
+	 * have at least one active cu.
+	 */
+	for (i = 0; i < gfx_info->max_shader_engines && !found; i++) {
+		for (j = 0; j < gfx_info->max_sh_per_se && !found; j++) {
+			if (cu_info->bitmap[start][i % 4][j % 4]) {
+				cu_sibling_map_mask =
+					cu_info->bitmap[start][i % 4][j % 4];
+				found = true;
+			}
+		}
+	}
+
 	cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
 	first_active_cu = ffs(cu_sibling_map_mask);
@@ -2002,10 +2017,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
 		dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
 			HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
- if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0)) - dev->node_props.capability |= - HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; - if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(12, 0, 0)) dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_PRECISE_ALU_OPERATIONS_SUPPORTED; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ff33760aa4fa..5f9452b22596 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -365,6 +365,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev, static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, struct dm_crtc_state *new_state) { + if (new_state->stream->adjust.timing_adjust_pending) + return true; if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) return true; else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) @@ -3313,11 +3315,6 @@ static int dm_resume(void *handle)
return 0; } - - /* leave display off for S4 sequence */ - if (adev->in_s4) - return 0; - /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_state_release(dm_state->context); dm_state->context = dc_state_create(dm->dc, NULL); @@ -5550,9 +5547,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
case DRM_COLOR_YCBCR_BT2020: if (full_range) - *color_space = COLOR_SPACE_2020_YCBCR; + *color_space = COLOR_SPACE_2020_YCBCR_FULL; else - return -EINVAL; + *color_space = COLOR_SPACE_2020_YCBCR_LIMITED; break;
default: @@ -6048,7 +6045,7 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) color_space = COLOR_SPACE_2020_RGB_FULLRANGE; else - color_space = COLOR_SPACE_2020_YCBCR; + color_space = COLOR_SPACE_2020_YCBCR_LIMITED; break; case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 default: @@ -7397,12 +7394,12 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, }
struct dc_stream_state * -create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, +create_validate_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream) { - struct drm_connector *connector = &aconnector->base; + struct amdgpu_dm_connector *aconnector = NULL; struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dc_stream_state *stream; const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; @@ -7413,8 +7410,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (!dm_state) return NULL;
- if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || - aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + aconnector = to_amdgpu_dm_connector(connector); + + if (aconnector && + (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A || + aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)) bpc_limit = 8;
do { @@ -7426,10 +7427,11 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, break; }
- if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + dc_result = dc_validate_stream(adev->dm.dc, stream); + + if (!aconnector) /* writeback connector */ return stream;
- dc_result = dc_validate_stream(adev->dm.dc, stream); if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
@@ -7459,7 +7461,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, __func__, __LINE__);
aconnector->force_yuv420_output = true; - stream = create_validate_stream_for_sink(aconnector, drm_mode, + stream = create_validate_stream_for_sink(connector, drm_mode, dm_state, old_stream); aconnector->force_yuv420_output = false; } @@ -7474,6 +7476,9 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec struct dc_sink *dc_sink; /* TODO: Unhardcode stream count */ struct dc_stream_state *stream; + /* we always have an amdgpu_dm_connector here since we got + * here via the amdgpu_dm_connector_helper_funcs + */ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || @@ -7498,7 +7503,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
drm_mode_set_crtcinfo(mode, 0);
- stream = create_validate_stream_for_sink(aconnector, mode, + stream = create_validate_stream_for_sink(connector, mode, to_dm_connector_state(connector->state), NULL); if (stream) { @@ -8278,7 +8283,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO;
- if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) + if (!ddc_service->ddc_pin) return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); @@ -10518,7 +10523,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) goto skip_modeset;
- new_stream = create_validate_stream_for_sink(aconnector, + new_stream = create_validate_stream_for_sink(connector, &new_crtc_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 20ad72d1b0d9..9603352ee094 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -987,7 +987,7 @@ int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
struct dc_stream_state * - create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, + create_validate_stream_for_sink(struct drm_connector *connector, const struct drm_display_mode *drm_mode, const struct dm_connector_state *dm_state, const struct dc_stream_state *old_stream); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 98e88903d07d..15d94d2a0e2f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1145,7 +1145,7 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data) case COLOR_SPACE_2020_RGB_FULLRANGE: seq_puts(m, "BT2020_RGB"); break; - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: seq_puts(m, "BT2020_YCC"); break; default: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index fca0c31e14d8..92158009cfa7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1646,7 +1646,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (ind >= 0) { struct drm_connector *connector; - struct amdgpu_dm_connector *aconnector; struct drm_connector_state *drm_new_conn_state; struct dm_connector_state *dm_new_conn_state; struct dm_crtc_state *dm_old_crtc_state; @@ -1654,15 +1653,14 @@ int pre_validate_dsc(struct drm_atomic_state *state, connector = amdgpu_dm_find_first_crtc_matching_connector(state, state->crtcs[ind].ptr); - aconnector = to_amdgpu_dm_connector(connector); drm_new_conn_state = drm_atomic_get_new_connector_state(state, - &aconnector->base); + connector); dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);
local_dc_state->streams[i] = - create_validate_stream_for_sink(aconnector, + create_validate_stream_for_sink(connector, &state->crtcs[ind].new_state->mode, dm_new_conn_state, dm_old_crtc_state->stream); diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c index b2fc4f8e6482..a51c2701da24 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c @@ -40,7 +40,8 @@ bool is_rgb_cspace(enum dc_color_space output_color_space) case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR601_LIMITED: case COLOR_SPACE_YCBCR709_LIMITED: - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: + case COLOR_SPACE_2020_YCBCR_FULL: return false; default: /* Add a case to switch */ diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 7d18f372ce7a..6bc59b7ef007 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -101,7 +101,6 @@ static void init_dig_encoder_control(struct bios_parser *bp) bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5; break; default: - dm_output_to_console("Don't have dig_encoder_control for v%d\n", version); bp->cmd_tbl.dig_encoder_control = encoder_control_fallback; break; } @@ -238,7 +237,6 @@ static void init_transmitter_control(struct bios_parser *bp) bp->cmd_tbl.transmitter_control = transmitter_control_v1_7; break; default: - dm_output_to_console("Don't have transmitter_control for v%d\n", crev); bp->cmd_tbl.transmitter_control = transmitter_control_fallback; break; } @@ -408,8 +406,6 @@ static void init_set_pixel_clock(struct bios_parser *bp) bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7; break; default: - dm_output_to_console("Don't have set_pixel_clock for v%d\n", - BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)); bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback; break; } @@ -554,7 +550,6 @@ static void init_set_crtc_timing(struct bios_parser *bp) set_crtc_using_dtd_timing_v3; break; default: - dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version); bp->cmd_tbl.set_crtc_timing = NULL; break; } @@ -671,8 +666,6 @@ static void init_enable_crtc(struct bios_parser *bp) bp->cmd_tbl.enable_crtc = enable_crtc_v1; break; default: - dm_output_to_console("Don't have enable_crtc for v%d\n", - BIOS_CMD_TABLE_PARA_REVISION(enablecrtc)); bp->cmd_tbl.enable_crtc = NULL; break; } @@ -864,8 +857,6 @@ static void init_set_dce_clock(struct bios_parser *bp) bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1; break; default: - dm_output_to_console("Don't have set_dce_clock for v%d\n", - BIOS_CMD_TABLE_PARA_REVISION(setdceclock)); bp->cmd_tbl.set_dce_clock = NULL; break; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 73458e295103..df8139bda142 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -87,8 +87,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2( return true;
default: - /* Unsupported DCE */ - BREAK_TO_DEBUGGER(); + *h = dal_cmd_tbl_helper_dce112_get_table2(); return false; } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index a0fb4481d2f1..e4d22f74f986 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -130,7 +130,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; @@ -194,8 +194,6 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK) new_clocks->dppclk_khz = MIN_DPP_DISP_CLK; - if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK) - new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
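
The default branches above now install a usable fallback (the DCE112 helper table, or the *_fallback command-table handlers) instead of logging to the console or tripping BREAK_TO_DEBUGGER(). A minimal standalone sketch of that "unknown revision gets a fallback handler" shape, with made-up names:

#include <stdio.h>

typedef void (*encoder_control_fn)(int arg);

static void encoder_control_v1_5(int arg)     { printf("v1_5(%d)\n", arg); }
static void encoder_control_fallback(int arg) { printf("fallback(%d)\n", arg); }

/* Unknown table revisions get a working fallback handler instead of a log
 * message (or a debugger trap) and an unset function pointer. */
static encoder_control_fn pick_encoder_control(int version)
{
        switch (version) {
        case 5:
                return encoder_control_v1_5;
        default:
                return encoder_control_fallback;
        }
}

int main(void)
{
        pick_encoder_control(7)(42);   /* unknown revision -> fallback(42) */
        return 0;
}
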
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -204,15 +202,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; }
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - /* No need to apply the w/a if we haven't taken over from bios yet */ - if (clk_mgr_base->clks.dispclk_khz) - dcn315_disable_otg_wa(clk_mgr_base, context, true); + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz;
+ dcn315_disable_otg_wa(clk_mgr_base, context, true); + + /* Clamp the requested clock to PMFW based on their limit. */ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz; + + dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - if (clk_mgr_base->clks.dispclk_khz) - dcn315_disable_otg_wa(clk_mgr_base, context, false); + dcn315_disable_otg_wa(clk_mgr_base, context, false);
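
This dcn315 hunk, and the matching dcn316/dcn35 hunks further down, all follow the same shape: skip the SMU request when the new dispclk is 0 unless it is safe to lower and no display is active, clamp the value actually sent to PMFW to dc->debug.min_disp_clk_khz, and keep caching the caller's unclamped value. A small standalone model of that flow, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the clk_mgr cached state and the SMU call. */
static int cached_dispclk_khz;
static int min_disp_clk_khz = 100000;          /* dc->debug.min_disp_clk_khz */

static void smu_set_dispclk(int khz)
{
        printf("SMU dispclk request: %d kHz\n", khz);
}

static void update_dispclk(int new_khz, bool safe_to_lower, int display_count)
{
        if (new_khz == cached_dispclk_khz)
                return;
        /* Only push a zero clock when lowering is safe and nothing is lit. */
        if (new_khz <= 0 && !(safe_to_lower && display_count == 0))
                return;

        int requested = new_khz;

        /* Clamp what the firmware sees to the configured floor... */
        if (min_disp_clk_khz > 0 && requested < min_disp_clk_khz)
                requested = min_disp_clk_khz;

        smu_set_dispclk(requested);
        cached_dispclk_khz = new_khz;           /* ...but cache the unclamped value */
}

int main(void)
{
        update_dispclk(50000, false, 1);        /* clamped to 100000 at the SMU */
        update_dispclk(0, true, 0);             /* allowed: last display went away */
        return 0;
}
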
update_dispclk = true; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index c3e50c3aaa60..49efea0c8fcf 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -140,7 +140,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; @@ -201,8 +201,6 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow. if (new_clocks->dppclk_khz < 100000) new_clocks->dppclk_khz = 100000; - if (new_clocks->dispclk_khz < 100000) - new_clocks->dispclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) @@ -211,11 +209,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; }
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz; + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ /* Clamp the requested clock to PMFW based on their limit. */ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz; + + dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 7d0d8852ce8d..a4ac601a30c3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -452,14 +452,19 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; }
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz; + dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
- if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz) - new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz; + /* Clamp the requested clock to PMFW based on their limit. */ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+ dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); + dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index 8cfc5f435937..313e52997596 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -24,6 +24,8 @@
#include "dml/dcn401/dcn401_fpu.h"
+#define DCN_BASE__INST0_SEG1 0x000000C0 + #define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37 #define mmCLK01_CLK0_CLK0_DFS_CNTL 0x16E69 #define mmCLK01_CLK0_CLK1_DFS_CNTL 0x16E6C diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 216b525bd75e..a99d3e2256f1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -276,6 +276,7 @@ static bool create_links( link->link_id.type = OBJECT_TYPE_CONNECTOR; link->link_id.id = CONNECTOR_ID_VIRTUAL; link->link_id.enum_id = ENUM_ID_1; + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) { @@ -438,9 +439,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, * Don't adjust DRR while there's bandwidth optimizations pending to * avoid conflicting with firmware updates. */ - if (dc->ctx->dce_version > DCE_VERSION_MAX) - if (dc->optimized_required || dc->wm_optimized_required) + if (dc->ctx->dce_version > DCE_VERSION_MAX) { + if (dc->optimized_required || dc->wm_optimized_required) { + stream->adjust.timing_adjust_pending = true; return false; + } + }
dc_exit_ips_for_hw_access(dc);
@@ -452,6 +456,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (dc->caps.max_v_total != 0 && (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) { + stream->adjust.timing_adjust_pending = false; if (adjust->allow_otg_v_count_halt) return set_long_vtotal(dc, stream, adjust); else @@ -465,7 +470,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, dc->hwss.set_drr(&pipe, 1, *adjust); - + stream->adjust.timing_adjust_pending = false; return true; } } @@ -2975,8 +2980,14 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vrr_active_fixed) stream->vrr_active_fixed = *update->vrr_active_fixed;
- if (update->crtc_timing_adjust) + if (update->crtc_timing_adjust) { + if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min || + stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max || + stream->adjust.timing_adjust_pending) + update->crtc_timing_adjust->timing_adjust_pending = true; stream->adjust = *update->crtc_timing_adjust; + update->crtc_timing_adjust->timing_adjust_pending = false; + }
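
The new stream->adjust.timing_adjust_pending flag is raised when a vmin/vmax update is accepted but not yet programmed (for example while bandwidth optimizations are still pending, as in the dc_stream_adjust_vmin_vmax() hunk above) and cleared once the adjustment actually reaches the timing generator. A compact standalone model of that deferred-flag handshake; this is a simplification, not the driver's exact state machine:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for dc_crtc_timing_adjust and the programmed OTG state. */
struct adjust {
        int v_total_min, v_total_max;
        bool pending;
};

static struct adjust programmed;          /* what the hardware currently has */
static bool optimizations_pending;        /* e.g. bandwidth updates in flight */

static bool request_adjust(struct adjust *req)
{
        if (optimizations_pending) {
                req->pending = true;      /* remember we still owe the hardware this */
                return false;
        }
        programmed = *req;
        programmed.pending = false;       /* adjustment really reached the OTG */
        return true;
}

int main(void)
{
        struct adjust a = { .v_total_min = 2200, .v_total_max = 2600 };
        bool applied;

        optimizations_pending = true;
        applied = request_adjust(&a);
        printf("applied=%d pending=%d\n", applied, a.pending);          /* 0, 1: deferred */

        optimizations_pending = false;
        applied = request_adjust(&a);
        printf("applied=%d pending=%d\n", applied, programmed.pending); /* 1, 0 */
        return 0;
}
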
if (update->dpms_off) stream->dpms_off = *update->dpms_off; @@ -4734,7 +4745,8 @@ static bool full_update_required(struct dc *dc, stream_update->lut3d_func || stream_update->pending_test_pattern || stream_update->crtc_timing_adjust || - stream_update->scaler_sharpener_update)) + stream_update->scaler_sharpener_update || + stream_update->hw_cursor_req)) return true;
if (stream) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index bb766c2a7417..d62b00314682 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -176,7 +176,7 @@ static bool is_ycbcr2020_type( { bool ret = false;
- if (color_space == COLOR_SPACE_2020_YCBCR) + if (color_space == COLOR_SPACE_2020_YCBCR_LIMITED || color_space == COLOR_SPACE_2020_YCBCR_FULL) ret = true; return ret; } @@ -247,7 +247,8 @@ void color_space_to_black_color( case COLOR_SPACE_YCBCR709_BLACK: case COLOR_SPACE_YCBCR601_LIMITED: case COLOR_SPACE_YCBCR709_LIMITED: - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: + case COLOR_SPACE_2020_YCBCR_FULL: *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV]; break;
@@ -508,6 +509,7 @@ void set_p_state_switch_method( if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba) return;
+ pipe_ctx->p_state_type = P_STATE_UNKNOWN; if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) { /* MCLK switching is supported */ @@ -554,6 +556,21 @@ void set_p_state_switch_method( } }
+void set_drr_and_clear_adjust_pending( + struct pipe_ctx *pipe_ctx, + struct dc_stream_state *stream, + struct drr_params *params) +{ + /* params can be null.*/ + if (pipe_ctx && pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->set_drr) + pipe_ctx->stream_res.tg->funcs->set_drr( + pipe_ctx->stream_res.tg, params); + + if (stream) + stream->adjust.timing_adjust_pending = false; +} + void get_fams2_visual_confirm_color( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index bfcbbea37729..6dbf139c51f7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -4215,7 +4215,7 @@ static void set_avi_info_frame( break; case COLOR_SPACE_2020_RGB_FULLRANGE: case COLOR_SPACE_2020_RGB_LIMITEDRANGE: - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR; hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; break; @@ -4229,7 +4229,7 @@ static void set_avi_info_frame( break; }
- if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR && + if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR_LIMITED && stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { hdmi_info.bits.EC0_EC2 = 0; hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 41bd95e9177a..223c3d55544b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -959,6 +959,14 @@ union dp_128b_132b_supported_lttpr_link_rates { uint8_t raw; };
+union dp_alpm_lttpr_cap { + struct { + uint8_t AUX_LESS_ALPM_SUPPORTED :1; + uint8_t RESERVED :7; + } bits; + uint8_t raw; +}; + union dp_sink_video_fallback_formats { struct { uint8_t dp_1024x768_60Hz_24bpp_support :1; @@ -1103,6 +1111,7 @@ struct dc_lttpr_caps { uint8_t max_ext_timeout; union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; + union dp_alpm_lttpr_cap alpm; uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; };
@@ -1352,6 +1361,9 @@ struct dp_trace { #ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP #define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP 0x221c #endif +#ifndef DP_LTTPR_ALPM_CAPABILITIES +#define DP_LTTPR_ALPM_CAPABILITIES 0xF0009 +#endif #ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index c10567ec1c81..6fd94c5f6da5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -641,7 +641,8 @@ enum dc_color_space { COLOR_SPACE_YCBCR709_LIMITED, COLOR_SPACE_2020_RGB_FULLRANGE, COLOR_SPACE_2020_RGB_LIMITEDRANGE, - COLOR_SPACE_2020_YCBCR, + COLOR_SPACE_2020_YCBCR_LIMITED, + COLOR_SPACE_2020_YCBCR_FULL, COLOR_SPACE_ADOBERGB, COLOR_SPACE_DCIP3, COLOR_SPACE_DISPLAYNATIVE, @@ -649,6 +650,7 @@ enum dc_color_space { COLOR_SPACE_APPCTRL, COLOR_SPACE_CUSTOMPOINTS, COLOR_SPACE_YCBCR709_BLACK, + COLOR_SPACE_2020_YCBCR = COLOR_SPACE_2020_YCBCR_LIMITED, };
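
Splitting COLOR_SPACE_2020_YCBCR into _LIMITED and _FULL while keeping the old spelling as an alias of the limited variant lets every call site not converted in this series keep compiling with unchanged behaviour. A standalone illustration of the enum-alias trick (names shortened here):

#include <stdio.h>

enum color_space {
        CS_2020_RGB_FULLRANGE,
        CS_2020_YCBCR_LIMITED,
        CS_2020_YCBCR_FULL,
        /* Old spelling kept as an alias so unconverted users still compile
         * and keep the limited-range behaviour they always had. */
        CS_2020_YCBCR = CS_2020_YCBCR_LIMITED,
};

int main(void)
{
        enum color_space cs = CS_2020_YCBCR;            /* legacy call site */

        printf("%d\n", cs == CS_2020_YCBCR_LIMITED);    /* prints 1 */
        return 0;
}
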
enum dc_dither_option { @@ -1000,6 +1002,7 @@ struct dc_crtc_timing_adjust { uint32_t v_total_mid; uint32_t v_total_mid_frame_num; uint32_t allow_otg_v_count_halt; + uint8_t timing_adjust_pending; };
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index c8bdbbba44ef..1aca9e96c474 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -1009,6 +1009,13 @@ struct psr_settings { unsigned int psr_sdp_transmit_line_num_deadline; uint8_t force_ffu_mode; unsigned int psr_power_opt; + + /** + * Some panels cannot handle idle pattern during PSR entry. + * To power down phy before disable stream to avoid sending + * idle pattern. + */ + uint8_t power_down_phy_before_disable_stream; };
enum replay_coasting_vtotal_type { diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 0b889004509a..62402c7be0a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -580,9 +580,6 @@ static void dccg401_set_dpstreamclk( int otg_inst, int dp_hpo_inst) { - /* set the dtbclk_p source */ - dccg401_set_dtbclk_p_src(dccg, src, otg_inst); - /* enabled to select one of the DTBCLKs for pipe */ if (src == REFCLK) dccg401_disable_dpstreamclk(dccg, dp_hpo_inst); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 5c2825bc9a87..654b919465f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -420,7 +420,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( dynamic_range_rgb = 1; /*limited range*/ break; case COLOR_SPACE_2020_RGB_FULLRANGE: - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: case COLOR_SPACE_XR_RGB: case COLOR_SPACE_MSREF_SCRGB: case COLOR_SPACE_ADOBERGB: @@ -432,6 +432,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( case COLOR_SPACE_APPCTRL: case COLOR_SPACE_CUSTOMPOINTS: case COLOR_SPACE_UNKNOWN: + default: /* do nothing */ break; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index cae18f8c1c9a..8821153d0ac3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -419,6 +419,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->relock_delay_frame_cnt = 0; if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) copy_settings_data->relock_delay_frame_cnt = 2; + + copy_settings_data->power_down_phy_before_disable_stream = + link->psr_settings.power_down_phy_before_disable_stream; + copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c index f496e952ceec..f8f1e98f646e 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c @@ -393,7 +393,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( break; case COLOR_SPACE_2020_RGB_LIMITEDRANGE: case COLOR_SPACE_2020_RGB_FULLRANGE: - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: case COLOR_SPACE_XR_RGB: case COLOR_SPACE_MSREF_SCRGB: case COLOR_SPACE_ADOBERGB: @@ -406,6 +406,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( case COLOR_SPACE_CUSTOMPOINTS: case COLOR_SPACE_UNKNOWN: case COLOR_SPACE_YCBCR709_BLACK: + default: /* do nothing */ break; } diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c index 0a27e0942a12..0008816cf155 100644 --- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c @@ -634,7 +634,7 @@ void enc401_stream_encoder_dp_set_stream_attribute( break; case COLOR_SPACE_2020_RGB_LIMITEDRANGE: case COLOR_SPACE_2020_RGB_FULLRANGE: - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: case COLOR_SPACE_XR_RGB: case COLOR_SPACE_MSREF_SCRGB: case COLOR_SPACE_ADOBERGB: @@ -647,6 +647,7 @@ void enc401_stream_encoder_dp_set_stream_attribute( case COLOR_SPACE_CUSTOMPOINTS: case COLOR_SPACE_UNKNOWN: case COLOR_SPACE_YCBCR709_BLACK: + default: /* do nothing */ break; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 47d785204f29..c90dee4e9116 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .dcn_downspread_percent = 0.5, .gpuvm_min_page_size_bytes = 4096, .hostvm_min_page_size_bytes = 4096, - .do_urgent_latency_adjustment = 0, + .do_urgent_latency_adjustment = 1, .urgent_latency_adjustment_fabric_clock_component_us = 0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, };
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) @@ -367,6 +367,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc, clock_limits[i].socclk_mhz; dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio; + + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts; dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = clock_limits[i].dtbclk_mhz; dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index a201dbb743d7..79d921adc215 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -401,6 +401,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc, clock_limits[i].socclk_mhz; dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts; dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = clock_limits[i].dtbclk_mhz; dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index e3e4f40bd412..dcbe327209d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -221,7 +221,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s if (!result) return false;
+	DC_FP_START();
 	result = dml2_build_mode_programming(mode_programming);
+	DC_FP_END();
 	if (!result)
 		return false;
@@ -271,7 +273,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co mode_support->dml2_instance = dml_init->dml2_instance; dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; + DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); + DC_FP_END(); if (!is_supported) return false;
@@ -282,16 +286,12 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml { bool out = false;
- DC_FP_START(); - /* Use dml_validate_only for fast_validate path */ if (fast_validate) out = dml21_check_mode_support(in_dc, context, dml_ctx); else out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
- DC_FP_END(); - return out; }
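
DC_FP_START()/DC_FP_END(), which bracket the kernel-FPU-protected region in DC, move out of dml21_validate() and directly around the two DML2.1 calls that actually do floating-point math, keeping the protected window as small as possible. A generic standalone sketch of that pattern, with hypothetical fp_begin()/fp_end() standing in for the DC macros:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical guards; in the driver these are DC_FP_START()/DC_FP_END(). */
static void fp_begin(void) { puts("fpu region enter"); }
static void fp_end(void)   { puts("fpu region leave"); }

/* Stand-ins for the DML2.1 calls that do the floating-point work. */
static bool dml_check(void)   { return true; }
static bool dml_program(void) { return true; }

static bool check_mode_support(void)
{
        fp_begin();
        bool ok = dml_check();
        fp_end();
        return ok;
}

static bool mode_check_and_programming(void)
{
        fp_begin();
        bool ok = dml_program();
        fp_end();
        return ok;
}

/* The top-level validate no longer owns the FPU region; it only dispatches. */
static bool validate(bool fast_validate)
{
        return fast_validate ? check_mode_support() : mode_check_and_programming();
}

int main(void)
{
        printf("%d\n", validate(true));
        return 0;
}
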
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index e2a3764d9d18..0090b7bc232b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -3630,13 +3630,12 @@ static unsigned int CalculateMaxVStartup( double line_time_us = (double)timing->h_total / ((double)timing->pixel_clock_khz / 1000); unsigned int vblank_actual = timing->v_total - timing->v_active; unsigned int vblank_nom_default_in_line = (unsigned int)math_floor2((double)vblank_nom_default_us / line_time_us, 1.0); - unsigned int vblank_nom_input = (unsigned int)math_min2(timing->vblank_nom, vblank_nom_default_in_line); - unsigned int vblank_avail = (vblank_nom_input == 0) ? vblank_nom_default_in_line : vblank_nom_input; + unsigned int vblank_avail = (timing->vblank_nom == 0) ? vblank_nom_default_in_line : (unsigned int)timing->vblank_nom;
vblank_size = (unsigned int)math_min2(vblank_actual, vblank_avail);
if (timing->interlaced && !ptoi_supported) - max_vstartup_lines = (unsigned int)(math_floor2(vblank_size / 2.0, 1.0)); + max_vstartup_lines = (unsigned int)(math_floor2((vblank_size - 1) / 2.0, 1.0)); else max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0)); #ifdef __DML_VBA_DEBUG__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 0f944fcfd5a5..785226945699 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -159,6 +159,7 @@ struct dml2_clks_table_entry { unsigned int dtbclk_mhz; unsigned int dispclk_mhz; unsigned int dppclk_mhz; + unsigned int dram_speed_mts; /*which is based on wck_ratio*/ };
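
The CalculateMaxVStartup() change above has two parts: a nonzero vblank_nom is now used as-is instead of being capped at the computed default, and the interlaced case floors (vblank_size - 1) / 2 rather than vblank_size / 2. A quick numeric check of both expressions with made-up timing values:

#include <stdio.h>

int main(void)
{
        /* Made-up example timing, only to exercise the two expressions. */
        unsigned int vblank_nom = 60;                  /* from the mode; may be 0 */
        unsigned int vblank_nom_default_in_line = 40;  /* fallback from the default vblank time */
        unsigned int vblank_actual = 90;

        /* New rule: trust a nonzero vblank_nom, fall back only when it is 0
         * (previously it was also capped at the computed default). */
        unsigned int vblank_avail =
                (vblank_nom == 0) ? vblank_nom_default_in_line : vblank_nom;
        unsigned int vblank_size =
                vblank_actual < vblank_avail ? vblank_actual : vblank_avail;

        /* Interlaced case: floor((vblank_size - 1) / 2) instead of vblank_size / 2;
         * integer division gives the same floor for these non-negative values. */
        unsigned int max_vstartup_interlaced = (vblank_size - 1) / 2;

        printf("vblank_avail=%u vblank_size=%u max_vstartup=%u\n",
               vblank_avail, vblank_size, max_vstartup_interlaced);
        /* prints: vblank_avail=60 vblank_size=60 max_vstartup=29 */
        return 0;
}
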
struct dml2_clks_num_entries { diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 40acebd13e46..abf439e743f2 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -425,11 +425,6 @@ bool dpp3_get_optimal_number_of_taps( int min_taps_y, min_taps_c; enum lb_memory_config lb_config;
- if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - /* * Set default taps if none are provided * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling @@ -467,6 +462,12 @@ bool dpp3_get_optimal_number_of_taps( else scl_data->taps.h_taps_c = in_taps->h_taps_c;
+ // Avoid null data in the scl data with this early return, proceed non-adaptive calcualtion first + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + /*Ensure we can support the requested number of vtaps*/ min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert); min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c); diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c index 678db949cfe3..759b453385c4 100644 --- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -323,7 +323,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute( break; case COLOR_SPACE_2020_RGB_LIMITEDRANGE: case COLOR_SPACE_2020_RGB_FULLRANGE: - case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_2020_YCBCR_LIMITED: case COLOR_SPACE_XR_RGB: case COLOR_SPACE_MSREF_SCRGB: case COLOR_SPACE_ADOBERGB: @@ -336,6 +336,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute( case COLOR_SPACE_CUSTOMPOINTS: case COLOR_SPACE_UNKNOWN: case COLOR_SPACE_YCBCR709_BLACK: + default: /* do nothing */ break; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 4fbed0298adf..59457ca24e1d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1064,7 +1064,8 @@ void dce110_edp_backlight_control( DC_LOG_DC("edp_receiver_ready_T9 skipped\n"); }
- if (!enable && link->dpcd_sink_ext_caps.bits.oled) { + if (!enable) { + /*follow oem panel config's requirement*/ pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms; msleep(pre_T11_delay); } @@ -1653,9 +1654,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx, stream, ¶ms);
// DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) @@ -2103,8 +2102,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx, struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) { - if (tg->funcs->set_drr) - tg->funcs->set_drr(tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, ¶ms); if (adjust.v_total_max != 0 && adjust.v_total_min != 0) if (tg->funcs->set_static_screen_control) tg->funcs->set_static_screen_control( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index d725af14af37..00be0b26689d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -1112,9 +1112,7 @@ static void dcn10_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, NULL); + set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL); if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; } @@ -3217,8 +3215,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx, struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) { - if (tg->funcs->set_drr) - tg->funcs->set_drr(tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, ¶ms); if (adjust.v_total_max != 0 && adjust.v_total_min != 0) if (tg->funcs->set_static_screen_control) tg->funcs->set_static_screen_control( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index f5f1ccd8303c..9c5cdb3b80b5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -952,9 +952,7 @@ enum dc_status dcn20_enable_stream_timing( params.vertical_total_max = stream->adjust.v_total_max; params.vertical_total_mid = stream->adjust.v_total_mid; params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num; - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx, stream, ¶ms);
// DRR should set trigger event to monitor surface update event if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) @@ -2822,9 +2820,7 @@ void dcn20_reset_back_end_for_pipe( pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, NULL); + set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL); /* TODO - convert symclk_ref_cnts for otg to a bit map to solve * the case where the same symclk is shared across multiple otg * instances diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 3d4b31bd9946..9aa925a0b3b4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -528,9 +528,7 @@ static void dcn31_reset_back_end_for_pipe( if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
-	if (pipe_ctx->stream_res.tg->funcs->set_drr)
-		pipe_ctx->stream_res.tg->funcs->set_drr(
-				pipe_ctx->stream_res.tg, NULL);
+	set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
link = pipe_ctx->stream->link; /* DPMS may already disable or */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 38755ca77140..ca446e08f6a2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1452,8 +1452,7 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx, num_frames = 2 * (frame_rate % 60); } } - if (tg->funcs->set_drr) - tg->funcs->set_drr(tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, ¶ms); if (adjust.v_total_max != 0 && adjust.v_total_min != 0) if (tg->funcs->set_static_screen_control) tg->funcs->set_static_screen_control( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 62f1e597787e..3279f347660c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -844,6 +844,13 @@ enum dc_status dcn401_enable_stream_timing( odm_slice_width, last_odm_slice_width); }
+ /* set DTBCLK_P */ + if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) { + if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { + dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst); + } + } + /* HW program guide assume display already disable * by unplug sequence. OTG assume stop. */ @@ -895,10 +902,7 @@ enum dc_status dcn401_enable_stream_timing( }
hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); - - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, ¶ms); + set_drr_and_clear_adjust_pending(pipe_ctx, stream, ¶ms);
/* Event triggers and num frames initialized for DRR, but can be * later updated for PSR use. Note DRR trigger events are generated @@ -1007,8 +1011,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); } } else { - /* need to set DTBCLK_P source to DPREFCLK for DP8B10B */ - dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst); dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } @@ -1773,3 +1775,128 @@ void dcn401_program_outstanding_updates(struct dc *dc, if (hubbub->funcs->program_compbuf_segments) hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true); } + +void dcn401_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + int i; + struct dc_link *link = pipe_ctx->stream->link; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + + DC_LOGGER_INIT(dc->ctx->logger); + if (pipe_ctx->stream_res.stream_enc == NULL) { + pipe_ctx->stream = NULL; + return; + } + + /* DPMS may already disable or */ + /* dpms_off status is incorrect due to fastboot + * feature. When system resume from S4 with second + * screen only, the dpms_off would be true but + * VBIOS lit up eDP, so check link status too. + */ + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) + dc->link_srv->set_dpms_off(pipe_ctx); + else if (pipe_ctx->stream_res.audio) + dc->hwss.disable_audio_stream(pipe_ctx); + + /* free acquired resources */ + if (pipe_ctx->stream_res.audio) { + /*disable az_endpoint*/ + pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); + + /*free audio*/ + if (dc->caps.dynamic_audio == true) { + /*we have to dynamic arbitrate the audio endpoints*/ + /*we free the resource, need reset is_audio_acquired*/ + update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, + pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; + } + } + + /* by upper caller loop, parent pipe: pipe0, will be reset last. + * back end share by all pipes and will be disable only when disable + * parent pipe. 
+ */ + if (pipe_ctx->top_pipe == NULL) { + + dc->hwss.set_abm_immediate_disable(pipe_ctx); + + pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); + + pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); + if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) + pipe_ctx->stream_res.tg->funcs->set_odm_bypass( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + + set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL); + + /* TODO - convert symclk_ref_cnts for otg to a bit map to solve + * the case where the same symclk is shared across multiple otg + * instances + */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link->phy_state.symclk_ref_cnts.otg = 0; + if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) { + link_hwss->disable_link_output(link, + &pipe_ctx->link_res, pipe_ctx->stream->signal); + link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; + } + + /* reset DTBCLK_P */ + if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) + dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst); + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx) + break; + + if (i == dc->res_pool->pipe_count) + return; + +/* + * In case of a dangling plane, setting this to NULL unconditionally + * causes failures during reset hw ctx where, if stream is NULL, + * it is expected that the pipe_ctx pointers to pipes and plane are NULL. + */ + pipe_ctx->stream = NULL; + DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", + pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); +} + +void dcn401_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context) +{ + int i; + struct dce_hwseq *hws = dc->hwseq; + + /* Reset Back End*/ + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || + pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if (hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index a27e62081685..6256429c8a4f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -84,4 +84,11 @@ void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context); +void dcn401_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); +void dcn401_reset_hw_ctx_wrap( + struct dc *dc, + struct dc_state *context); #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c 
b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index a2ca07235c83..d6f36b8e1a26 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -111,7 +111,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap, .enable_stream_timing = dcn401_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, @@ -136,7 +136,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = { .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = NULL, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, - .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, + .reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe, .populate_mcm_luts = NULL, };
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index ac9205625623..9ae6259f2db1 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -46,6 +46,7 @@ struct dce_hwseq; struct link_resource; struct dc_dmub_cmd; struct pg_block_update; +struct drr_params;
struct subvp_pipe_control_lock_fast_params { struct dc *dc; @@ -509,6 +510,11 @@ void set_p_state_switch_method( struct dc_state *context, struct pipe_ctx *pipe_ctx);
+void set_drr_and_clear_adjust_pending( + struct pipe_ctx *pipe_ctx, + struct dc_stream_state *stream, + struct drr_params *params); + void hwss_execute_sequence(struct dc *dc, struct block_sequence block_sequence[], int num_steps); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index e1e3142cdc00..62fb2009b302 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -621,7 +621,7 @@ struct dc_state { */ struct bw_context bw_ctx;
- struct block_sequence block_sequence[50]; + struct block_sequence block_sequence[100]; unsigned int block_sequence_steps; struct dc_dmub_cmd dc_dmub_cmd[10]; unsigned int dmub_cmd_count; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 7a1ca1e98059..221645c023b5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -221,6 +221,7 @@ enum dentist_divider_range { CLK_SF(CLK0_CLK_PLL_REQ, FbMult_frac, mask_sh)
#define CLK_REG_LIST_DCN401() \ + SR(DENTIST_DISPCLK_CNTL), \ CLK_SR_DCN401(CLK0_CLK_PLL_REQ, CLK01, 0), \ CLK_SR_DCN401(CLK0_CLK0_DFS_CNTL, CLK01, 0), \ CLK_SR_DCN401(CLK0_CLK1_DFS_CNTL, CLK01, 0), \ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 0150f2581ee4..0c5675d1c593 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -119,10 +119,14 @@ static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] = { 0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0, 0x2568, 0x43ee, 0xdbb2 } }, - { COLOR_SPACE_2020_YCBCR, + { COLOR_SPACE_2020_YCBCR_FULL, { 0x2F30, 0x2000, 0, 0xE869, 0xEDB7, 0x2000, 0xFABC, 0xBC6, 0, 0x2000, 0x3C34, 0xE1E6 } }, + { COLOR_SPACE_2020_YCBCR_LIMITED, + { 0x35B9, 0x2543, 0, 0xE2B2, + 0xEB2F, 0x2543, 0xFA01, 0x0B1F, + 0, 0x2543, 0x4489, 0xDB42 } }, { COLOR_SPACE_2020_RGB_LIMITEDRANGE, { 0x35E0, 0x255F, 0, 0xE2B3, 0xEB20, 0x255F, 0xF9FD, 0xB1E, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 885e749cdc6e..842636c7922b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -250,21 +250,21 @@ static uint32_t intersect_frl_link_bw_support( { uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
- // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) - if (hdmi_encoded_link_bw.bits.FRL_MODE) { - if (hdmi_encoded_link_bw.bits.BW_48Gbps) - supported_bw_in_kbps = 48000000; - else if (hdmi_encoded_link_bw.bits.BW_40Gbps) - supported_bw_in_kbps = 40000000; - else if (hdmi_encoded_link_bw.bits.BW_32Gbps) - supported_bw_in_kbps = 32000000; - else if (hdmi_encoded_link_bw.bits.BW_24Gbps) - supported_bw_in_kbps = 24000000; - else if (hdmi_encoded_link_bw.bits.BW_18Gbps) - supported_bw_in_kbps = 18000000; - else if (hdmi_encoded_link_bw.bits.BW_9Gbps) - supported_bw_in_kbps = 9000000; - } + /* Skip checking FRL_MODE bit, as certain PCON will clear + * it despite supporting the link BW indicated in the other bits. + */ + if (hdmi_encoded_link_bw.bits.BW_48Gbps) + supported_bw_in_kbps = 48000000; + else if (hdmi_encoded_link_bw.bits.BW_40Gbps) + supported_bw_in_kbps = 40000000; + else if (hdmi_encoded_link_bw.bits.BW_32Gbps) + supported_bw_in_kbps = 32000000; + else if (hdmi_encoded_link_bw.bits.BW_24Gbps) + supported_bw_in_kbps = 24000000; + else if (hdmi_encoded_link_bw.bits.BW_18Gbps) + supported_bw_in_kbps = 18000000; + else if (hdmi_encoded_link_bw.bits.BW_9Gbps) + supported_bw_in_kbps = 9000000;
return supported_bw_in_kbps; } @@ -945,6 +945,9 @@ bool link_decide_link_settings(struct dc_stream_state *stream, * TODO: add MST specific link training routine */ decide_mst_link_settings(link, link_setting); + } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) { + link_setting->lane_count = LANE_COUNT_FOUR; + link_setting->link_rate = LINK_RATE_HIGH3; } else if (link->connector_signal == SIGNAL_TYPE_EDP) { /* enable edp link optimization for DSC eDP case */ if (stream->timing.flags.DSC) { @@ -967,9 +970,6 @@ bool link_decide_link_settings(struct dc_stream_state *stream, } else { edp_decide_link_settings(link, link_setting, req_bw); } - } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) { - link_setting->lane_count = LANE_COUNT_FOUR; - link_setting->link_rate = LINK_RATE_HIGH3; } else { decide_dp_link_settings(link, link_setting, req_bw); } @@ -1495,7 +1495,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) { - uint8_t lttpr_dpcd_data[8] = {0}; + uint8_t lttpr_dpcd_data[10] = {0}; enum dc_status status; bool is_lttpr_present;
@@ -1545,6 +1545,10 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.alpm.raw = + lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + /* If this chip cap is set, at least one retimer must exist in the chain * Override count to 1 if we receive a known bad count (0 or an invalid value) */ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c index bafa52a0165a..17c57cf98ec5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c @@ -75,7 +75,8 @@ void dp_disable_link_phy(struct dc_link *link, struct dc *dc = link->ctx->dc;
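
The LTTPR capability block is read in one AUX transaction starting at DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV (0xF0000); pulling in the aux-less ALPM byte at DP_LTTPR_ALPM_CAPABILITIES (0xF0009) means the block now covers offsets 0 through 9, which is why lttpr_dpcd_data grows from 8 to 10 bytes. A standalone sketch of that offset arithmetic, with local macro names standing in for the DPCD defines:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the DPCD defines used in the hunks above. */
#define LTTPR_FIELD_DATA_STRUCTURE_REV 0xF0000   /* base of the capability block */
#define LTTPR_ALPM_CAPABILITIES        0xF0009   /* aux-less ALPM support byte */

union alpm_cap {
        struct {
                uint8_t aux_less_alpm_supported : 1;
                uint8_t reserved                : 7;
        } bits;
        uint8_t raw;
};

int main(void)
{
        /* One AUX read covers the whole block, so the buffer must reach the
         * highest offset used: 0xF0009 - 0xF0000 = 9, i.e. 10 bytes. */
        uint8_t lttpr_dpcd_data[10] = {0};
        union alpm_cap alpm;

        lttpr_dpcd_data[LTTPR_ALPM_CAPABILITIES - LTTPR_FIELD_DATA_STRUCTURE_REV] = 0x01;

        alpm.raw = lttpr_dpcd_data[LTTPR_ALPM_CAPABILITIES - LTTPR_FIELD_DATA_STRUCTURE_REV];
        printf("aux-less ALPM supported: %u\n", (unsigned int)alpm.bits.aux_less_alpm_supported);
        return 0;
}
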
if (!link->wa_flags.dp_keep_receiver_powered && - !link->skip_implict_edp_power_control) + !link->skip_implict_edp_power_control && + link->type != dc_connection_none) dpcd_write_rx_power_ctrl(link, false);
dc->hwss.disable_link_output(link, link_res, signal); @@ -163,8 +164,9 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource } else { if (link->fec_state == dc_link_fec_ready) { fec_config = 0; - core_link_write_dpcd(link, DP_FEC_CONFIGURATION, - &fec_config, sizeof(fec_config)); + if (link->type != dc_connection_none) + core_link_write_dpcd(link, DP_FEC_CONFIGURATION, + &fec_config, sizeof(fec_config));
link_enc->funcs->fec_set_ready(link_enc, false); link->fec_state = dc_link_fec_not_ready; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 27b881f947e8..9385a32a471b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1769,13 +1769,10 @@ bool perform_link_training_with_retries( is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && (cur_link_settings.lane_count <= LANE_COUNT_ONE));
- if (is_link_bw_low) { + if (is_link_bw_low) DC_LOG_WARNING( "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", __func__, link->link_index, req_bw, link_bw); - - return false; - } }
msleep(delay_between_attempts); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c index 3bdce32a85e3..ae95ec48e572 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -36,7 +36,8 @@ link->ctx->logger
static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, - const struct dc_link_settings *link_settings) + const struct dc_link_settings *link_settings, + enum lttpr_mode lttpr_mode) { union training_aux_rd_interval training_rd_interval; uint32_t wait_in_micro_secs = 100; @@ -49,6 +50,8 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, DP_TRAINING_AUX_RD_INTERVAL, (uint8_t *)&training_rd_interval, sizeof(training_rd_interval)); + if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT) + wait_in_micro_secs = 400; if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; } @@ -110,7 +113,6 @@ void decide_8b_10b_training_settings( */ lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; - lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); @@ -119,6 +121,7 @@ void decide_8b_10b_training_settings( lt_settings->disallow_per_lane_settings = true; lt_settings->always_match_dpcd_with_hw_lane_settings = true; lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); + lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode); dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); }
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 3aa05a2be6c0..fa642f4b88c2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -674,6 +674,18 @@ bool edp_setup_psr(struct dc_link *link, if (!link) return false;
+ //Clear PSR cfg + memset(&psr_configuration, 0, sizeof(psr_configuration)); + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_PSR_EN_CFG, + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + if (link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) + return false; + dc = link->ctx->dc; dmcu = dc->res_pool->dmcu; psr = dc->res_pool->psr; @@ -684,9 +696,6 @@ bool edp_setup_psr(struct dc_link *link, if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false;
- - memset(&psr_configuration, 0, sizeof(psr_configuration)); - psr_configuration.bits.ENABLE = 1; psr_configuration.bits.CRC_VERIFICATION = 1; psr_configuration.bits.FRAME_CAPTURE_INDICATION = @@ -950,6 +959,16 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream if (!link) return false;
+ //Clear Replay config + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_SINK_PR_ENABLE_AND_CONFIGURATION, + (uint8_t *)&(replay_config.raw), sizeof(uint8_t)); + + if (!(link->replay_settings.config.replay_supported)) + return false; + + link->replay_settings.config.replay_error_status.raw = 0; + dc = link->ctx->dc;
replay = dc->res_pool->replay; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index f2ce687c0e03..9cb72805b8d1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1699,7 +1699,7 @@ static int dcn315_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dsc_input_bpc = 0; DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); - if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) { + if (pixel_rate_crb) { int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); /* Ceil to crb segment size */ int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( @@ -1756,28 +1756,26 @@ static int dcn315_populate_dml_pipes_from_context( continue; }
- if (!pipe->top_pipe && !pipe->prev_odm_pipe) { - bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) - || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); - - if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0) - pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + - (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); - if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { - /* Clamp to 2 pipe split max det segments */ - remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); - pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; - } - if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { - /* If we are splitting we must have an even number of segments */ - remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; - pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; - } - /* Convert segments into size for DML use */ - pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; - - crb_idx++; + bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) + || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0) + pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); + if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { + /* Clamp to 2 pipe split max det segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); + pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; + } + if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { + /* If we are splitting we must have an even number of segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; + pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; } + /* Convert segments into size for DML use */ + pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; + + crb_idx++; pipe_cnt++; } } diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c index 014e8a296f0c..54c7d6aecf51 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c @@ -875,8 +875,8 @@ static bool spl_get_optimal_number_of_taps( bool *enable_isharp) { int num_part_y, num_part_c; - int max_taps_y, max_taps_c; - int min_taps_y, min_taps_c; + unsigned int max_taps_y, max_taps_c; + unsigned int min_taps_y, min_taps_c; enum lb_memory_config lb_config; bool skip_easf = false;
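
The DCN3.15 DET logic above now hands out the leftover CRB segments for every pipe in the list: each pipe gets the even share, the first (remainder) pipes get one extra segment, and the result is then clamped to the two-way-split maximum and forced even when a split is required. The divide-with-remainder part in isolation, with made-up numbers:

#include <stdio.h>

int main(void)
{
        /* Illustrative values only: segments left after the reserved minimum,
         * spread across the pipes that share the CRB. */
        int remaining_det_segs = 11;
        int crb_pipes = 4;
        int extra[4];

        for (int crb_idx = 0; crb_idx < crb_pipes; crb_idx++)
                extra[crb_idx] = remaining_det_segs / crb_pipes +
                                 (crb_idx < remaining_det_segs % crb_pipes ? 1 : 0);

        /* 11 segments over 4 pipes -> 3, 3, 3, 2 */
        for (int i = 0; i < crb_pipes; i++)
                printf("pipe %d: +%d segments\n", i, extra[i]);
        return 0;
}
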
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h index 2a74ff5fdfdb..a2c28949ec47 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h @@ -479,7 +479,7 @@ struct spl_sharpness_range { }; struct adaptive_sharpness { bool enable; - int sharpness_level; + unsigned int sharpness_level; struct spl_sharpness_range sharpness_range; }; enum linear_light_scaling { // convert it in translation logic diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7835100b37c4..d74336830368 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -2869,6 +2869,12 @@ struct dmub_cmd_psr_copy_settings_data { * Some panels request main link off before xth vertical line */ uint16_t poweroff_before_vertical_line; + /** + * Some panels cannot handle idle pattern during PSR entry. + * To power down phy before disable stream to avoid sending + * idle pattern. + */ + uint8_t power_down_phy_before_disable_stream; };
/** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index d9f31b191c69..1a68b5782cac 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -83,8 +83,8 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in, void dmub_dcn31_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; - const uint32_t timeout = 100; - uint32_t in_reset, scratch, i, pwait_mode; + const uint32_t timeout = 100000; + uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -108,7 +108,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) }
for (i = 0; i < timeout; ++i) { - scratch = dmub->hw_funcs.get_gpint_response(dmub); + scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break;
@@ -125,9 +125,14 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) /* Force reset in case we timed out, DMCUB is likely hung. */ }
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); + + if (is_enabled) { + REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); + REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); + } + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 2ccad79053c5..4581eb479451 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -88,7 +88,7 @@ static inline void dmub_dcn35_translate_addr(const union dmub_addr *addr_in, void dmub_dcn35_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; - const uint32_t timeout = 100; + const uint32_t timeout = 100000; uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); @@ -113,7 +113,7 @@ void dmub_dcn35_reset(struct dmub_srv *dmub) }
for (i = 0; i < timeout; ++i) { - scratch = dmub->hw_funcs.get_gpint_response(dmub); + scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break;
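[Editor's note] The dmub reset rework above is at heart a bounded polling loop: check a status source, back off for a fixed delay, and give up once a total budget is exhausted, with the elapsed count carried across consecutive wait conditions (as the dcn401 variant below does with its shared index). Here is a minimal userspace sketch of that pattern; the callbacks, budget, and names are placeholders, not the DMUB code.

```c
/* Generic bounded-poll sketch mirroring the reset sequence above.
 * Condition callbacks and the budget are illustrative placeholders. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TIMEOUT_US    1000000  /* total budget: 1 second   */
#define POLL_DELAY_US 1        /* delay between two checks */

/* Poll @cond until it returns true or the shared budget runs out.
 * @i carries how much of the budget earlier waits already consumed. */
static bool poll_until(bool (*cond)(void), unsigned int *i)
{
	for (; *i < TIMEOUT_US; (*i)++) {
		if (cond())
			return true;
		usleep(POLL_DELAY_US);
	}
	return false;
}

/* Stand-ins for "command acknowledged" and "firmware stopped". */
static bool acked(void)   { static int n; return ++n > 3; }
static bool stopped(void) { static int n; return ++n > 5; }

int main(void)
{
	unsigned int i = 0;

	if (poll_until(acked, &i) && poll_until(stopped, &i))
		printf("stopped after %u polls\n", i);
	else
		printf("timed out\n"); /* the driver would force a reset here */
	return 0;
}
```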
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index 39a8cb6d7523..e1c4fe1c6e3e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -63,8 +63,10 @@ static inline void dmub_dcn401_translate_addr(const union dmub_addr *addr_in, void dmub_dcn401_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; - const uint32_t timeout = 30; - uint32_t in_reset, scratch, i; + const uint32_t timeout_us = 1 * 1000 * 1000; //1s + const uint32_t poll_delay_us = 1; //1us + uint32_t i = 0; + uint32_t in_reset, scratch, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -75,32 +77,35 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
dmub->hw_funcs.set_gpint(dmub, cmd);
- /** - * Timeout covers both the ACK and the wait - * for remaining work to finish. - * - * This is mostly bound by the PHY disable sequence. - * Each register check will be greater than 1us, so - * don't bother using udelay. - */ - - for (i = 0; i < timeout; ++i) { + for (i = 0; i < timeout_us; i++) { if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) break; + + udelay(poll_delay_us); }
- for (i = 0; i < timeout; ++i) { + for (; i < timeout_us; i++) { scratch = dmub->hw_funcs.get_gpint_response(dmub); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; + + udelay(poll_delay_us); }
- /* Force reset in case we timed out, DMCUB is likely hung. */ + for (; i < timeout_us; i++) { + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); + if (pwait_mode & (1 << 0)) + break; + + udelay(poll_delay_us); + } + } + + if (i >= timeout_us) { + /* timeout should never occur */ + BREAK_TO_DEBUGGER(); }
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -131,7 +136,10 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
dmub_dcn401_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -151,6 +159,7 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub, DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, DMCUB_REGION3_CW1_ENABLE, 1);
+ /* release DMCUB reset only to prevent premature execution */ REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, 0x20); } @@ -161,7 +170,10 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub, { union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
@@ -181,6 +193,7 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub, DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, DMCUB_REGION3_CW1_ENABLE, 1);
+ /* release DMCUB reset only to prevent premature execution */ REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, 0x20); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h index 4c8843b79695..31f95b27e227 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h @@ -169,7 +169,8 @@ struct dmub_srv; DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN) \ DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK) \ DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_STAT) \ - DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN) + DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN) \ + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn401_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index a344e2e49b0e..b3d55cac3569 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -383,10 +383,10 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, colorimetryFormat = ColorimetryYCC_DP_ITU709; else if (cs == COLOR_SPACE_ADOBERGB) colorimetryFormat = ColorimetryYCC_DP_AdobeYCC; - else if (cs == COLOR_SPACE_2020_YCBCR) + else if (cs == COLOR_SPACE_2020_YCBCR_LIMITED) colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
- if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22) + if (cs == COLOR_SPACE_2020_YCBCR_LIMITED && tf == TRANSFER_FUNC_GAMMA_22) colorimetryFormat = ColorimetryYCC_DP_ITU709; break;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h index c488d4a50cf4..b2252deabc17 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h @@ -203,6 +203,10 @@ #define mmDAGB0_WR_DATA_CREDIT_BASE_IDX 1 #define mmDAGB0_WR_MISC_CREDIT 0x0058 #define mmDAGB0_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE 0x005b +#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 +#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x005c +#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 #define mmDAGB0_WRCLI_ASK_PENDING 0x005d #define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX 1 #define mmDAGB0_WRCLI_GO_PENDING 0x005e @@ -455,6 +459,10 @@ #define mmDAGB1_WR_DATA_CREDIT_BASE_IDX 1 #define mmDAGB1_WR_MISC_CREDIT 0x00d8 #define mmDAGB1_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE 0x00db +#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 +#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x00dc +#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 #define mmDAGB1_WRCLI_ASK_PENDING 0x00dd #define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX 1 #define mmDAGB1_WRCLI_GO_PENDING 0x00de @@ -707,6 +715,10 @@ #define mmDAGB2_WR_DATA_CREDIT_BASE_IDX 1 #define mmDAGB2_WR_MISC_CREDIT 0x0158 #define mmDAGB2_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE 0x015b +#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 +#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x015c +#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 #define mmDAGB2_WRCLI_ASK_PENDING 0x015d #define mmDAGB2_WRCLI_ASK_PENDING_BASE_IDX 1 #define mmDAGB2_WRCLI_GO_PENDING 0x015e @@ -959,6 +971,10 @@ #define mmDAGB3_WR_DATA_CREDIT_BASE_IDX 1 #define mmDAGB3_WR_MISC_CREDIT 0x01d8 #define mmDAGB3_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE 0x01db +#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 +#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x01dc +#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 #define mmDAGB3_WRCLI_ASK_PENDING 0x01dd #define mmDAGB3_WRCLI_ASK_PENDING_BASE_IDX 1 #define mmDAGB3_WRCLI_GO_PENDING 0x01de @@ -1211,6 +1227,10 @@ #define mmDAGB4_WR_DATA_CREDIT_BASE_IDX 1 #define mmDAGB4_WR_MISC_CREDIT 0x0258 #define mmDAGB4_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE 0x025b +#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 +#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x025c +#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 #define mmDAGB4_WRCLI_ASK_PENDING 0x025d #define mmDAGB4_WRCLI_ASK_PENDING_BASE_IDX 1 #define mmDAGB4_WRCLI_GO_PENDING 0x025e @@ -4793,6 +4813,10 @@ #define mmDAGB5_WR_DATA_CREDIT_BASE_IDX 1 #define mmDAGB5_WR_MISC_CREDIT 0x3058 #define mmDAGB5_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE 0x305b +#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 +#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x305c +#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 #define mmDAGB5_WRCLI_ASK_PENDING 0x305d #define mmDAGB5_WRCLI_ASK_PENDING_BASE_IDX 1 #define mmDAGB5_WRCLI_GO_PENDING 0x305e @@ -5045,6 +5069,10 @@ #define mmDAGB6_WR_DATA_CREDIT_BASE_IDX 1 #define mmDAGB6_WR_MISC_CREDIT 0x30d8 #define mmDAGB6_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE 0x30db +#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 +#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x30dc +#define 
mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 #define mmDAGB6_WRCLI_ASK_PENDING 0x30dd #define mmDAGB6_WRCLI_ASK_PENDING_BASE_IDX 1 #define mmDAGB6_WRCLI_GO_PENDING 0x30de @@ -5297,6 +5325,10 @@ #define mmDAGB7_WR_DATA_CREDIT_BASE_IDX 1 #define mmDAGB7_WR_MISC_CREDIT 0x3158 #define mmDAGB7_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE 0x315b +#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1 +#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x315c +#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1 #define mmDAGB7_WRCLI_ASK_PENDING 0x315d #define mmDAGB7_WRCLI_ASK_PENDING_BASE_IDX 1 #define mmDAGB7_WRCLI_GO_PENDING 0x315e diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h index 2969fbf282b7..5069d2fd467f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h @@ -1532,6 +1532,12 @@ //DAGB0_WRCLI_DBUS_GO_PENDING #define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 #define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE +#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 +#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE +#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 +#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL //DAGB0_DAGB_DLY #define DAGB0_DAGB_DLY__DLY__SHIFT 0x0 #define DAGB0_DAGB_DLY__CLI__SHIFT 0x8 @@ -3207,6 +3213,12 @@ //DAGB1_WRCLI_DBUS_GO_PENDING #define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 #define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE +#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 +#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE +#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 +#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL //DAGB1_DAGB_DLY #define DAGB1_DAGB_DLY__DLY__SHIFT 0x0 #define DAGB1_DAGB_DLY__CLI__SHIFT 0x8 @@ -4882,6 +4894,12 @@ //DAGB2_WRCLI_DBUS_GO_PENDING #define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 #define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE +#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 +#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE +#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 +#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL //DAGB2_DAGB_DLY #define DAGB2_DAGB_DLY__DLY__SHIFT 0x0 #define DAGB2_DAGB_DLY__CLI__SHIFT 0x8 @@ -6557,6 +6575,12 @@ //DAGB3_WRCLI_DBUS_GO_PENDING #define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 #define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE +#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 +#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE +#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 +#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL //DAGB3_DAGB_DLY #define DAGB3_DAGB_DLY__DLY__SHIFT 0x0 #define DAGB3_DAGB_DLY__CLI__SHIFT 0x8 @@ -8232,6 +8256,12 @@ //DAGB4_WRCLI_DBUS_GO_PENDING #define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 #define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL 
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE +#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 +#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL +//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE +#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 +#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL //DAGB4_DAGB_DLY #define DAGB4_DAGB_DLY__DLY__SHIFT 0x0 #define DAGB4_DAGB_DLY__CLI__SHIFT 0x8 @@ -28737,6 +28767,12 @@ //DAGB5_WRCLI_DBUS_GO_PENDING #define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 #define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE +#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 +#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE +#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 +#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL //DAGB5_DAGB_DLY #define DAGB5_DAGB_DLY__DLY__SHIFT 0x0 #define DAGB5_DAGB_DLY__CLI__SHIFT 0x8 @@ -30412,6 +30448,12 @@ //DAGB6_WRCLI_DBUS_GO_PENDING #define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 #define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE +#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 +#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE +#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 +#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL //DAGB6_DAGB_DLY #define DAGB6_DAGB_DLY__DLY__SHIFT 0x0 #define DAGB6_DAGB_DLY__CLI__SHIFT 0x8 @@ -32087,6 +32129,12 @@ //DAGB7_WRCLI_DBUS_GO_PENDING #define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 #define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE +#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0 +#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE +#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0 +#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL //DAGB7_DAGB_DLY #define DAGB7_DAGB_DLY__DLY__SHIFT 0x0 #define DAGB7_DAGB_DLY__CLI__SHIFT 0x8 diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 99d2d3092ea5..3fd8da5dc761 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2772,6 +2772,7 @@ int smu_get_power_limit(void *handle, switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 12): case IP_VERSION(13, 0, 14): case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 55ed6247eb61..9ac694c4f1f7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -275,8 +275,9 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu) int var = (adev->pdev->device & 0xF); char ucode_prefix[15];
- /* No need to load P2S tables in IOV mode */ - if (amdgpu_sriov_vf(adev)) + /* No need to load P2S tables in IOV mode or for smu v13.0.12 */ + if (amdgpu_sriov_vf(adev) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))) return 0;
if (!(adev->flags & AMD_IS_APU)) { diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index ed496fb32bf3..24ed1cd3caf1 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -131,7 +131,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, return false; }
- switch (mode->crtc_hdisplay) { + switch (mode->hdisplay) { case 640: vbios_mode->enh_table = &res_640x480[refresh_rate_index]; break; @@ -145,7 +145,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, vbios_mode->enh_table = &res_1152x864[refresh_rate_index]; break; case 1280: - if (mode->crtc_vdisplay == 800) + if (mode->vdisplay == 800) vbios_mode->enh_table = &res_1280x800[refresh_rate_index]; else vbios_mode->enh_table = &res_1280x1024[refresh_rate_index]; @@ -157,7 +157,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, vbios_mode->enh_table = &res_1440x900[refresh_rate_index]; break; case 1600: - if (mode->crtc_vdisplay == 900) + if (mode->vdisplay == 900) vbios_mode->enh_table = &res_1600x900[refresh_rate_index]; else vbios_mode->enh_table = &res_1600x1200[refresh_rate_index]; @@ -166,7 +166,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, vbios_mode->enh_table = &res_1680x1050[refresh_rate_index]; break; case 1920: - if (mode->crtc_vdisplay == 1080) + if (mode->vdisplay == 1080) vbios_mode->enh_table = &res_1920x1080[refresh_rate_index]; else vbios_mode->enh_table = &res_1920x1200[refresh_rate_index]; @@ -210,6 +210,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0; vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+ adjusted_mode->crtc_hdisplay = vbios_mode->enh_table->hde; adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht; adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder; adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder; @@ -219,6 +220,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, vbios_mode->enh_table->hfp + vbios_mode->enh_table->hsync);
+ adjusted_mode->crtc_vdisplay = vbios_mode->enh_table->vde; adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt; adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder; adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index 8f786592143b..24e1e11acf69 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -244,7 +244,9 @@ static const struct hdmi_codec_pdata codec_data = { .ops = &adv7511_codec_ops, .max_i2s_channels = 2, .i2s = 1, + .no_i2s_capture = 1, .spdif = 1, + .no_spdif_capture = 1, };
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 32902f77f00d..40e4e1b6c911 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -574,6 +574,30 @@ mode_valid(struct drm_atomic_state *state) return 0; }
+static int drm_atomic_check_valid_clones(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + struct drm_encoder *drm_enc; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + + drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) { + if (!drm_enc->possible_clones) { + DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id); + continue; + } + + if ((crtc_state->encoder_mask & drm_enc->possible_clones) != + crtc_state->encoder_mask) { + DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n", + crtc->base.id, crtc_state->encoder_mask); + return -EINVAL; + } + } + + return 0; +} + /** * drm_atomic_helper_check_modeset - validate state object for modeset changes * @dev: DRM device @@ -745,6 +769,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, ret = drm_atomic_add_affected_planes(state, crtc); if (ret != 0) return ret; + + ret = drm_atomic_check_valid_clones(state, crtc); + if (ret != 0) + return ret; }
/* diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index 103c185bb1c8..ca42e6081d27 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -324,7 +324,7 @@ EXPORT_SYMBOL(drm_buddy_init); */ void drm_buddy_fini(struct drm_buddy *mm) { - u64 root_size, size; + u64 root_size, size, start; unsigned int order; int i;
@@ -332,7 +332,8 @@ void drm_buddy_fini(struct drm_buddy *mm)
for (i = 0; i < mm->n_roots; ++i) { order = ilog2(size) - ilog2(mm->chunk_size); - __force_merge(mm, 0, size, order); + start = drm_buddy_block_offset(mm->roots[i]); + __force_merge(mm, start, start + size, order);
WARN_ON(!drm_buddy_block_is_free(mm->roots[i])); drm_block_free(mm, mm->roots[i]); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 13bc4c290b17..9edb3247c767 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -6596,6 +6596,7 @@ static void drm_reset_display_info(struct drm_connector *connector) info->has_hdmi_infoframe = false; info->rgb_quant_range_selectable = false; memset(&info->hdmi, 0, sizeof(info->hdmi)); + memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0; info->edid_hdmi_ycbcr444_dc_modes = 0; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 149b8e25da5b..426d0867882d 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -322,7 +322,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, return -ENOENT;
/* Don't allow imported objects to be mapped */ - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { ret = -EINVAL; goto out; } @@ -1152,7 +1152,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent, drm_vma_node_start(&obj->vma_node)); drm_printf_indent(p, indent, "size=%zu\n", obj->size); drm_printf_indent(p, indent, "imported=%s\n", - str_yes_no(obj->import_attach)); + str_yes_no(drm_gem_is_imported(obj)));
if (obj->funcs->print_info) obj->funcs->print_info(p, indent, obj); diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 9c11d3158324..20a50180d4d4 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -410,12 +410,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi) { - mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N); + if (dpi->conf->reg_h_fre_con) + mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N); }
static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi) { - if (dpi->conf->edge_sel_en) + if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con) mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN); }
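[Editor's note] The mtk_dpi fix above follows a common driver convention: a per-SoC configuration leaves an optional register offset at zero to mean "not present", and every writer checks for that before touching it. A self-contained sketch of the convention, with an invented register map and names:

```c
/* Sketch of the "zero offset means the register does not exist" guard
 * used above; the register space and field names here are invented. */
#include <stdint.h>
#include <stdio.h>

struct soc_conf {
	uint32_t reg_h_fre_con;  /* 0 when this SoC has no such register */
};

static uint32_t regs[0x100];  /* fake MMIO space for the example */

static void reg_mask(uint32_t offset, uint32_t val, uint32_t mask)
{
	regs[offset] = (regs[offset] & ~mask) | (val & mask);
}

static void config_2n_h_fre(const struct soc_conf *conf)
{
	/* Skip the write entirely on SoCs that lack the register. */
	if (conf->reg_h_fre_con)
		reg_mask(conf->reg_h_fre_con, 1u << 2, 1u << 2);
}

int main(void)
{
	struct soc_conf with    = { .reg_h_fre_con = 0xe0 };
	struct soc_conf without = { .reg_h_fre_con = 0 };

	config_2n_h_fre(&with);
	config_2n_h_fre(&without);  /* harmless no-op */
	printf("0x%02x = 0x%08x\n", with.reg_h_fre_con, regs[0xe0]);
	return 0;
}
```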
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index 9c83bab0a530..fc84ca214f24 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -58,7 +58,7 @@ #include <linux/parser.h>
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE -#define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16 +#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
struct r535_gsp_msg { u8 auth_tag_buffer[16]; diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 767e47a2b0c1..663af985d1b3 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1983,6 +1983,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+ EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
{ /* sentinal */ } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 5880d87fe6b3..5d7df4c3b08c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -157,6 +157,7 @@ struct vop2_video_port { struct drm_crtc crtc; struct vop2 *vop2; struct clk *dclk; + struct clk *dclk_src; unsigned int id; const struct vop2_video_port_data *data;
@@ -211,6 +212,7 @@ struct vop2 { struct clk *hclk; struct clk *aclk; struct clk *pclk; + struct clk *pll_hdmiphy0;
/* optional internal rgb encoder */ struct rockchip_rgb *rgb; @@ -219,6 +221,8 @@ struct vop2 { struct vop2_win win[]; };
+#define VOP2_MAX_DCLK_RATE 600000000 + #define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \ (x) == ROCKCHIP_VOP2_EP_HDMI1)
@@ -1051,6 +1055,9 @@ static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
vop2_crtc_disable_irq(vp, VP_INT_DSP_HOLD_VALID);
+ if (vp->dclk_src) + clk_set_parent(vp->dclk, vp->dclk_src); + clk_disable_unprepare(vp->dclk);
vop2->enable_count--; @@ -1432,10 +1439,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
rb_swap = vop2_win_rb_swap(fb->format->format); vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap); - if (!vop2_cluster_window(win)) { - uv_swap = vop2_win_uv_swap(fb->format->format); - vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap); - } + uv_swap = vop2_win_uv_swap(fb->format->format); + vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
if (fb->format->is_yuv) { vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4)); @@ -2073,6 +2078,27 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2_vp_write(vp, RK3568_VP_MIPI_CTRL, 0);
+ /* + * Switch to HDMI PHY PLL as DCLK source for display modes up + * to 4K@60Hz, if available, otherwise keep using the system CRU. + */ + if (vop2->pll_hdmiphy0 && clock <= VOP2_MAX_DCLK_RATE) { + drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { + struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); + + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { + if (!vp->dclk_src) + vp->dclk_src = clk_get_parent(vp->dclk); + + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0); + if (ret < 0) + drm_warn(vop2->drm, + "Could not switch to HDMI0 PHY PLL: %d\n", ret); + break; + } + } + } + clk_set_rate(vp->dclk, clock);
vop2_post_config(crtc); @@ -3244,6 +3270,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(vop2->pclk); }
+ vop2->pll_hdmiphy0 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy0"); + if (IS_ERR(vop2->pll_hdmiphy0)) { + drm_err(vop2->drm, "failed to get pll_hdmiphy0\n"); + return PTR_ERR(vop2->pll_hdmiphy0); + } + vop2->irq = platform_get_irq(pdev, 0); if (vop2->irq < 0) { drm_err(vop2->drm, "cannot find irq for vop2\n"); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index d7ff1f5fa481..7c17108da7d2 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -286,11 +286,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if (ret) return ret;
+ v3d->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(v3d->clk)) + return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n"); + + ret = clk_prepare_enable(v3d->clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the V3D clock\n"); + return ret; + } + mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); ret = dma_set_mask_and_coherent(dev, mask); if (ret) - return ret; + goto clk_disable;
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
@@ -310,28 +320,29 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) ret = PTR_ERR(v3d->reset);
if (ret == -EPROBE_DEFER) - return ret; + goto clk_disable;
v3d->reset = NULL; ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); if (ret) { dev_err(dev, "Failed to get reset control or bridge regs\n"); - return ret; + goto clk_disable; } }
if (v3d->ver < 41) { ret = map_regs(v3d, &v3d->gca_regs, "gca"); if (ret) - return ret; + goto clk_disable; }
v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); if (!v3d->mmu_scratch) { dev_err(dev, "Failed to allocate MMU scratch page\n"); - return -ENOMEM; + ret = -ENOMEM; + goto clk_disable; }
ret = v3d_gem_init(drm); @@ -360,6 +371,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) v3d_gem_destroy(drm); dma_free: dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); +clk_disable: + clk_disable_unprepare(v3d->clk); return ret; }
@@ -377,6 +390,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); + + clk_disable_unprepare(v3d->clk); }
static struct platform_driver v3d_platform_driver = { diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 84e327b56925..8acc4640f0a2 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -702,6 +702,21 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, goto out; }
+ /* Reject BO eviction if BO is bound to current VM. */ + if (evict && ctx->resv) { + struct drm_gpuvm_bo *vm_bo; + + drm_gem_for_each_gpuvm_bo(vm_bo, &bo->ttm.base) { + struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); + + if (xe_vm_resv(vm) == ctx->resv && + xe_vm_in_preempt_fence_mode(vm)) { + ret = -EBUSY; + goto out; + } + } + } + /* * Failed multi-hop where the old_mem is still marked as * TTM_PL_FLAG_TEMPORARY, should just be a dummy move. @@ -1975,6 +1990,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data, struct xe_file *xef = to_xe_file(file); struct drm_xe_gem_create *args = data; struct xe_vm *vm = NULL; + ktime_t end = 0; struct xe_bo *bo; unsigned int bo_flags; u32 handle; @@ -2047,6 +2063,10 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data, vm = xe_vm_lookup(xef, args->vm_id); if (XE_IOCTL_DBG(xe, !vm)) return -ENOENT; + } + +retry: + if (vm) { err = xe_vm_lock(vm, true); if (err) goto out_vm; @@ -2060,6 +2080,8 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo)) { err = PTR_ERR(bo); + if (xe_vm_validate_should_retry(NULL, err, &end)) + goto retry; goto out_vm; }
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index fe4319eb13fd..278ce1c37b39 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -147,7 +147,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf, return -EINVAL;
if (xe->wedged.mode == wedged_mode) - return 0; + return size;
xe->wedged.mode = wedged_mode;
@@ -156,6 +156,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf, ret = xe_guc_ads_scheduler_policy_toggle_reset(>->uc.guc.ads); if (ret) { xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n"); + xe_pm_runtime_put(xe); return -EIO; } } diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index bb85208cf1a9..23e02372a49d 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -694,7 +694,9 @@ int xe_device_probe(struct xe_device *xe) }
/* Allocate and map stolen after potential VRAM resize */ - xe_ttm_stolen_mgr_init(xe); + err = xe_ttm_stolen_mgr_init(xe); + if (err) + return err;
/* * Now that GT is initialized (TTM in particular), @@ -706,6 +708,12 @@ int xe_device_probe(struct xe_device *xe) if (err) goto err;
+ for_each_tile(tile, xe, id) { + err = xe_tile_init(tile); + if (err) + goto err; + } + for_each_gt(gt, xe, id) { last_gt = id;
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c index 904cf47925aa..ed9183599e31 100644 --- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c +++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c @@ -28,10 +28,10 @@ "\n" \ "#endif\n"
-static void print_usage(FILE *f) +static void print_usage(FILE *f, const char *progname) { fprintf(f, "usage: %s <input-rule-file> <generated-c-source-file> <generated-c-header-file>\n", - program_invocation_short_name); + progname); }
static void print_parse_error(const char *err_msg, const char *line, @@ -144,7 +144,7 @@ int main(int argc, const char *argv[])
if (argc < 3) { fprintf(stderr, "ERROR: wrong arguments\n"); - print_usage(stderr); + print_usage(stderr, argv[0]); return 1; }
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index c9ed996b9cb0..786f0dba4143 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -323,6 +323,26 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) return err; }
+static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset) +{ + int err = 0; + + xe_gt_assert(gt, vfid); + lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); + + if (reset) + err = pf_send_vf_cfg_reset(gt, vfid); + if (!err) + err = pf_push_full_vf_config(gt, vfid); + + return err; +} + +static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid) +{ + return pf_push_vf_cfg(gt, vfid, true); +} + static u64 pf_get_ggtt_alignment(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); @@ -419,6 +439,10 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) return err;
pf_release_vf_config_ggtt(gt, config); + + err = pf_refresh_vf_cfg(gt, vfid); + if (unlikely(err)) + return err; } xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
@@ -744,6 +768,10 @@ static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctx return ret;
pf_release_config_ctxs(gt, config); + + ret = pf_refresh_vf_cfg(gt, vfid); + if (unlikely(ret)) + return ret; }
if (!num_ctxs) @@ -1041,6 +1069,10 @@ static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs) return ret;
pf_release_config_dbs(gt, config); + + ret = pf_refresh_vf_cfg(gt, vfid); + if (unlikely(ret)) + return ret; }
if (!num_dbs) @@ -2003,10 +2035,7 @@ int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh xe_gt_assert(gt, vfid);
mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); - if (refresh) - err = pf_send_vf_cfg_reset(gt, vfid); - if (!err) - err = pf_push_full_vf_config(gt, vfid); + err = pf_push_vf_cfg(gt, vfid, refresh); mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
if (unlikely(err)) { diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index f982d6f9f218..29badbd829ab 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -46,12 +46,19 @@ static int guc_action_vf_reset(struct xe_guc *guc) return ret > 0 ? -EPROTO : ret; }
+#define GUC_RESET_VF_STATE_RETRY_MAX 10 static int vf_reset_guc_state(struct xe_gt *gt) { + unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX; struct xe_guc *guc = >->uc.guc; int err;
- err = guc_action_vf_reset(guc); + do { + err = guc_action_vf_reset(guc); + if (!err || err != -ETIMEDOUT) + break; + } while (--retry); + if (unlikely(err)) xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err)); return err; @@ -228,6 +235,9 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt) { int err;
+ if (!xe_device_uc_enabled(gt_to_xe(gt))) + return -ENODEV; + err = vf_reset_guc_state(gt); if (unlikely(err)) return err; diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 98a450271f5c..3155825fa46a 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -406,6 +406,28 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt, return send_tlb_invalidation(>->uc.guc, fence, action, len); }
+/** + * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM + * @gt: graphics tile + * @vm: VM to invalidate + * + * Invalidate entire VM's address space + */ +void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm) +{ + struct xe_gt_tlb_invalidation_fence fence; + u64 range = 1ull << vm->xe->info.va_bits; + int ret; + + xe_gt_tlb_invalidation_fence_init(gt, &fence, true); + + ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid); + if (ret < 0) + return; + + xe_gt_tlb_invalidation_fence_wait(&fence); +} + /** * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA * @gt: graphics tile diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index 672acfcdf0d7..abe9b03d543e 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -12,6 +12,7 @@
struct xe_gt; struct xe_guc; +struct xe_vm; struct xe_vma;
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt); @@ -21,6 +22,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt); int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, struct xe_vma *vma); +void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm); int xe_gt_tlb_invalidation_range(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, u64 start, u64 end, u32 asid); diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c index ade6162dc259..0e5b43e1518e 100644 --- a/drivers/gpu/drm/xe/xe_guc_relay.c +++ b/drivers/gpu/drm/xe/xe_guc_relay.c @@ -224,7 +224,7 @@ __relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u * with CTB lock held which is marked as used in the reclaim path. * Btw, that's one of the reason why we use mempool here! */ - txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL); + txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_NOWAIT); if (!txn) return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 448766033690..d306ed0a0443 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -535,6 +535,7 @@ static ssize_t xe_oa_read(struct file *file, char __user *buf, mutex_unlock(&stream->stream_lock); } while (!offset && !ret); } else { + xe_oa_buffer_check_unlocked(stream); mutex_lock(&stream->stream_lock); ret = __xe_oa_read(stream, buf, count, &offset); mutex_unlock(&stream->stream_lock); diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c index aaceee748287..09ee8a06fe2e 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.c +++ b/drivers/gpu/drm/xe/xe_pci_sriov.c @@ -62,6 +62,55 @@ static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs) xe_gt_sriov_pf_control_trigger_flr(gt, n); }
+static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + + xe_assert(xe, IS_SRIOV_PF(xe)); + + /* caller must use pci_dev_put() */ + return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + pdev->bus->number, + pci_iov_virtfn_devfn(pdev, vf_id)); +} + +static void pf_link_vfs(struct xe_device *xe, int num_vfs) +{ + struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev); + struct device_link *link; + struct pci_dev *pdev_vf; + unsigned int n; + + /* + * When both PF and VF devices are enabled on the host, during system + * resume they are resuming in parallel. + * + * But PF has to complete the provision of VF first to allow any VFs to + * successfully resume. + * + * Create a parent-child device link between PF and VF devices that will + * enforce correct resume order. + */ + for (n = 1; n <= num_vfs; n++) { + pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1); + + /* unlikely, something weird is happening, abort */ + if (!pdev_vf) { + xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n", + n, str_plural(num_vfs)); + break; + } + + link = device_link_add(&pdev_vf->dev, &pdev_pf->dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + /* unlikely and harmless, continue with other VFs */ + if (!link) + xe_sriov_notice(xe, "Failed linking VF%u\n", n); + + pci_dev_put(pdev_vf); + } +} + static int pf_enable_vfs(struct xe_device *xe, int num_vfs) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -92,6 +141,8 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) if (err < 0) goto failed;
+ pf_link_vfs(xe, num_vfs); + xe_sriov_info(xe, "Enabled %u of %u VF%s\n", num_vfs, total_vfs, str_plural(total_vfs)); return num_vfs; diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index 230cf47fb9c5..fb94ff55c736 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -217,6 +217,20 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred) xe_pt_free(pt); }
+/** + * xe_pt_clear() - Clear a page-table. + * @xe: xe device. + * @pt: The page-table. + * + * Clears page-table by setting to zero. + */ +void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt) +{ + struct iosys_map *map = &pt->bo->vmap; + + xe_map_memset(xe, map, 0, 0, SZ_4K); +} + /** * DOC: Pagetable building * diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h index 9ab386431cad..8e43912ae8e9 100644 --- a/drivers/gpu/drm/xe/xe_pt.h +++ b/drivers/gpu/drm/xe/xe_pt.h @@ -13,6 +13,7 @@ struct dma_fence; struct xe_bo; struct xe_device; struct xe_exec_queue; +struct xe_svm_range; struct xe_sync_entry; struct xe_tile; struct xe_vm; @@ -35,6 +36,8 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt); + int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops); struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops); diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c index fe2cb2a96f78..1d4254759006 100644 --- a/drivers/gpu/drm/xe/xe_sa.c +++ b/drivers/gpu/drm/xe/xe_sa.c @@ -57,8 +57,6 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 } sa_manager->bo = bo; sa_manager->is_iomem = bo->vmap.is_iomem; - - drm_suballoc_manager_init(&sa_manager->base, managed_size, align); sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
if (bo->vmap.is_iomem) { @@ -72,6 +70,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size); }
+ drm_suballoc_manager_init(&sa_manager->base, managed_size, align); ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini, sa_manager); if (ret) diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c index dda5268507d8..36c87d7c72fb 100644 --- a/drivers/gpu/drm/xe/xe_tile.c +++ b/drivers/gpu/drm/xe/xe_tile.c @@ -167,17 +167,19 @@ int xe_tile_init_noalloc(struct xe_tile *tile) if (err) return err;
+ xe_wa_apply_tile_workarounds(tile); + + return xe_tile_sysfs_init(tile); +} + +int xe_tile_init(struct xe_tile *tile) +{ tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16); if (IS_ERR(tile->mem.kernel_bb_pool)) return PTR_ERR(tile->mem.kernel_bb_pool);
- xe_wa_apply_tile_workarounds(tile); - - err = xe_tile_sysfs_init(tile); - return 0; } - void xe_tile_migrate_wait(struct xe_tile *tile) { xe_migrate_wait(tile->migrate); diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h index 1c9e42ade6b0..eb939316d55b 100644 --- a/drivers/gpu/drm/xe/xe_tile.h +++ b/drivers/gpu/drm/xe/xe_tile.h @@ -12,6 +12,7 @@ struct xe_tile;
int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id); int xe_tile_init_noalloc(struct xe_tile *tile); +int xe_tile_init(struct xe_tile *tile);
void xe_tile_migrate_wait(struct xe_tile *tile);
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index f7113cf6109d..ef84fa757b26 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -201,17 +201,16 @@ static u64 detect_stolen(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) #endif }
-void xe_ttm_stolen_mgr_init(struct xe_device *xe) +int xe_ttm_stolen_mgr_init(struct xe_device *xe) { - struct xe_ttm_stolen_mgr *mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL); struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + struct xe_ttm_stolen_mgr *mgr; u64 stolen_size, io_size; int err;
- if (!mgr) { - drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n"); - return; - } + mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return -ENOMEM;
if (IS_SRIOV_VF(xe)) stolen_size = 0; @@ -224,7 +223,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (!stolen_size) { drm_dbg_kms(&xe->drm, "No stolen memory support\n"); - return; + return 0; }
/* @@ -240,7 +239,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe) io_size, PAGE_SIZE); if (err) { drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err); - return; + return err; }
drm_dbg_kms(&xe->drm, "Initialized stolen memory support with %llu bytes\n", @@ -248,6 +247,8 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (io_size) mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, io_size); + + return 0; }
u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset) diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h index 1777245ff810..8e877d1e839b 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h @@ -12,7 +12,7 @@ struct ttm_resource; struct xe_bo; struct xe_device;
-void xe_ttm_stolen_mgr_init(struct xe_device *xe); +int xe_ttm_stolen_mgr_init(struct xe_device *xe); int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem); bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe); u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 872de052d670..de257a032225 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -8,6 +8,7 @@ #include <linux/dma-fence-array.h> #include <linux/nospec.h>
+#include <drm/drm_drv.h> #include <drm/drm_exec.h> #include <drm/drm_print.h> #include <drm/ttm/ttm_execbuf_util.h> @@ -1581,9 +1582,40 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
static void xe_vm_close(struct xe_vm *vm) { + struct xe_device *xe = vm->xe; + bool bound; + int idx; + + bound = drm_dev_enter(&xe->drm, &idx); + down_write(&vm->lock); + vm->size = 0; + + if (!((vm->flags & XE_VM_FLAG_MIGRATION))) { + struct xe_tile *tile; + struct xe_gt *gt; + u8 id; + + /* Wait for pending binds */ + dma_resv_wait_timeout(xe_vm_resv(vm), + DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); + + if (bound) { + for_each_tile(tile, xe, id) + if (vm->pt_root[id]) + xe_pt_clear(xe, vm->pt_root[id]); + + for_each_gt(gt, xe, id) + xe_gt_tlb_invalidation_vm(gt, vm); + } + } + up_write(&vm->lock); + + if (bound) + drm_dev_exit(idx); }
void xe_vm_close_and_put(struct xe_vm *vm) diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c index c439ed2f16db..af6bc76dbf64 100644 --- a/drivers/hid/usbhid/usbkbd.c +++ b/drivers/hid/usbhid/usbkbd.c @@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type, return -1;
spin_lock_irqsave(&kbd->leds_lock, flags); - kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | + kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) | (!!test_bit(LED_NUML, dev->led));
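[Editor's note] The usbkbd one-liner above fixes a duplicated shift: in the HID boot-keyboard LED output report, Compose and Kana occupy distinct bits (3 and 4), while the old code packed both at bit 3, so the Kana LED could never be driven independently. A small standalone sketch of assembling that LED byte (helper and surrounding code are illustrative, the bit layout follows the HID usage tables):

```c
/* Sketch of packing the HID boot-keyboard LED output report byte.
 * Bit positions follow the HID usage tables; the rest is illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LED_NUML    0  /* bit 0: Num Lock    */
#define LED_CAPSL   1  /* bit 1: Caps Lock   */
#define LED_SCROLLL 2  /* bit 2: Scroll Lock */
#define LED_COMPOSE 3  /* bit 3: Compose     */
#define LED_KANA    4  /* bit 4: Kana        */

static uint8_t pack_leds(bool num, bool caps, bool scroll,
			 bool compose, bool kana)
{
	return (uint8_t)((!!kana    << LED_KANA)    |
			 (!!compose << LED_COMPOSE) |
			 (!!scroll  << LED_SCROLLL) |
			 (!!caps    << LED_CAPSL)   |
			 (!!num     << LED_NUML));
}

int main(void)
{
	/* Kana and Compose now land in different bits (0x18, not 0x08). */
	printf("0x%02x\n", pack_leds(false, false, false, true, true));
	return 0;
}
```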
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index f5bdf842040e..b043fbd15c9d 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -73,7 +73,7 @@ #define DELL_SMM_LEGACY_EXECUTE 0x1
#define DELL_SMM_NO_TEMP 10 -#define DELL_SMM_NO_FANS 3 +#define DELL_SMM_NO_FANS 4
struct smm_regs { unsigned int eax; @@ -1074,11 +1074,14 @@ static const struct hwmon_channel_info * const dell_smm_info[] = { HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | HWMON_F_TARGET, HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | + HWMON_F_TARGET, + HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | HWMON_F_TARGET ), HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE, HWMON_PWM_INPUT, + HWMON_PWM_INPUT, HWMON_PWM_INPUT ), NULL diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index d92c536be9af..b779240328d5 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -393,7 +393,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev, if (state >= fan_data->num_speed) return -EINVAL;
+ mutex_lock(&fan_data->lock); + set_fan_speed(fan_data, state); + + mutex_unlock(&fan_data->lock); + return 0; }
@@ -489,7 +494,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
static void gpio_fan_stop(void *data) { + struct gpio_fan_data *fan_data = data; + + mutex_lock(&fan_data->lock); set_fan_speed(data, 0); + mutex_unlock(&fan_data->lock); }
static int gpio_fan_probe(struct platform_device *pdev) @@ -562,7 +571,9 @@ static int gpio_fan_suspend(struct device *dev)
if (fan_data->gpios) { fan_data->resume_speed = fan_data->speed_index; + mutex_lock(&fan_data->lock); set_fan_speed(fan_data, 0); + mutex_unlock(&fan_data->lock); }
return 0; @@ -572,8 +583,11 @@ static int gpio_fan_resume(struct device *dev) { struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->gpios) + if (fan_data->gpios) { + mutex_lock(&fan_data->lock); set_fan_speed(fan_data, fan_data->resume_speed); + mutex_unlock(&fan_data->lock); + }
return 0; } diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 92d82faf237f..4e05077e4256 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -105,7 +105,7 @@ struct xgene_hwmon_dev {
phys_addr_t comm_base_addr; void *pcc_comm_addr; - u64 usecs_lat; + unsigned int usecs_lat; };
/* diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index aea9ac9c4bd0..7948597d483d 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -84,7 +84,7 @@ struct etb_drvdata { struct clk *atclk; struct coresight_device *csdev; struct miscdevice miscdev; - spinlock_t spinlock; + raw_spinlock_t spinlock; local_t reading; pid_t pid; u8 *buf; @@ -145,7 +145,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev) unsigned long flags; struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't messup with perf sessions. */ if (coresight_get_mode(csdev) == CS_MODE_PERF) { @@ -163,7 +163,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
csdev->refcnt++; out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; }
@@ -176,7 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data) struct perf_output_handle *handle = data; struct cs_buffers *buf = etm_perf_sink_config(handle);
- spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* No need to continue if the component is already in used by sysFS. */ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) { @@ -219,7 +219,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data) }
out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; }
@@ -352,11 +352,11 @@ static int etb_disable(struct coresight_device *csdev) struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); unsigned long flags;
- spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags);
csdev->refcnt--; if (csdev->refcnt) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EBUSY; }
@@ -366,7 +366,7 @@ static int etb_disable(struct coresight_device *csdev) /* Dissociate from monitored process. */ drvdata->pid = -1; coresight_set_mode(csdev, CS_MODE_DISABLED); - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "ETB disabled\n"); return 0; @@ -443,7 +443,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
- spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */ if (csdev->refcnt != 1) @@ -566,7 +566,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev, __etb_enable_hw(drvdata); CS_LOCK(drvdata->base); out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read; } @@ -587,13 +587,13 @@ static void etb_dump(struct etb_drvdata *drvdata) { unsigned long flags;
- spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) { __etb_disable_hw(drvdata); etb_dump_hw(drvdata); __etb_enable_hw(drvdata); } - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&drvdata->csdev->dev, "ETB dumped\n"); } @@ -746,7 +746,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id) drvdata->base = base; desc.access = CSDEV_ACCESS_IOMEM(base);
- spin_lock_init(&drvdata->spinlock); + raw_spin_lock_init(&drvdata->spinlock);
drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig index 4b6359326ede..4f7d2b6d79e2 100644 --- a/drivers/hwtracing/intel_th/Kconfig +++ b/drivers/hwtracing/intel_th/Kconfig @@ -60,6 +60,7 @@ config INTEL_TH_STH
config INTEL_TH_MSU tristate "Intel(R) Trace Hub Memory Storage Unit" + depends on MMU help Memory Storage Unit (MSU) trace output device enables storing STP traces to system memory. It supports single diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 66123d684ac9..93b65a9731d7 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -19,6 +19,7 @@ #include <linux/io.h> #include <linux/workqueue.h> #include <linux/dma-mapping.h> +#include <linux/pfn_t.h>
#ifdef CONFIG_X86 #include <asm/set_memory.h> @@ -967,7 +968,6 @@ static void msc_buffer_contig_free(struct msc *msc) for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { struct page *page = virt_to_page(msc->base + off);
- page->mapping = NULL; __free_page(page); }
@@ -1149,9 +1149,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) int i;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) { - struct page *page = msc_sg_page(sg); - - page->mapping = NULL; dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, sg_virt(sg), sg_dma_address(sg)); } @@ -1592,22 +1589,10 @@ static void msc_mmap_close(struct vm_area_struct *vma) { struct msc_iter *iter = vma->vm_file->private_data; struct msc *msc = iter->msc; - unsigned long pg;
if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex)) return;
- /* drop page _refcounts */ - for (pg = 0; pg < msc->nr_pages; pg++) { - struct page *page = msc_buffer_get_page(msc, pg); - - if (WARN_ON_ONCE(!page)) - continue; - - if (page->mapping) - page->mapping = NULL; - } - /* last mapping -- drop user_count */ atomic_dec(&msc->user_count); mutex_unlock(&msc->buf_mutex); @@ -1617,16 +1602,14 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf) { struct msc_iter *iter = vmf->vma->vm_file->private_data; struct msc *msc = iter->msc; + struct page *page;
- vmf->page = msc_buffer_get_page(msc, vmf->pgoff); - if (!vmf->page) + page = msc_buffer_get_page(msc, vmf->pgoff); + if (!page) return VM_FAULT_SIGBUS;
- get_page(vmf->page); - vmf->page->mapping = vmf->vma->vm_file->f_mapping; - vmf->page->index = vmf->pgoff; - - return 0; + get_page(page); + return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page)); }
static const struct vm_operations_struct msc_mmap_ops = { @@ -1667,7 +1650,7 @@ static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma) atomic_dec(&msc->user_count);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY); + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP); vma->vm_ops = &msc_mmap_ops; return ret; } diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 7b2c5d71a7fc..5ea6d40373e7 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -207,6 +207,7 @@ static const struct software_node dgpu_node = { static int i2c_dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct device *device = &pdev->dev; struct dw_i2c_dev *dev; struct i2c_adapter *adap; int r; @@ -214,25 +215,22 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, struct dw_scl_sda_cfg *cfg;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) - return dev_err_probe(&pdev->dev, -EINVAL, - "Invalid driver data %ld\n", + return dev_err_probe(device, -EINVAL, "Invalid driver data %ld\n", id->driver_data);
controller = &dw_pci_controllers[id->driver_data];
r = pcim_enable_device(pdev); if (r) - return dev_err_probe(&pdev->dev, r, - "Failed to enable I2C PCI device\n"); + return dev_err_probe(device, r, "Failed to enable I2C PCI device\n");
pci_set_master(pdev);
r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); if (r) - return dev_err_probe(&pdev->dev, r, - "I/O memory remapping failed\n"); + return dev_err_probe(device, r, "I/O memory remapping failed\n");
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM;
@@ -242,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->get_clk_rate_khz = controller->get_clk_rate_khz; dev->base = pcim_iomap_table(pdev)[0]; - dev->dev = &pdev->dev; + dev->dev = device; dev->irq = pci_irq_vector(pdev, 0); dev->flags |= controller->flags;
@@ -280,15 +278,17 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) { dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node); - if (IS_ERR(dev->slave)) - return dev_err_probe(dev->dev, PTR_ERR(dev->slave), + if (IS_ERR(dev->slave)) { + i2c_del_adapter(&dev->adapter); + return dev_err_probe(device, PTR_ERR(dev->slave), "register UCSI failed\n"); + } }
- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); - pm_runtime_allow(&pdev->dev); + pm_runtime_set_autosuspend_delay(device, 1000); + pm_runtime_use_autosuspend(device); + pm_runtime_put_autosuspend(device); + pm_runtime_allow(device);
return 0; } @@ -296,11 +296,12 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, static void i2c_dw_pci_remove(struct pci_dev *pdev) { struct dw_i2c_dev *dev = pci_get_drvdata(pdev); + struct device *device = &pdev->dev;
i2c_dw_disable(dev);
- pm_runtime_forbid(&pdev->dev); - pm_runtime_get_noresume(&pdev->dev); + pm_runtime_forbid(device); + pm_runtime_get_noresume(device);
i2c_del_adapter(&dev->adapter); } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2d0c7348e491..a3e86930bf41 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -205,6 +205,7 @@ static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
static int dw_i2c_plat_probe(struct platform_device *pdev) { + struct device *device = &pdev->dev; struct i2c_adapter *adap; struct dw_i2c_dev *dev; int irq, ret; @@ -213,15 +214,15 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (irq < 0) return irq;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL); + dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM;
- dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); - if (device_property_present(&pdev->dev, "wx,i2c-snps-model")) + dev->flags = (uintptr_t)device_get_match_data(device); + if (device_property_present(device, "wx,i2c-snps-model")) dev->flags = MODEL_WANGXUN_SP | ACCESS_POLLING;
- dev->dev = &pdev->dev; + dev->dev = device; dev->irq = irq; platform_set_drvdata(pdev, dev);
@@ -229,7 +230,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (ret) return ret;
- dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + dev->rst = devm_reset_control_get_optional_exclusive(device, NULL); if (IS_ERR(dev->rst)) return PTR_ERR(dev->rst);
@@ -246,13 +247,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) i2c_dw_configure(dev);
/* Optional interface clock */ - dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); + dev->pclk = devm_clk_get_optional(device, "pclk"); if (IS_ERR(dev->pclk)) { ret = PTR_ERR(dev->pclk); goto exit_reset; }
- dev->clk = devm_clk_get_optional(&pdev->dev, NULL); + dev->clk = devm_clk_get_optional(device, NULL); if (IS_ERR(dev->clk)) { ret = PTR_ERR(dev->clk); goto exit_reset; @@ -280,28 +281,24 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED; adap->nr = -1;
- if (dev->flags & ACCESS_NO_IRQ_SUSPEND) { - dev_pm_set_driver_flags(&pdev->dev, - DPM_FLAG_SMART_PREPARE); - } else { - dev_pm_set_driver_flags(&pdev->dev, - DPM_FLAG_SMART_PREPARE | - DPM_FLAG_SMART_SUSPEND); - } + if (dev->flags & ACCESS_NO_IRQ_SUSPEND) + dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE); + else + dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND);
- device_enable_async_suspend(&pdev->dev); + device_enable_async_suspend(device);
/* The code below assumes runtime PM to be disabled. */ - WARN_ON(pm_runtime_enabled(&pdev->dev)); + WARN_ON(pm_runtime_enabled(device));
- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(device, 1000); + pm_runtime_use_autosuspend(device); + pm_runtime_set_active(device);
if (dev->shared_with_punit) - pm_runtime_get_noresume(&pdev->dev); + pm_runtime_get_noresume(device);
- pm_runtime_enable(&pdev->dev); + pm_runtime_enable(device);
ret = i2c_dw_probe(dev); if (ret) @@ -319,15 +316,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) static void dw_i2c_plat_remove(struct platform_device *pdev) { struct dw_i2c_dev *dev = platform_get_drvdata(pdev); + struct device *device = &pdev->dev;
- pm_runtime_get_sync(&pdev->dev); + pm_runtime_get_sync(device);
i2c_del_adapter(&dev->adapter);
i2c_dw_disable(dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_put_sync(&pdev->dev); + pm_runtime_dont_use_autosuspend(device); + pm_runtime_put_sync(device); dw_i2c_plat_pm_cleanup(dev);
i2c_dw_remove_lock_support(dev); diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index 4d76e71cdd4b..afc1a8171f59 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1503,7 +1503,10 @@ static int i2c_pxa_probe(struct platform_device *dev) i2c->adap.name); }
- clk_prepare_enable(i2c->clk); + ret = clk_prepare_enable(i2c->clk); + if (ret) + return dev_err_probe(&dev->dev, ret, + "failed to enable clock\n");
if (i2c->use_pio) { i2c->adap.algo = &i2c_pxa_pio_algorithm; diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index d480162a4d39..eb97abcb4cd3 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -14,6 +14,7 @@ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/i2c.h> +#include <linux/interconnect.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> @@ -150,6 +151,8 @@ /* TAG length for DATA READ in RX FIFO */ #define READ_RX_TAGS_LEN 2
+#define QUP_BUS_WIDTH 8 + static unsigned int scl_freq; module_param_named(scl_freq, scl_freq, uint, 0444); MODULE_PARM_DESC(scl_freq, "SCL frequency override"); @@ -227,6 +230,7 @@ struct qup_i2c_dev { int irq; struct clk *clk; struct clk *pclk; + struct icc_path *icc_path; struct i2c_adapter adap;
int clk_ctl; @@ -255,6 +259,10 @@ struct qup_i2c_dev { /* To configure when bus is in run state */ u32 config_run;
+ /* bandwidth votes */ + u32 src_clk_freq; + u32 cur_bw_clk_freq; + /* dma parameters */ bool is_dma; /* To check if the current transfer is using DMA */ @@ -453,6 +461,23 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len) return ret; }
+static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq) +{ + u32 needed_peak_bw; + int ret; + + if (qup->cur_bw_clk_freq == clk_freq) + return 0; + + needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH); + ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw); + if (ret) + return ret; + + qup->cur_bw_clk_freq = clk_freq; + return 0; +} + static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; @@ -838,6 +863,10 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int ret = 0; int idx = 0;
+ ret = qup_i2c_vote_bw(qup, qup->src_clk_freq); + if (ret) + return ret; + enable_irq(qup->irq); ret = qup_i2c_req_dma(qup);
@@ -1643,6 +1672,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup) config = readl(qup->base + QUP_CONFIG); config |= QUP_CLOCK_AUTO_GATE; writel(config, qup->base + QUP_CONFIG); + qup_i2c_vote_bw(qup, 0); clk_disable_unprepare(qup->pclk); }
@@ -1743,6 +1773,11 @@ static int qup_i2c_probe(struct platform_device *pdev) goto fail_dma; } qup->is_dma = true; + + qup->icc_path = devm_of_icc_get(&pdev->dev, NULL); + if (IS_ERR(qup->icc_path)) + return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path), + "failed to get interconnect path\n"); }
nodma: @@ -1791,6 +1826,7 @@ static int qup_i2c_probe(struct platform_device *pdev) qup_i2c_enable_clocks(qup); src_clk_freq = clk_get_rate(qup->clk); } + qup->src_clk_freq = src_clk_freq;
/* * Bootloaders might leave a pending interrupt on certain QUP's, diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 7911814ad82a..474a96ebda22 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -511,6 +511,8 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) queue_work(master->base.wq, &master->hj_work); break; case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST: + svc_i3c_master_emit_stop(master); + break; default: break; } @@ -859,6 +861,8 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master, u32 reg; int ret, i;
+ svc_i3c_master_flush_fifo(master); + while (true) { /* SVC_I3C_MCTRL_REQUEST_PROC_DAA have two mode, ENTER DAA or PROCESS DAA. * diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c index 0f36138a7144..58a25792cec3 100644 --- a/drivers/iio/adc/ad7944.c +++ b/drivers/iio/adc/ad7944.c @@ -98,6 +98,9 @@ struct ad7944_chip_info { const struct iio_chan_spec channels[2]; };
+/* get number of bytes for SPI xfer */ +#define AD7944_SPI_BYTES(scan_type) ((scan_type).realbits > 16 ? 4 : 2) + /* * AD7944_DEFINE_CHIP_INFO - Define a chip info structure for a specific chip * @_name: The name of the chip @@ -164,7 +167,7 @@ static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *
/* Then we can read the data during the acquisition phase */ xfers[2].rx_buf = &adc->sample.raw; - xfers[2].len = BITS_TO_BYTES(chan->scan_type.storagebits); + xfers[2].len = AD7944_SPI_BYTES(chan->scan_type); xfers[2].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 3); @@ -193,7 +196,7 @@ static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &adc->sample.raw; - xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits); + xfers[1].len = AD7944_SPI_BYTES(chan->scan_type); xfers[1].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 2); @@ -228,7 +231,7 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = adc->chain_mode_buf; - xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits) * n_chain_dev; + xfers[1].len = AD7944_SPI_BYTES(chan->scan_type) * n_chain_dev; xfers[1].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 2); @@ -274,12 +277,12 @@ static int ad7944_single_conversion(struct ad7944_adc *adc, return ret;
if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) { - if (chan->scan_type.storagebits > 16) + if (chan->scan_type.realbits > 16) *val = ((u32 *)adc->chain_mode_buf)[chan->scan_index]; else *val = ((u16 *)adc->chain_mode_buf)[chan->scan_index]; } else { - if (chan->scan_type.storagebits > 16) + if (chan->scan_type.realbits > 16) *val = adc->sample.raw.u32; else *val = adc->sample.raw.u16; @@ -409,8 +412,7 @@ static int ad7944_chain_mode_alloc(struct device *dev, /* 1 word for each voltage channel + aligned u64 for timestamp */
chain_mode_buf_size = ALIGN(n_chain_dev * - BITS_TO_BYTES(chan[0].scan_type.storagebits), sizeof(u64)) - + sizeof(u64); + AD7944_SPI_BYTES(chan[0].scan_type), sizeof(u64)) + sizeof(u64); buf = devm_kzalloc(dev, chain_mode_buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 07c571c7b699..c5b686394760 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -80,9 +80,12 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap, unsigned long virt) { - struct scatterlist *sg; + unsigned long curr_len = 0; + dma_addr_t curr_base = ~0; unsigned long va, pgoff; + struct scatterlist *sg; dma_addr_t mask; + dma_addr_t end; int i;
umem->iova = va = virt; @@ -107,17 +110,30 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, pgoff = umem->address & ~PAGE_MASK;
for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { - /* Walk SGL and reduce max page size if VA/PA bits differ - * for any address. + /* If the current entry is physically contiguous with the previous + * one, no need to take its start addresses into consideration. */ - mask |= (sg_dma_address(sg) + pgoff) ^ va; + if (check_add_overflow(curr_base, curr_len, &end) || + end != sg_dma_address(sg)) { + + curr_base = sg_dma_address(sg); + curr_len = 0; + + /* Reduce max page size if VA/PA bits differ */ + mask |= (curr_base + pgoff) ^ va; + + /* The alignment of any VA matching a discontinuity point + * in the physical memory sets the maximum possible page + * size as this must be a starting point of a new page that + * needs to be aligned. + */ + if (i != 0) + mask |= va; + } + + curr_len += sg_dma_len(sg); va += sg_dma_len(sg) - pgoff; - /* Except for the last entry, the ending iova alignment sets - * the maximum possible page size as the low bits of the iova - * must be zero when starting the next chunk. - */ - if (i != (umem->sgt_append.sgt.nents - 1)) - mask |= va; + pgoff = 0; }
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index edef79daed3f..535bb99ed9f5 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -718,8 +718,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) goto err_free;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!pd) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err_free; }
@@ -809,8 +809,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) if (cmd.flags & IB_MR_REREG_PD) { new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!new_pd) { - ret = -EINVAL; + if (IS_ERR(new_pd)) { + ret = PTR_ERR(new_pd); goto put_uobjs; } } else { @@ -919,8 +919,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs) return PTR_ERR(uobj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!pd) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err_free; }
@@ -1127,8 +1127,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs) return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) - return -EINVAL; + if (IS_ERR(cq)) + return PTR_ERR(cq);
ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata); if (ret) @@ -1189,8 +1189,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) - return -EINVAL; + if (IS_ERR(cq)) + return PTR_ERR(cq);
/* we copy a struct ib_uverbs_poll_cq_resp to user space */ header_ptr = attrs->ucore.outbuf; @@ -1238,8 +1238,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs) return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) - return -EINVAL; + if (IS_ERR(cq)) + return PTR_ERR(cq);
ib_req_notify_cq(cq, cmd.solicited_only ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); @@ -1321,8 +1321,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs, ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL, cmd->rwq_ind_tbl_handle, attrs); - if (!ind_tbl) { - ret = -EINVAL; + if (IS_ERR(ind_tbl)) { + ret = PTR_ERR(ind_tbl); goto err_put; }
@@ -1360,8 +1360,10 @@ static int create_qp(struct uverbs_attr_bundle *attrs, if (cmd->is_srq) { srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle, attrs); - if (!srq || srq->srq_type == IB_SRQT_XRC) { - ret = -EINVAL; + if (IS_ERR(srq) || + srq->srq_type == IB_SRQT_XRC) { + ret = IS_ERR(srq) ? PTR_ERR(srq) : + -EINVAL; goto err_put; } } @@ -1371,23 +1373,29 @@ static int create_qp(struct uverbs_attr_bundle *attrs, rcq = uobj_get_obj_read( cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle, attrs); - if (!rcq) { - ret = -EINVAL; + if (IS_ERR(rcq)) { + ret = PTR_ERR(rcq); goto err_put; } } } }
- if (has_sq) + if (has_sq) { scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle, attrs); + if (IS_ERR(scq)) { + ret = PTR_ERR(scq); + goto err_put; + } + } + if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI) rcq = rcq ?: scq; pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs); - if (!pd || (!scq && has_sq)) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err_put; }
@@ -1482,18 +1490,18 @@ static int create_qp(struct uverbs_attr_bundle *attrs, err_put: if (!IS_ERR(xrcd_uobj)) uobj_put_read(xrcd_uobj); - if (pd) + if (!IS_ERR_OR_NULL(pd)) uobj_put_obj_read(pd); - if (scq) + if (!IS_ERR_OR_NULL(scq)) rdma_lookup_put_uobject(&scq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); - if (rcq && rcq != scq) + if (!IS_ERR_OR_NULL(rcq) && rcq != scq) rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); - if (srq) + if (!IS_ERR_OR_NULL(srq)) rdma_lookup_put_uobject(&srq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); - if (ind_tbl) + if (!IS_ERR_OR_NULL(ind_tbl)) uobj_put_obj_read(ind_tbl);
uobj_alloc_abort(&obj->uevent.uobject, attrs); @@ -1655,8 +1663,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs) }
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) { - ret = -EINVAL; + if (IS_ERR(qp)) { + ret = PTR_ERR(qp); goto out; }
@@ -1761,8 +1769,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, attrs); - if (!qp) { - ret = -EINVAL; + if (IS_ERR(qp)) { + ret = PTR_ERR(qp); goto out; }
@@ -2028,8 +2036,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) return -ENOMEM;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) { - ret = -EINVAL; + if (IS_ERR(qp)) { + ret = PTR_ERR(qp); goto out; }
@@ -2066,9 +2074,9 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, user_wr->wr.ud.ah, attrs); - if (!ud->ah) { + if (IS_ERR(ud->ah)) { + ret = PTR_ERR(ud->ah); kfree(ud); - ret = -EINVAL; goto out_put; } ud->remote_qpn = user_wr->wr.ud.remote_qpn; @@ -2305,8 +2313,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs) return PTR_ERR(wr);
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) { - ret = -EINVAL; + if (IS_ERR(qp)) { + ret = PTR_ERR(qp); goto out; }
@@ -2356,8 +2364,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs) return PTR_ERR(wr);
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); - if (!srq) { - ret = -EINVAL; + if (IS_ERR(srq)) { + ret = PTR_ERR(srq); goto out; }
@@ -2413,8 +2421,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs) }
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!pd) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err; }
@@ -2483,8 +2491,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs) return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) - return -EINVAL; + if (IS_ERR(qp)) + return PTR_ERR(qp);
obj = qp->uobject;
@@ -2533,8 +2541,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs) return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) - return -EINVAL; + if (IS_ERR(qp)) + return PTR_ERR(qp);
obj = qp->uobject; mutex_lock(&obj->mcast_lock); @@ -2668,8 +2676,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, UVERBS_OBJECT_FLOW_ACTION, kern_spec->action.handle, attrs); - if (!ib_spec->action.act) - return -EINVAL; + if (IS_ERR(ib_spec->action.act)) + return PTR_ERR(ib_spec->action.act); ib_spec->action.size = sizeof(struct ib_flow_spec_action_handle); flow_resources_add(uflow_res, @@ -2686,8 +2694,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, UVERBS_OBJECT_COUNTERS, kern_spec->flow_count.handle, attrs); - if (!ib_spec->flow_count.counters) - return -EINVAL; + if (IS_ERR(ib_spec->flow_count.counters)) + return PTR_ERR(ib_spec->flow_count.counters); ib_spec->flow_count.size = sizeof(struct ib_flow_spec_action_count); flow_resources_add(uflow_res, @@ -2905,14 +2913,14 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs) return PTR_ERR(obj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!pd) { - err = -EINVAL; + if (IS_ERR(pd)) { + err = PTR_ERR(pd); goto err_uobj; }
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) { - err = -EINVAL; + if (IS_ERR(cq)) { + err = PTR_ERR(cq); goto err_put_pd; }
@@ -3013,8 +3021,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs) return -EINVAL;
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs); - if (!wq) - return -EINVAL; + if (IS_ERR(wq)) + return PTR_ERR(wq);
if (cmd.attr_mask & IB_WQ_FLAGS) { wq_attr.flags = cmd.flags; @@ -3097,8 +3105,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs) num_read_wqs++) { wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs], attrs); - if (!wq) { - err = -EINVAL; + if (IS_ERR(wq)) { + err = PTR_ERR(wq); goto put_wqs; }
@@ -3253,8 +3261,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs) }
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) { - err = -EINVAL; + if (IS_ERR(qp)) { + err = PTR_ERR(qp); goto err_uobj; }
@@ -3400,15 +3408,15 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, if (ib_srq_has_cq(cmd->srq_type)) { attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle, attrs); - if (!attr.ext.cq) { - ret = -EINVAL; + if (IS_ERR(attr.ext.cq)) { + ret = PTR_ERR(attr.ext.cq); goto err_put_xrcd; } }
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs); - if (!pd) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err_put_cq; }
@@ -3515,8 +3523,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs) return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); - if (!srq) - return -EINVAL; + if (IS_ERR(srq)) + return PTR_ERR(srq);
attr.max_wr = cmd.max_wr; attr.srq_limit = cmd.srq_limit; @@ -3543,8 +3551,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs) return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); - if (!srq) - return -EINVAL; + if (IS_ERR(srq)) + return PTR_ERR(srq);
ret = ib_query_srq(srq, &attr);
@@ -3669,8 +3677,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs) return -EOPNOTSUPP;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) - return -EINVAL; + if (IS_ERR(cq)) + return PTR_ERR(cq);
ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 473ee0831307..dc40001072a5 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -3109,22 +3109,23 @@ EXPORT_SYMBOL(__rdma_block_iter_start); bool __rdma_block_iter_next(struct ib_block_iter *biter) { unsigned int block_offset; - unsigned int sg_delta; + unsigned int delta;
if (!biter->__sg_nents || !biter->__sg) return false;
biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); - sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; + delta = BIT_ULL(biter->__pg_bit) - block_offset;
- if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { - biter->__sg_advance += sg_delta; - } else { + while (biter->__sg_nents && biter->__sg && + sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) { + delta -= sg_dma_len(biter->__sg) - biter->__sg_advance; biter->__sg_advance = 0; biter->__sg = sg_next(biter->__sg); biter->__sg_nents--; } + biter->__sg_advance += delta;
return true; } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index e14d8f316ad8..da5b14602a76 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -289,6 +289,8 @@ static const struct xpad_device { { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE }, + { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, @@ -353,6 +355,7 @@ static const struct xpad_device { { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 }, { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, + { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, { 0x2345, 0xe00b, "Machenike G5 Pro Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c index c616de2c5926..a56a27396305 100644 --- a/drivers/iommu/amd/io_pgtable_v2.c +++ b/drivers/iommu/amd/io_pgtable_v2.c @@ -254,7 +254,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova, pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd, iova, map_size, gfp, &updated); if (!pte) { - ret = -EINVAL; + ret = -ENOMEM; goto out; }
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 2a9fa0c8cc00..0f0caf59023c 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1815,7 +1815,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) static DEFINE_MUTEX(msi_prepare_lock); /* see below */
if (!domain || !domain->iova_cookie) { - desc->iommu_cookie = NULL; + msi_desc_set_iommu_msi_iova(desc, 0, 0); return 0; }
@@ -1827,11 +1827,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) mutex_lock(&msi_prepare_lock); msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); mutex_unlock(&msi_prepare_lock); - - msi_desc_set_iommu_cookie(desc, msi_page); - if (!msi_page) return -ENOMEM; + + msi_desc_set_iommu_msi_iova( + desc, msi_page->iova, + ilog2(cookie_msi_granule(domain->iova_cookie))); return 0; }
@@ -1842,18 +1843,15 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) */ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg) { - struct device *dev = msi_desc_to_dev(desc); - const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - const struct iommu_dma_msi_page *msi_page; +#ifdef CONFIG_IRQ_MSI_IOMMU + if (desc->iommu_msi_shift) { + u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift;
- msi_page = msi_desc_get_iommu_cookie(desc); - - if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) - return; - - msg->address_hi = upper_32_bits(msi_page->iova); - msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; - msg->address_lo += lower_32_bits(msi_page->iova); + msg->address_hi = upper_32_bits(msi_iova); + msg->address_lo = lower_32_bits(msi_iova) | + (msg->address_lo & ((1 << desc->iommu_msi_shift) - 1)); + } +#endif }
static int iommu_dma_init(void) diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h index de5b54eaa8bf..a5913c0b02a0 100644 --- a/drivers/iommu/iommu-priv.h +++ b/drivers/iommu/iommu-priv.h @@ -17,6 +17,8 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev) return dev->iommu->iommu_dev->ops; }
+void dev_iommu_free(struct device *dev); + const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwspec) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index cac3dce11168..879009adef40 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -347,7 +347,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev) return param; }
-static void dev_iommu_free(struct device *dev) +void dev_iommu_free(struct device *dev) { struct dev_iommu *param = dev->iommu;
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index 3fd8920e79ff..74480ae6bfc0 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -3,6 +3,7 @@ */ #include <linux/iommu.h> #include <linux/iommufd.h> +#include <linux/pci-ats.h> #include <linux/slab.h> #include <uapi/linux/iommufd.h>
@@ -1304,7 +1305,8 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd) void *data; int rc;
- if (cmd->flags || cmd->__reserved) + if (cmd->flags || cmd->__reserved[0] || cmd->__reserved[1] || + cmd->__reserved[2]) return -EOPNOTSUPP;
idev = iommufd_get_device(ucmd, cmd->dev_id); @@ -1361,6 +1363,36 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd) if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING)) cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING;
+ cmd->out_max_pasid_log2 = 0; + /* + * Currently, all iommu drivers enable PASID in the probe_device() + * op if iommu and device supports it. So the max_pasids stored in + * dev->iommu indicates both PASID support and enable status. A + * non-zero dev->iommu->max_pasids means PASID is supported and + * enabled. The iommufd only reports PASID capability to userspace + * if it's enabled. + */ + if (idev->dev->iommu->max_pasids) { + cmd->out_max_pasid_log2 = ilog2(idev->dev->iommu->max_pasids); + + if (dev_is_pci(idev->dev)) { + struct pci_dev *pdev = to_pci_dev(idev->dev); + int ctrl; + + ctrl = pci_pasid_status(pdev); + + WARN_ON_ONCE(ctrl < 0 || + !(ctrl & PCI_PASID_CTRL_ENABLE)); + + if (ctrl & PCI_PASID_CTRL_EXEC) + cmd->out_capabilities |= + IOMMU_HW_CAP_PCI_PASID_EXEC; + if (ctrl & PCI_PASID_CTRL_PRIV) + cmd->out_capabilities |= + IOMMU_HW_CAP_PCI_PASID_PRIV; + } + } + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_free: kfree(data); diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index d06bf6e6c19f..2454627a8b61 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -122,6 +122,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) && !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING)) return ERR_PTR(-EOPNOTSUPP); + if ((flags & IOMMU_HWPT_FAULT_ID_VALID) && + (flags & IOMMU_HWPT_ALLOC_NEST_PARENT)) + return ERR_PTR(-EOPNOTSUPP);
hwpt_paging = __iommufd_object_alloc( ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj); diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index e7a6a1611d19..e3fcab925a54 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -118,6 +118,7 @@ static void of_pci_check_device_ats(struct device *dev, struct device_node *np) int of_iommu_configure(struct device *dev, struct device_node *master_np, const u32 *id) { + bool dev_iommu_present; int err;
if (!master_np) @@ -129,6 +130,7 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np, mutex_unlock(&iommu_probe_device_lock); return 0; } + dev_iommu_present = dev->iommu;
/* * We don't currently walk up the tree looking for a parent IOMMU. @@ -149,8 +151,10 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np, err = of_iommu_configure_device(master_np, dev, id); }
- if (err) + if (err && dev_iommu_present) iommu_fwspec_free(dev); + else if (err && dev->iommu) + dev_iommu_free(dev); mutex_unlock(&iommu_probe_device_lock);
if (!err && dev->bus) diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c index 7cd6b646774b..205ad61d15e4 100644 --- a/drivers/irqchip/irq-riscv-aplic-direct.c +++ b/drivers/irqchip/irq-riscv-aplic-direct.c @@ -31,7 +31,7 @@ struct aplic_direct { };
struct aplic_idc { - unsigned int hart_index; + u32 hart_index; void __iomem *regs; struct aplic_direct *direct; }; @@ -219,6 +219,20 @@ static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index, return 0; }
+static int aplic_direct_get_hart_index(struct device *dev, u32 logical_index, + u32 *hart_index) +{ + const char *prop_hart_index = "riscv,hart-indexes"; + struct device_node *np = to_of_node(dev->fwnode); + + if (!np || !of_property_present(np, prop_hart_index)) { + *hart_index = logical_index; + return 0; + } + + return of_property_read_u32_index(np, prop_hart_index, logical_index, hart_index); +} + int aplic_direct_setup(struct device *dev, void __iomem *regs) { int i, j, rc, cpu, current_cpu, setup_count = 0; @@ -265,8 +279,12 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs) cpumask_set_cpu(cpu, &direct->lmask);
idc = per_cpu_ptr(&aplic_idcs, cpu); - idc->hart_index = i; - idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE; + rc = aplic_direct_get_hart_index(dev, i, &idc->hart_index); + if (rc) { + dev_warn(dev, "hart index not found for IDC%d\n", i); + continue; + } + idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE; idc->direct = direct;
aplic_idc_set_delivery(idc, true); diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c index c5c2e6929a2f..b5def6268936 100644 --- a/drivers/irqchip/irq-riscv-imsic-early.c +++ b/drivers/irqchip/irq-riscv-imsic-early.c @@ -77,6 +77,12 @@ static void imsic_handle_irq(struct irq_desc *desc) struct imsic_vector *vec; unsigned long local_id;
+ /* + * Process pending local synchronization instead of waiting + * for per-CPU local timer to expire. + */ + imsic_local_sync_all(false); + chained_irq_enter(chip, desc);
while ((local_id = csr_swap(CSR_TOPEI, 0))) { @@ -120,7 +126,7 @@ static int imsic_starting_cpu(unsigned int cpu) * Interrupts identities might have been enabled/disabled while * this CPU was not running so sync-up local enable/disable state. */ - imsic_local_sync_all(); + imsic_local_sync_all(true);
/* Enable local interrupt delivery */ imsic_local_delivery(true); diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c index c708780e8760..5d7c30ad8855 100644 --- a/drivers/irqchip/irq-riscv-imsic-platform.c +++ b/drivers/irqchip/irq-riscv-imsic-platform.c @@ -96,9 +96,8 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask bool force) { struct imsic_vector *old_vec, *new_vec; - struct irq_data *pd = d->parent_data;
- old_vec = irq_data_get_irq_chip_data(pd); + old_vec = irq_data_get_irq_chip_data(d); if (WARN_ON(!old_vec)) return -ENOENT;
@@ -116,13 +115,13 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask return -ENOSPC;
/* Point device to the new vector */ - imsic_msi_update_msg(d, new_vec); + imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
/* Update irq descriptors with the new vector */ - pd->chip_data = new_vec; + d->chip_data = new_vec;
- /* Update effective affinity of parent irq data */ - irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu)); + /* Update effective affinity */ + irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
/* Move state of the old vector to the new vector */ imsic_vector_move(old_vec, new_vec); @@ -135,6 +134,9 @@ static struct irq_chip imsic_irq_base_chip = { .name = "IMSIC", .irq_mask = imsic_irq_mask, .irq_unmask = imsic_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = imsic_irq_set_affinity, +#endif .irq_retrigger = imsic_irq_retrigger, .irq_compose_msi_msg = imsic_irq_compose_msg, .flags = IRQCHIP_SKIP_SET_WAKE | @@ -245,7 +247,7 @@ static bool imsic_init_dev_msi_info(struct device *dev, if (WARN_ON_ONCE(domain != real_parent)) return false; #ifdef CONFIG_SMP - info->chip->irq_set_affinity = imsic_irq_set_affinity; + info->chip->irq_set_affinity = irq_chip_set_affinity_parent; #endif break; default: diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c index b97e6cd89ed7..06ff0e17c0c3 100644 --- a/drivers/irqchip/irq-riscv-imsic-state.c +++ b/drivers/irqchip/irq-riscv-imsic-state.c @@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, } }
-static void __imsic_local_sync(struct imsic_local_priv *lpriv) +static bool __imsic_local_sync(struct imsic_local_priv *lpriv) { struct imsic_local_config *mlocal; struct imsic_vector *vec, *mvec; + bool ret = true; int i;
lockdep_assert_held(&lpriv->lock); @@ -143,35 +144,75 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv) __imsic_id_clear_enable(i);
/* - * If the ID was being moved to a new ID on some other CPU - * then we can get a MSI during the movement so check the - * ID pending bit and re-trigger the new ID on other CPU - * using MMIO write. + * Clear the previous vector pointer of the new vector only + * after the movement is complete on the old CPU. */ - mvec = READ_ONCE(vec->move); - WRITE_ONCE(vec->move, NULL); - if (mvec && mvec != vec) { + mvec = READ_ONCE(vec->move_prev); + if (mvec) { + /* + * If the old vector has not been updated then + * try again in the next sync-up call. + */ + if (READ_ONCE(mvec->move_next)) { + ret = false; + continue; + } + + WRITE_ONCE(vec->move_prev, NULL); + } + + /* + * If a vector was being moved to a new vector on some other + * CPU then we can get a MSI during the movement so check the + * ID pending bit and re-trigger the new ID on other CPU using + * MMIO write. + */ + mvec = READ_ONCE(vec->move_next); + if (mvec) { if (__imsic_id_read_clear_pending(i)) { mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); writel_relaxed(mvec->local_id, mlocal->msi_va); }
+ WRITE_ONCE(vec->move_next, NULL); imsic_vector_free(&lpriv->vectors[i]); }
skip: bitmap_clear(lpriv->dirty_bitmap, i, 1); } + + return ret; }
-void imsic_local_sync_all(void) +#ifdef CONFIG_SMP +static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) +{ + lockdep_assert_held(&lpriv->lock); + + if (!timer_pending(&lpriv->timer)) { + lpriv->timer.expires = jiffies + 1; + add_timer_on(&lpriv->timer, cpu); + } +} +#else +static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu) +{ +} +#endif + +void imsic_local_sync_all(bool force_all) { struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags); - bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); - __imsic_local_sync(lpriv); + + if (force_all) + bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); + if (!__imsic_local_sync(lpriv)) + __imsic_local_timer_start(lpriv, smp_processor_id()); + raw_spin_unlock_irqrestore(&lpriv->lock, flags); }
@@ -190,12 +231,7 @@ void imsic_local_delivery(bool enable) #ifdef CONFIG_SMP static void imsic_local_timer_callback(struct timer_list *timer) { - struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); - unsigned long flags; - - raw_spin_lock_irqsave(&lpriv->lock, flags); - __imsic_local_sync(lpriv); - raw_spin_unlock_irqrestore(&lpriv->lock, flags); + imsic_local_sync_all(false); }
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu) @@ -216,14 +252,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu */ if (cpu_online(cpu)) { if (cpu == smp_processor_id()) { - __imsic_local_sync(lpriv); - return; + if (__imsic_local_sync(lpriv)) + return; }
- if (!timer_pending(&lpriv->timer)) { - lpriv->timer.expires = jiffies + 1; - add_timer_on(&lpriv->timer, cpu); - } + __imsic_local_timer_start(lpriv, cpu); } } #else @@ -278,8 +311,9 @@ void imsic_vector_unmask(struct imsic_vector *vec) raw_spin_unlock(&lpriv->lock); }
-static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec, - bool new_enable, struct imsic_vector *new_move) +static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, + struct imsic_vector *vec, bool is_old_vec, + bool new_enable, struct imsic_vector *move_vec) { unsigned long flags; bool enabled; @@ -289,7 +323,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi /* Update enable and move details */ enabled = READ_ONCE(vec->enable); WRITE_ONCE(vec->enable, new_enable); - WRITE_ONCE(vec->move, new_move); + if (is_old_vec) + WRITE_ONCE(vec->move_next, move_vec); + else + WRITE_ONCE(vec->move_prev, move_vec);
/* Mark the vector as dirty and synchronize */ bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); @@ -322,8 +359,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve * interrupt on the old vector while device was being moved * to the new vector. */ - enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec); - imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec); + enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec); + imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec); }
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS @@ -386,7 +423,8 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask vec = &lpriv->vectors[local_id]; vec->hwirq = hwirq; vec->enable = false; - vec->move = NULL; + vec->move_next = NULL; + vec->move_prev = NULL;
return vec; } diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h index 391e44280827..f02842b84ed5 100644 --- a/drivers/irqchip/irq-riscv-imsic-state.h +++ b/drivers/irqchip/irq-riscv-imsic-state.h @@ -23,7 +23,8 @@ struct imsic_vector { unsigned int hwirq; /* Details accessed using local lock held */ bool enable; - struct imsic_vector *move; + struct imsic_vector *move_next; + struct imsic_vector *move_prev; };
struct imsic_local_priv { @@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id) __imsic_eix_update(id, 1, false, false); }
-void imsic_local_sync_all(void); +void imsic_local_sync_all(bool force_all); void imsic_local_delivery(bool enable);
void imsic_vector_mask(struct imsic_vector *vec); @@ -87,7 +88,7 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec) { - return READ_ONCE(vec->move); + return READ_ONCE(vec->move_prev); }
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec); diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c index e1a81e0109e8..c0aa34b1d0e2 100644 --- a/drivers/leds/rgb/leds-pwm-multicolor.c +++ b/drivers/leds/rgb/leds-pwm-multicolor.c @@ -135,8 +135,11 @@ static int led_pwm_mc_probe(struct platform_device *pdev)
/* init the multicolor's LED class device */ cdev = &priv->mc_cdev.led_cdev; - fwnode_property_read_u32(mcnode, "max-brightness", + ret = fwnode_property_read_u32(mcnode, "max-brightness", &cdev->max_brightness); + if (ret) + goto release_mcnode; + cdev->flags = LED_CORE_SUSPENDRESUME; cdev->brightness_set_blocking = led_pwm_mc_set;
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index 4b0863db901a..356a55ced2c2 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -68,6 +68,7 @@ struct led_netdev_data { unsigned int last_activity;
unsigned long mode; + unsigned long blink_delay; int link_speed; __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_link_modes); u8 duplex; @@ -86,6 +87,10 @@ static void set_baseline_state(struct led_netdev_data *trigger_data) /* Already validated, hw control is possible with the requested mode */ if (trigger_data->hw_control) { led_cdev->hw_control_set(led_cdev, trigger_data->mode); + if (led_cdev->blink_set) { + led_cdev->blink_set(led_cdev, &trigger_data->blink_delay, + &trigger_data->blink_delay); + }
return; } @@ -454,10 +459,11 @@ static ssize_t interval_store(struct device *dev, size_t size) { struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev); + struct led_classdev *led_cdev = trigger_data->led_cdev; unsigned long value; int ret;
- if (trigger_data->hw_control) + if (trigger_data->hw_control && !led_cdev->blink_set) return -EINVAL;
ret = kstrtoul(buf, 0, &value); @@ -466,9 +472,13 @@ static ssize_t interval_store(struct device *dev,
/* impose some basic bounds on the timer interval */ if (value >= 5 && value <= 10000) { - cancel_delayed_work_sync(&trigger_data->work); + if (trigger_data->hw_control) { + trigger_data->blink_delay = value; + } else { + cancel_delayed_work_sync(&trigger_data->work);
- atomic_set(&trigger_data->interval, msecs_to_jiffies(value)); + atomic_set(&trigger_data->interval, msecs_to_jiffies(value)); + } set_baseline_state(trigger_data); /* resets timer */ }
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index d3d26a2c9895..cb174e788a96 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -415,11 +415,12 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
mutex_lock(&con_mutex);
- if (of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells", index, &spec)) { + ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells", + index, &spec); + if (ret) { dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__); mutex_unlock(&con_mutex); - return ERR_PTR(-ENODEV); + return ERR_PTR(ret); }
chan = ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index f8215a8f656a..49254d99a8ad 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -419,8 +419,12 @@ int pcc_mbox_ioremap(struct mbox_chan *chan) return -1; pchan_info = chan->con_priv; pcc_mbox_chan = &pchan_info->chan; - pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr, - pcc_mbox_chan->shmem_size); + + pcc_mbox_chan->shmem = acpi_os_ioremap(pcc_mbox_chan->shmem_base_addr, + pcc_mbox_chan->shmem_size); + if (!pcc_mbox_chan->shmem) + return -ENXIO; + return 0; } EXPORT_SYMBOL_GPL(pcc_mbox_ioremap); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 849eb6333e98..6aa4095dc587 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2899,6 +2899,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache) return to_cblock(size); }
+static bool can_resume(struct cache *cache) +{ + /* + * Disallow retrying the resume operation for devices that failed the + * first resume attempt, as the failure leaves the policy object partially + * initialized. Retrying could trigger BUG_ON when loading cache mappings + * into the incomplete policy object. + */ + if (cache->sized && !cache->loaded_mappings) { + if (get_cache_mode(cache) != CM_WRITE) + DMERR("%s: unable to resume a failed-loaded cache, please check metadata.", + cache_device_name(cache)); + else + DMERR("%s: unable to resume cache due to missing proper cache table reload", + cache_device_name(cache)); + return false; + } + + return true; +} + static bool can_resize(struct cache *cache, dm_cblock_t new_size) { if (from_cblock(new_size) > from_cblock(cache->cache_size)) { @@ -2947,6 +2968,9 @@ static int cache_preresume(struct dm_target *ti) struct cache *cache = ti->private; dm_cblock_t csize = get_cache_dev_size(cache);
+ if (!can_resume(cache)) + return -EINVAL; + /* * Check to see if the cache has resized. */ diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 3dc5bc3d29d6..883f01e78324 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -698,6 +698,10 @@ int dm_table_add_target(struct dm_table *t, const char *type, DMERR("%s: zero-length target", dm_device_name(t->md)); return -EINVAL; } + if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) { + DMERR("%s: too large device", dm_device_name(t->md)); + return -EINVAL; + }
ti->type = dm_get_target_type(type); if (!ti->type) { diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c index 627adc24af3b..053b7845d1f3 100644 --- a/drivers/md/dm-vdo/indexer/index-layout.c +++ b/drivers/md/dm-vdo/indexer/index-layout.c @@ -54,7 +54,6 @@ * Each save also has a unique nonce. */
-#define MAGIC_SIZE 32 #define NONCE_INFO_SIZE 32 #define MAX_SAVES 2
@@ -98,9 +97,11 @@ enum region_type { #define SUPER_VERSION_CURRENT 3 #define SUPER_VERSION_MAXIMUM 7
-static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*"; +static const u8 LAYOUT_MAGIC[] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*"; static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
+#define MAGIC_SIZE (sizeof(LAYOUT_MAGIC) - 1) + struct region_header { u64 magic; u64 region_blocks; diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index fff847767755..b897f88250d2 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -31,9 +31,7 @@
#include <linux/completion.h> #include <linux/device-mapper.h> -#include <linux/kernel.h> #include <linux/lz4.h> -#include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/types.h> @@ -142,12 +140,6 @@ static void finish_vdo_request_queue(void *ptr) vdo_unregister_allocating_thread(); }
-#ifdef MODULE -#define MODULE_NAME THIS_MODULE->name -#else -#define MODULE_NAME "dm-vdo" -#endif /* MODULE */ - static const struct vdo_work_queue_type default_queue_type = { .start = start_vdo_request_queue, .finish = finish_vdo_request_queue, @@ -559,8 +551,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason, *vdo_ptr = vdo;
snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix), - "%s%u", MODULE_NAME, instance); - BUG_ON(vdo->thread_name_prefix[0] == '\0'); + "vdo%u", instance); result = vdo_allocate(vdo->thread_config.thread_count, struct vdo_thread, __func__, &vdo->threads); if (result != VDO_SUCCESS) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 19230404d8c2..d29125ee9e72 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1541,14 +1541,18 @@ static void __send_empty_flush(struct clone_info *ci) { struct dm_table *t = ci->map; struct bio flush_bio; + blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; + + if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) == + (REQ_IDLE | REQ_SYNC)) + opf |= REQ_IDLE;
/* * Use an on-stack bio for this, it's safe since we don't * need to reference it after submit. It's just used as * the basis for the clone(s). */ - bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, - REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); + bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
ci->bio = &flush_bio; ci->sector_count = 0; diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c index 819ff9f7c90f..2a20a4fad796 100644 --- a/drivers/media/i2c/adv7180.c +++ b/drivers/media/i2c/adv7180.c @@ -195,6 +195,7 @@ struct adv7180_state; #define ADV7180_FLAG_V2 BIT(1) #define ADV7180_FLAG_MIPI_CSI2 BIT(2) #define ADV7180_FLAG_I2P BIT(3) +#define ADV7180_FLAG_TEST_PATTERN BIT(4)
struct adv7180_chip_info { unsigned int flags; @@ -682,11 +683,15 @@ static int adv7180_init_controls(struct adv7180_state *state) ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF); v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL);
- v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, &adv7180_ctrl_ops, - V4L2_CID_TEST_PATTERN, - ARRAY_SIZE(test_pattern_menu) - 1, - 0, ARRAY_SIZE(test_pattern_menu) - 1, - test_pattern_menu); + if (state->chip_info->flags & ADV7180_FLAG_TEST_PATTERN) { + v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, + &adv7180_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(test_pattern_menu) - 1, + 0, + ARRAY_SIZE(test_pattern_menu) - 1, + test_pattern_menu); + }
state->sd.ctrl_handler = &state->ctrl_hdl; if (state->ctrl_hdl.error) { @@ -1221,7 +1226,7 @@ static const struct adv7180_chip_info adv7182_info = { };
static const struct adv7180_chip_info adv7280_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | @@ -1235,7 +1240,8 @@ static const struct adv7180_chip_info adv7280_info = { };
static const struct adv7180_chip_info adv7280_m_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | @@ -1256,7 +1262,8 @@ static const struct adv7180_chip_info adv7280_m_info = { };
static const struct adv7180_chip_info adv7281_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN7) | @@ -1271,7 +1278,8 @@ static const struct adv7180_chip_info adv7281_info = { };
static const struct adv7180_chip_info adv7281_m_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | @@ -1291,7 +1299,8 @@ static const struct adv7180_chip_info adv7281_m_info = { };
static const struct adv7180_chip_info adv7281_ma_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | @@ -1316,7 +1325,7 @@ static const struct adv7180_chip_info adv7281_ma_info = { };
static const struct adv7180_chip_info adv7282_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN7) | @@ -1331,7 +1340,8 @@ static const struct adv7180_chip_info adv7282_info = { };
static const struct adv7180_chip_info adv7282_m_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c index 906aa314b7f8..eaa1496c71bb 100644 --- a/drivers/media/i2c/imx219.c +++ b/drivers/media/i2c/imx219.c @@ -74,7 +74,7 @@ #define IMX219_REG_VTS CCI_REG16(0x0160) #define IMX219_VTS_MAX 0xffff
-#define IMX219_VBLANK_MIN 4 +#define IMX219_VBLANK_MIN 32
/* HBLANK control - read only */ #define IMX219_PPL_DEFAULT 3448 diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c index fcfd1d851bd4..0beb80b8c458 100644 --- a/drivers/media/i2c/imx335.c +++ b/drivers/media/i2c/imx335.c @@ -559,12 +559,14 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl) imx335->vblank, imx335->vblank + imx335->cur_mode->height);
- return __v4l2_ctrl_modify_range(imx335->exp_ctrl, - IMX335_EXPOSURE_MIN, - imx335->vblank + - imx335->cur_mode->height - - IMX335_EXPOSURE_OFFSET, - 1, IMX335_EXPOSURE_DEFAULT); + ret = __v4l2_ctrl_modify_range(imx335->exp_ctrl, + IMX335_EXPOSURE_MIN, + imx335->vblank + + imx335->cur_mode->height - + IMX335_EXPOSURE_OFFSET, + 1, IMX335_EXPOSURE_DEFAULT); + if (ret) + return ret; }
/* @@ -575,6 +577,13 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl) return 0;
switch (ctrl->id) { + case V4L2_CID_VBLANK: + exposure = imx335->exp_ctrl->val; + analog_gain = imx335->again_ctrl->val; + + ret = imx335_update_exp_gain(imx335, exposure, analog_gain); + + break; case V4L2_CID_EXPOSURE: exposure = ctrl->val; analog_gain = imx335->again_ctrl->val; diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c index 389582420ba7..048a1a381b33 100644 --- a/drivers/media/i2c/tc358746.c +++ b/drivers/media/i2c/tc358746.c @@ -460,24 +460,20 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746) return err; }
-/* Use MHz as base so the div needs no u64 */ -static u32 tc358746_cfg_to_cnt(unsigned int cfg_val, - unsigned int clk_mhz, - unsigned int time_base) +static u32 tc358746_cfg_to_cnt(unsigned long cfg_val, unsigned long clk_hz, + unsigned long long time_base) { - return DIV_ROUND_UP(cfg_val * clk_mhz, time_base); + return div64_u64((u64)cfg_val * clk_hz + time_base - 1, time_base); }
-static u32 tc358746_ps_to_cnt(unsigned int cfg_val, - unsigned int clk_mhz) +static u32 tc358746_ps_to_cnt(unsigned long cfg_val, unsigned long clk_hz) { - return tc358746_cfg_to_cnt(cfg_val, clk_mhz, USEC_PER_SEC); + return tc358746_cfg_to_cnt(cfg_val, clk_hz, PSEC_PER_SEC); }
-static u32 tc358746_us_to_cnt(unsigned int cfg_val, - unsigned int clk_mhz) +static u32 tc358746_us_to_cnt(unsigned long cfg_val, unsigned long clk_hz) { - return tc358746_cfg_to_cnt(cfg_val, clk_mhz, 1); + return tc358746_cfg_to_cnt(cfg_val, clk_hz, USEC_PER_SEC); }
static int tc358746_apply_dphy_config(struct tc358746 *tc358746) @@ -492,7 +488,6 @@ static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
/* The hs_byte_clk is also called SYSCLK in the excel sheet */ hs_byte_clk = cfg->hs_clk_rate / 8; - hs_byte_clk /= HZ_PER_MHZ; hf_clk = hs_byte_clk / 2;
val = tc358746_us_to_cnt(cfg->init, hf_clk) - 1; diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c index 858db5d4ca75..e51f2ed3f031 100644 --- a/drivers/media/platform/qcom/camss/camss-csid.c +++ b/drivers/media/platform/qcom/camss/camss-csid.c @@ -683,11 +683,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable) int ret;
if (enable) { - ret = v4l2_ctrl_handler_setup(&csid->ctrls); - if (ret < 0) { - dev_err(csid->camss->dev, - "could not sync v4l2 controls: %d\n", ret); - return ret; + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) { + ret = v4l2_ctrl_handler_setup(&csid->ctrls); + if (ret < 0) { + dev_err(csid->camss->dev, + "could not sync v4l2 controls: %d\n", ret); + return ret; + } }
if (!csid->testgen.enabled && @@ -761,7 +763,8 @@ static void csid_try_format(struct csid_device *csid, break;
case MSM_CSID_PAD_SRC: - if (csid->testgen_mode->cur.val == 0) { + if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED || + csid->testgen_mode->cur.val == 0) { /* Test generator is disabled, */ /* keep pad formats in sync */ u32 code = fmt->code; @@ -811,7 +814,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csid->res->formats->formats[code->index].code; } else { - if (csid->testgen_mode->cur.val == 0) { + if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED || + csid->testgen_mode->cur.val == 0) { struct v4l2_mbus_framefmt *sink_fmt;
sink_fmt = __csid_get_format(csid, sd_state, @@ -1190,7 +1194,8 @@ static int csid_link_setup(struct media_entity *entity,
/* If test generator is enabled */ /* do not allow a link from CSIPHY to CSID */ - if (csid->testgen_mode->cur.val != 0) + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED && + csid->testgen_mode->cur.val != 0) return -EBUSY;
sd = media_entity_to_v4l2_subdev(remote->entity); @@ -1283,24 +1288,27 @@ int msm_csid_register_entity(struct csid_device *csid, MSM_CSID_NAME, csid->id); v4l2_set_subdevdata(sd, csid);
- ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); - if (ret < 0) { - dev_err(dev, "Failed to init ctrl handler: %d\n", ret); - return ret; - } + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) { + ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); + if (ret < 0) { + dev_err(dev, "Failed to init ctrl handler: %d\n", ret); + return ret; + }
- csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls, - &csid_ctrl_ops, V4L2_CID_TEST_PATTERN, - csid->testgen.nmodes, 0, 0, - csid->testgen.modes); + csid->testgen_mode = + v4l2_ctrl_new_std_menu_items(&csid->ctrls, + &csid_ctrl_ops, V4L2_CID_TEST_PATTERN, + csid->testgen.nmodes, 0, 0, + csid->testgen.modes);
- if (csid->ctrls.error) { - dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); - ret = csid->ctrls.error; - goto free_ctrl; - } + if (csid->ctrls.error) { + dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); + ret = csid->ctrls.error; + goto free_ctrl; + }
- csid->subdev.ctrl_handler = &csid->ctrls; + csid->subdev.ctrl_handler = &csid->ctrls; + }
ret = csid_init_formats(sd, NULL); if (ret < 0) { @@ -1331,7 +1339,8 @@ int msm_csid_register_entity(struct csid_device *csid, media_cleanup: media_entity_cleanup(&sd->entity); free_ctrl: - v4l2_ctrl_handler_free(&csid->ctrls); + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) + v4l2_ctrl_handler_free(&csid->ctrls);
return ret; } @@ -1344,7 +1353,8 @@ void msm_csid_unregister_entity(struct csid_device *csid) { v4l2_device_unregister_subdev(&csid->subdev); media_entity_cleanup(&csid->subdev.entity); - v4l2_ctrl_handler_free(&csid->ctrls); + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) + v4l2_ctrl_handler_free(&csid->ctrls); }
inline bool csid_is_lite(struct csid_device *csid) diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c index 83c5a36d071f..8f6b0eccefb4 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe.c +++ b/drivers/media/platform/qcom/camss/camss-vfe.c @@ -398,6 +398,10 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code, return sink_code; } break; + default: + WARN(1, "Unsupported HW version: %x\n", + vfe->camss->res->version); + break; } return 0; } diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c index 67d3d6e50d2e..ed3a107965cc 100644 --- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c @@ -797,13 +797,12 @@ static int c8sectpfe_probe(struct platform_device *pdev) } tsin->i2c_adapter = of_find_i2c_adapter_by_node(i2c_bus); + of_node_put(i2c_bus); if (!tsin->i2c_adapter) { dev_err(&pdev->dev, "No i2c adapter found\n"); - of_node_put(i2c_bus); ret = -ENODEV; goto err_node_put; } - of_node_put(i2c_bus);
/* Acquire reset GPIO and activate it */ tsin->rst_gpio = devm_fwnode_gpiod_get(dev, diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c index 669bd96da4c7..273e8ed8c2a9 100644 --- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c @@ -789,9 +789,14 @@ static int vivid_thread_vid_cap(void *data) next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start; - while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && - !kthread_should_stop()) - schedule(); + if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) + continue; + + wait_queue_head_t wait; + + init_waitqueue_head(&wait); + wait_event_interruptible_timeout(wait, kthread_should_stop(), + cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "Video Capture Thread End\n"); return 0; diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c index fac6208b51da..015a7b166a1e 100644 --- a/drivers/media/test-drivers/vivid/vivid-kthread-out.c +++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c @@ -235,9 +235,14 @@ static int vivid_thread_vid_out(void *data) next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start; - while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && - !kthread_should_stop()) - schedule(); + if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) + continue; + + wait_queue_head_t wait; + + init_waitqueue_head(&wait); + wait_event_interruptible_timeout(wait, kthread_should_stop(), + cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "Video Output Thread End\n"); return 0; diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c index fa711ee36a3f..c862689786b6 100644 --- a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c +++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c @@ -135,9 +135,14 @@ static int vivid_thread_touch_cap(void *data) next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start; - while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && - !kthread_should_stop()) - schedule(); + if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) + continue; + + wait_queue_head_t wait; + + init_waitqueue_head(&wait); + wait_event_interruptible_timeout(wait, kthread_should_stop(), + cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "Touch Capture Thread End\n"); return 0; diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c index 38cda33dffb2..97cfc58b7057 100644 --- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c @@ -206,9 +206,14 @@ static int vivid_thread_sdr_cap(void *data) next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start; - while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && - !kthread_should_stop()) - schedule(); + if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) + continue; + + wait_queue_head_t wait; + + init_waitqueue_head(&wait); + wait_event_interruptible_timeout(wait, kthread_should_stop(), + cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "SDR Capture Thread End\n"); return 0; diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index abb967c8bd35..6cb130eb32d5 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -1722,6 +1722,8 @@ static void cx231xx_video_dev_init( vfd->lock = &dev->lock; vfd->release = video_device_release_empty; vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl; + vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | + V4L2_CAP_VIDEO_CAPTURE; video_set_drvdata(vfd, dev); if (dev->tuner_type == TUNER_ABSENT) { v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY); diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 4837d8df9c03..58d1bc80253e 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -862,6 +862,25 @@ static inline void uvc_clear_bit(u8 *data, int bit) data[bit >> 3] &= ~(1 << (bit & 7)); }
+static s32 uvc_menu_to_v4l2_menu(struct uvc_control_mapping *mapping, s32 val) +{ + unsigned int i; + + for (i = 0; BIT(i) <= mapping->menu_mask; ++i) { + u32 menu_value; + + if (!test_bit(i, &mapping->menu_mask)) + continue; + + menu_value = uvc_mapping_get_menu_value(mapping, i); + + if (menu_value == val) + return i; + } + + return val; +} + /* * Extract the bit string specified by mapping->offset and mapping->size * from the little-endian data stored at 'data' and return the result as @@ -896,6 +915,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) value |= -(value & (1 << (mapping->size - 1)));
+ /* If it is a menu, convert from uvc to v4l2. */ + if (mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) + return value; + + switch (query) { + case UVC_GET_CUR: + case UVC_GET_DEF: + return uvc_menu_to_v4l2_menu(mapping, value); + } + return value; }
@@ -1060,32 +1089,6 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain, return 0; }
-static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping, - const u8 *data) -{ - s32 value = mapping->get(mapping, UVC_GET_CUR, data); - - if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) { - unsigned int i; - - for (i = 0; BIT(i) <= mapping->menu_mask; ++i) { - u32 menu_value; - - if (!test_bit(i, &mapping->menu_mask)) - continue; - - menu_value = uvc_mapping_get_menu_value(mapping, i); - - if (menu_value == value) { - value = i; - break; - } - } - } - - return value; -} - static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain, struct uvc_control *ctrl) { @@ -1136,8 +1139,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain, if (ret < 0) return ret;
- *value = __uvc_ctrl_get_value(mapping, - uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT)); + *value = mapping->get(mapping, UVC_GET_CUR, + uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
return 0; } @@ -1287,7 +1290,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, { struct uvc_control_mapping *master_map = NULL; struct uvc_control *master_ctrl = NULL; - unsigned int i;
memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl)); v4l2_ctrl->id = mapping->id; @@ -1330,21 +1332,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1; v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1; v4l2_ctrl->step = 1; - - for (i = 0; BIT(i) <= mapping->menu_mask; ++i) { - u32 menu_value; - - if (!test_bit(i, &mapping->menu_mask)) - continue; - - menu_value = uvc_mapping_get_menu_value(mapping, i); - - if (menu_value == v4l2_ctrl->default_value) { - v4l2_ctrl->default_value = i; - break; - } - } - return 0;
case V4L2_CTRL_TYPE_BOOLEAN: @@ -1627,7 +1614,7 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain, uvc_ctrl_set_handle(handle, ctrl, NULL);
list_for_each_entry(mapping, &ctrl->info.mappings, list) { - s32 value = __uvc_ctrl_get_value(mapping, data); + s32 value = mapping->get(mapping, UVC_GET_CUR, data);
/* * handle may be NULL here if the device sends auto-update diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 7bcd706281da..cb7d9fb589fc 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -106,6 +106,12 @@ static int uvc_ioctl_xu_ctrl_map(struct uvc_video_chain *chain, struct uvc_control_mapping *map; int ret;
+ if (xmap->data_type > UVC_CTRL_DATA_TYPE_BITMASK) { + uvc_dbg(chain->dev, CONTROL, + "Unsupported UVC data type %u\n", xmap->data_type); + return -EINVAL; + } + map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 3a4ba08810d2..1193852bad59 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -439,6 +439,8 @@ static int call_enum_dv_timings(struct v4l2_subdev *sd, static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_mbus_config *config) { + memset(config, 0, sizeof(*config)); + return check_pad(sd, pad) ? : sd->ops->pad->get_mbus_config(sd, pad, config); } diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index 4051551757f2..378092903971 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -214,6 +214,7 @@ static const struct regmap_range axp717_writeable_ranges[] = { regmap_reg_range(AXP717_VSYS_V_POWEROFF, AXP717_VSYS_V_POWEROFF), regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN), regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE), + regmap_reg_range(AXP717_TS_PIN_CFG, AXP717_TS_PIN_CFG), regmap_reg_range(AXP717_ICC_CHG_SET, AXP717_CV_CHG_SET), regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL), regmap_reg_range(AXP717_ADC_CH_EN_CONTROL, AXP717_ADC_CH_EN_CONTROL), diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c index 57ff5cb294a6..d3b77abec786 100644 --- a/drivers/mfd/tps65219.c +++ b/drivers/mfd/tps65219.c @@ -228,7 +228,6 @@ static const struct regmap_irq_chip tps65219_irq_chip = { static int tps65219_probe(struct i2c_client *client) { struct tps65219 *tps; - unsigned int chipid; bool pwr_button; int ret;
@@ -253,12 +252,6 @@ static int tps65219_probe(struct i2c_client *client) if (ret) return ret;
- ret = regmap_read(tps->regmap, TPS65219_REG_TI_DEV_ID, &chipid); - if (ret) { - dev_err(tps->dev, "Failed to read device ID: %d\n", ret); - return ret; - } - ret = devm_mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65219_cells, ARRAY_SIZE(tps65219_cells), NULL, 0, regmap_irq_get_domain(tps->irq_data)); diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index 89224d4af4a2..e13f9fdd9d7b 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -304,6 +304,10 @@ static int ee1004_probe(struct i2c_client *client) I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA)) return -EPFNOSUPPORT;
+ err = i2c_smbus_read_byte(client); + if (err < 0) + return -ENODEV; + mutex_lock(&ee1004_bus_lock);
err = ee1004_init_bus_data(client); diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index eb51fbe8d92f..ad7c7f157319 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -71,8 +71,8 @@ struct vsc_tp { u32 seq;
/* command buffer */ - void *tx_buf; - void *rx_buf; + struct vsc_tp_packet *tx_buf; + struct vsc_tp_packet *rx_buf;
atomic_t assert_cnt; wait_queue_head_t xfer_wait; @@ -164,7 +164,7 @@ static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt, { int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr); int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES; - u8 *src, *crc_src, *rx_buf = tp->rx_buf; + u8 *src, *crc_src, *rx_buf = (u8 *)tp->rx_buf; int count_down = VSC_TP_MAX_XFER_COUNT; u32 recv_crc = 0, crc = ~0; struct vsc_tp_packet_hdr ack; @@ -324,7 +324,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) guard(mutex)(&tp->mutex);
/* rom xfer is big endian */ - cpu_to_be32_array(tp->tx_buf, obuf, words); + cpu_to_be32_array((u32 *)tp->tx_buf, obuf, words);
ret = read_poll_timeout(gpiod_get_value_cansleep, ret, !ret, VSC_TP_ROM_XFER_POLL_DELAY_US, @@ -340,7 +340,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) return ret;
if (ibuf) - be32_to_cpu_array(ibuf, tp->rx_buf, words); + be32_to_cpu_array(ibuf, (u32 *)tp->rx_buf, words);
return ret; } @@ -496,11 +496,11 @@ static int vsc_tp_probe(struct spi_device *spi) if (!tp) return -ENOMEM;
- tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL); + tp->tx_buf = devm_kzalloc(dev, sizeof(*tp->tx_buf), GFP_KERNEL); if (!tp->tx_buf) return -ENOMEM;
- tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL); + tp->rx_buf = devm_kzalloc(dev, sizeof(*tp->rx_buf), GFP_KERNEL); if (!tp->rx_buf) return -ENOMEM;
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index e22afb420d09..f05256b7c208 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -287,11 +287,13 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test, void *read_buf __free(kfree) = NULL; struct pci_dev *pdev = test->pdev;
+ bar_size = pci_resource_len(pdev, barno); + if (!bar_size) + return -ENODATA; + if (!test->bar[barno]) return false;
- bar_size = pci_resource_len(pdev, barno); - if (barno == test->test_reg_bar) bar_size = 0x4;
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 6dc057718d2c..89682f10e69f 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -27,6 +27,8 @@ enum dw_mci_exynos_type { DW_MCI_TYPE_EXYNOS5420_SMU, DW_MCI_TYPE_EXYNOS7, DW_MCI_TYPE_EXYNOS7_SMU, + DW_MCI_TYPE_EXYNOS7870, + DW_MCI_TYPE_EXYNOS7870_SMU, DW_MCI_TYPE_ARTPEC8, };
@@ -69,6 +71,12 @@ static struct dw_mci_exynos_compatible { }, { .compatible = "samsung,exynos7-dw-mshc-smu", .ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU, + }, { + .compatible = "samsung,exynos7870-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS7870, + }, { + .compatible = "samsung,exynos7870-dw-mshc-smu", + .ctrl_type = DW_MCI_TYPE_EXYNOS7870_SMU, }, { .compatible = "axis,artpec8-dw-mshc", .ctrl_type = DW_MCI_TYPE_ARTPEC8, @@ -85,6 +93,8 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host) return EXYNOS4210_FIXED_CIU_CLK_DIV; else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1; else @@ -100,7 +110,8 @@ static void dw_mci_exynos_config_smu(struct dw_mci *host) * set for non-ecryption mode at this time. */ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU || - priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) { + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) { mci_writel(host, MPSBEGIN0, 0); mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX); mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT | @@ -126,6 +137,12 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host) DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl); }
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) { + /* Quirk needed for certain Exynos SoCs */ + host->quirks |= DW_MMC_QUIRK_FIFO64_32; + } + if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) { /* Quirk needed for the ARTPEC-8 SoC */ host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT; @@ -143,6 +160,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) clksel = mci_readl(host, CLKSEL64); else @@ -152,6 +171,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) mci_writel(host, CLKSEL64, clksel); else @@ -222,6 +243,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) clksel = mci_readl(host, CLKSEL64); else @@ -230,6 +253,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev) if (clksel & SDMMC_CLKSEL_WAKEUP_INT) { if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) mci_writel(host, CLKSEL64, clksel); else @@ -409,6 +434,8 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64)); else @@ -422,6 +449,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) clksel = mci_readl(host, CLKSEL64); else @@ -429,6 +458,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample); if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) mci_writel(host, CLKSEL64, clksel); else @@ -443,6 +474,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) clksel = mci_readl(host, CLKSEL64); else @@ -453,6 +486,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) mci_writel(host, CLKSEL64, clksel); else @@ -632,6 +667,10 @@ static const struct of_device_id dw_mci_exynos_match[] = { .data = &exynos_drv_data, }, { .compatible = "samsung,exynos7-dw-mshc-smu", .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos7870-dw-mshc", + .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos7870-dw-mshc-smu", + .data = &exynos_drv_data, }, { .compatible = "axis,artpec8-dw-mshc", .data = &artpec_drv_data, }, {}, diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 2e2e15e2d8fb..b0b1d403f352 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -609,8 +609,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power(host, mode, vdd);
- if (mode == MMC_POWER_OFF) + if (mode == MMC_POWER_OFF) { + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD || + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD) + usleep_range(15000, 17500); return; + }
/* * Bus power might not enable after D3 -> D0 transition due to the diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 4b91c9e96635..8ae76300d157 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2035,10 +2035,15 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (clk & SDHCI_CLOCK_CARD_EN) + sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, + SDHCI_CLOCK_CONTROL);
- if (clock == 0) + if (clock == 0) { + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); return; + }
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); sdhci_enable_clk(host, clk); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4d73abae503d..4d2e30f4ee25 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2542,7 +2542,7 @@ static int __bond_release_one(struct net_device *bond_dev,
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- if (!all && (!bond->params.fail_over_mac || + if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE || BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && bond_has_slaves(bond)) diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 399844809bbe..bb6071a758f3 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -324,7 +324,7 @@ static int c_can_plat_probe(struct platform_device *pdev) /* Check if we need custom RAMINIT via syscon. Mostly for TI * platforms. Only supported with DT boot. */ - if (np && of_property_read_bool(np, "syscon-raminit")) { + if (np && of_property_present(np, "syscon-raminit")) { u32 id; struct c_can_raminit *raminit = &priv->raminit_sys;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index fee012b57f33..020e5897812f 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/timer.h> +#include <net/netdev_queues.h>
MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Kvaser AB <support@kvaser.com>"); @@ -410,10 +411,13 @@ struct kvaser_pciefd_can { void __iomem *reg_base; struct can_berr_counter bec; u8 cmd_seq; + u8 tx_max_count; + u8 tx_idx; + u8 ack_idx; int err_rep_cnt; - int echo_idx; + unsigned int completed_tx_pkts; + unsigned int completed_tx_bytes; spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */ - spinlock_t echo_lock; /* Locks the message echo buffer */ struct timer_list bec_poll_timer; struct completion start_comp, flush_comp; }; @@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev) int ret; struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ can->tx_idx = 0; + can->ack_idx = 0; + ret = open_candev(netdev); if (ret) return ret; @@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev) del_timer(&can->bec_poll_timer); } can->can.state = CAN_STATE_STOPPED; + netdev_reset_queue(netdev); close_candev(netdev);
return ret; }
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can) +{ + return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx)); +} + static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, - struct kvaser_pciefd_can *can, + struct can_priv *can, u8 seq, struct sk_buff *skb) { struct canfd_frame *cf = (struct canfd_frame *)skb->data; int packet_size; - int seq = can->echo_idx;
memset(p, 0, sizeof(*p)); - if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT) p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
if (cf->can_id & CAN_RTR_FLAG) @@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, } else { p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, - can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode)); + can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode)); }
p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq); @@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct kvaser_pciefd_can *can = netdev_priv(netdev); - unsigned long irq_flags; struct kvaser_pciefd_tx_packet packet; + unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1); + unsigned int frame_len; int nr_words; - u8 count;
if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; + if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1)) + return NETDEV_TX_BUSY;
- nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); + nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
- spin_lock_irqsave(&can->echo_lock, irq_flags); /* Prepare and save echo skb in internal slot */ - can_put_echo_skb(skb, netdev, can->echo_idx, 0); - - /* Move echo index to the next slot */ - can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; + WRITE_ONCE(can->can.echo_skb[seq], NULL); + frame_len = can_skb_get_frame_len(skb); + can_put_echo_skb(skb, netdev, seq, frame_len); + netdev_sent_queue(netdev, frame_len); + WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
/* Write header to fifo */ iowrite32(packet.header[0], @@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, KVASER_PCIEFD_KCAN_FIFO_LAST_REG); }
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, - ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); - /* No room for a new message, stop the queue until at least one - * successful transmit - */ - if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx]) - netif_stop_queue(netdev); - spin_unlock_irqrestore(&can->echo_lock, irq_flags); + netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
return NETDEV_TX_OK; } @@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) can->kv_pcie = pcie; can->cmd_seq = 0; can->err_rep_cnt = 0; + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; can->bec.txerr = 0; can->bec.rxerr = 0;
@@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) tx_nr_packets_max = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK, ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq; - can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); - can->echo_idx = 0; - spin_lock_init(&can->echo_lock); + can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count); spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const; @@ -1200,7 +1208,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, skb = alloc_canfd_skb(priv->dev, &cf); if (!skb) { priv->dev->stats.rx_dropped++; - return -ENOMEM; + return 0; }
cf->len = can_fd_dlc2len(dlc); @@ -1212,7 +1220,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); if (!skb) { priv->dev->stats.rx_dropped++; - return -ENOMEM; + return 0; } can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode); } @@ -1230,7 +1238,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, priv->dev->stats.rx_packets++; kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- return netif_rx(skb); + netif_rx(skb); + + return 0; }
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, @@ -1507,19 +1517,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, netdev_dbg(can->can.dev, "Packet was flushed\n"); } else { int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]); - int len; - u8 count; + unsigned int len, frame_len = 0; struct sk_buff *skb;
+ if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1))) + return 0; skb = can->can.echo_skb[echo_idx]; - if (skb) - kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); - len = can_get_echo_skb(can->can.dev, echo_idx, NULL); - count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, - ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); + if (!skb) + return 0; + kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); + len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
- if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev)) - netif_wake_queue(can->can.dev); + /* Pairs with barrier in kvaser_pciefd_start_xmit() */ + smp_store_release(&can->ack_idx, can->ack_idx + 1); + can->completed_tx_pkts++; + can->completed_tx_bytes += frame_len;
if (!one_shot_fail) { can->can.dev->stats.tx_bytes += len; @@ -1635,11 +1647,26 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) { int pos = 0; int res = 0; + unsigned int i;
do { res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+ /* Report ACKs in this buffer to BQL en masse for correct periods */ + for (i = 0; i < pcie->nr_channels; ++i) { + struct kvaser_pciefd_can *can = pcie->can[i]; + + if (!can->completed_tx_pkts) + continue; + netif_subqueue_completed_wake(can->can.dev, 0, + can->completed_tx_pkts, + can->completed_tx_bytes, + kvaser_pciefd_tx_avail(can), 1); + can->completed_tx_pkts = 0; + can->completed_tx_bytes = 0; + } + return res; }
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c index 24c6622d36bd..58ff2ec1d975 100644 --- a/drivers/net/can/slcan/slcan-core.c +++ b/drivers/net/can/slcan/slcan-core.c @@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>"); #define SLCAN_CMD_LEN 1 #define SLCAN_SFF_ID_LEN 3 #define SLCAN_EFF_ID_LEN 8 +#define SLCAN_DATA_LENGTH_LEN 1 +#define SLCAN_ERROR_LEN 1 #define SLCAN_STATE_LEN 1 #define SLCAN_STATE_BE_RXCNT_LEN 3 #define SLCAN_STATE_BE_TXCNT_LEN 3 -#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \ - SLCAN_STATE_BE_RXCNT_LEN + \ - SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \ + SLCAN_STATE_LEN + \ + SLCAN_STATE_BE_RXCNT_LEN + \ + SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_ERROR_LEN + \ + SLCAN_DATA_LENGTH_LEN) +#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_SFF_ID_LEN + \ + SLCAN_DATA_LENGTH_LEN) struct slcan { struct can_priv can;
@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff;
+ if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN) + return; + skb = alloc_can_skb(sl->dev, &cf); if (unlikely(!skb)) { sl->dev->stats.rx_dropped++; @@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl) return; }
- if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN) + if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN) return;
cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; @@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl) bool rx_errors = false, tx_errors = false, rx_over_errors = false; int i, len;
+ if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN) + return; + /* get len from sanitized ASCII value */ len = cmd[1]; if (len >= '0' && len < '9') @@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl) static void slcan_unesc(struct slcan *sl, unsigned char s) { if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ - if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 4) + if (!test_and_clear_bit(SLF_ERROR, &sl->flags)) slcan_bump(sl);
sl->rcount = 0; diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 9e90c2381491..68335935cea7 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[]; - static int xge_get_resources(struct xge_pdata *pdata) { struct platform_device *pdev; @@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match); static struct platform_driver xge_driver = { .driver = { .name = "xgene-enet-v2", - .acpi_match_table = ACPI_PTR(xge_acpi_match), + .acpi_match_table = xge_acpi_match, }, .probe = xge_probe, .remove_new = xge_remove, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 12b61a6fcda4..2bb1fce350db 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -11783,6 +11783,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) struct hwrm_func_drv_if_change_input *req; bool fw_reset = !bp->irq_tbl; bool resc_reinit = false; + bool caps_change = false; int rc, retry = 0; u32 flags = 0;
@@ -11838,8 +11839,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) set_bit(BNXT_STATE_ABORT_ERR, &bp->state); return -ENODEV; } - if (resc_reinit || fw_reset) { - if (fw_reset) { + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE) + caps_change = true; + + if (resc_reinit || fw_reset || caps_change) { + if (fw_reset || caps_change) { set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) bnxt_ulp_irq_stop(bp); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index f662a5d54986..d8272b7a55fc 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1572,6 +1572,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, } }
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + enetc_flip_rx_buff(rx_ring, + &rx_ring->rx_swbd[rx_ring_first]); + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } +} + static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, struct napi_struct *napi, int work_limit, struct bpf_prog *prog) @@ -1687,11 +1697,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_xdp_drop(rx_ring, orig_i, i); rx_ring->stats.xdp_redirect_failures++; } else { - while (orig_i != i) { - enetc_flip_rx_buff(rx_ring, - &rx_ring->rx_swbd[orig_i]); - enetc_bdr_idx_inc(rx_ring, &orig_i); - } + enetc_bulk_flip_buff(rx_ring, orig_i, i); xdp_redirect_frm_cnt++; rx_ring->stats.xdp_redirect++; } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 04906897615d..479ced24096b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1098,6 +1098,29 @@ static void fec_enet_enable_ring(struct net_device *ndev) } }
+/* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ +static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol) +{ + u32 val; + + if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || + ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); + udelay(10); + } + } else { + val = readl(fep->hwp + FEC_ECNTRL); + val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + } +} + /* * This function is called to start or restart the FEC during a link * change, transmit timeout, or to reconfigure the FEC. The network @@ -1114,17 +1137,7 @@ fec_restart(struct net_device *ndev) if (fep->bufdesc_ex) fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this. - * For i.MX6SX SOC, enet use AXI bus, we use disable MAC - * instead of reset MAC itself. - */ - if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || - ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { - writel(0, fep->hwp + FEC_ECNTRL); - } else { - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - } + fec_ctrl_reset(fep, false);
/* * enet-mac reset will reset mac address registers too, @@ -1378,22 +1391,7 @@ fec_stop(struct net_device *ndev) if (fep->bufdesc_ex) fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this. - * For i.MX6SX SOC, enet use AXI bus, we use disable MAC - * instead of reset MAC itself. - */ - if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { - writel(0, fep->hwp + FEC_ECNTRL); - } else { - writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); - udelay(10); - } - } else { - val = readl(fep->hwp + FEC_ECNTRL); - val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); - writel(val, fep->hwp + FEC_ECNTRL); - } + fec_ctrl_reset(fep, true); writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 7d1feeb317be..2a2acbeb5722 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3817,8 +3817,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring) - combined++; + combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx); }
return combined; diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c index ad82ff7d1995..09f9c7ba5279 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.c +++ b/drivers/net/ethernet/intel/ice/ice_irq.c @@ -45,7 +45,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) /** * ice_get_irq_res - get an interrupt resource * @pf: board private structure - * @dyn_only: force entry to be dynamically allocated + * @dyn_allowed: allow entry to be dynamically allocated * * Allocate new irq entry in the free slot of the tracker. Since xarray * is used, always allocate new entry at the lowest possible index. Set @@ -53,11 +53,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) * * Returns allocated irq entry or NULL on failure. */ -static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) +static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, + bool dyn_allowed) { - struct xa_limit limit = { .max = pf->irq_tracker.num_entries, + struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1, .min = 0 }; - unsigned int num_static = pf->irq_tracker.num_static; + unsigned int num_static = pf->irq_tracker.num_static - 1; struct ice_irq_entry *entry; unsigned int index; int ret; @@ -66,9 +67,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) if (!entry) return NULL;
- /* skip preallocated entries if the caller says so */ - if (dyn_only) - limit.min = num_static; + /* only already allocated if the caller says so */ + if (!dyn_allowed) + limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit, GFP_KERNEL); @@ -78,7 +79,7 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) entry = NULL; } else { entry->index = index; - entry->dynamic = index >= num_static; + entry->dynamic = index > num_static; }
return entry; @@ -272,7 +273,7 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) /** * ice_alloc_irq - Allocate new interrupt vector * @pf: board private structure - * @dyn_only: force dynamic allocation of the interrupt + * @dyn_allowed: allow dynamic allocation of the interrupt * * Allocate new interrupt vector for a given owner id. * return struct msi_map with interrupt details and track @@ -285,20 +286,20 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) * interrupt will be allocated with pci_msix_alloc_irq_at. * * Some callers may only support dynamically allocated interrupts. - * This is indicated with dyn_only flag. + * This is indicated with dyn_allowed flag. * * On failure, return map with negative .index. The caller * is expected to check returned map index. * */ -struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only) +struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed) { int sriov_base_vector = pf->sriov_base_vector; struct msi_map map = { .index = -ENOENT }; struct device *dev = ice_pf_to_dev(pf); struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only); + entry = ice_get_irq_res(pf, dyn_allowed); if (!entry) return map;
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 22371011c249..2410aee59fb2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) */ if (!primary_lag) { lag->primary = true; + if (!ice_is_switchdev_running(lag->pf)) + return; + /* Configure primary's SWID to be shared */ ice_lag_primary_swid(lag, true); primary_lag = lag; } else { u16 swid;
+ if (!ice_is_switchdev_running(primary_lag->pf)) + return; + swid = primary_lag->pf->hw.port_info->sw_id; ice_lag_set_swid(swid, lag, true); ice_lag_add_prune_list(primary_lag, lag->pf); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 121a5ad5c8e1..8961eebe67aa 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -567,6 +567,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) return -ENOMEM; }
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev); + switch (vsi->type) { case ICE_VSI_PF: case ICE_VSI_SF: diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ca707dfcb286..63d2105fce93 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5175,11 +5175,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf); + err = ice_init_rdma(pf); if (err) goto err_init_rdma;
- ice_init_features(pf); ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state); @@ -5187,6 +5188,7 @@ int ice_load(struct ice_pf *pf) return 0;
err_init_rdma: + ice_deinit_features(pf); ice_tc_indir_block_unregister(vsi); err_tc_indir_block_register: ice_unregister_netdev(vsi); @@ -5210,8 +5212,8 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf); ice_deinit_rdma(pf); + ice_deinit_features(pf); ice_tc_indir_block_unregister(vsi); ice_unregister_netdev(vsi); ice_devlink_destroy_pf_port(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index c8c1d48ff793..87ffd25b268a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -3877,7 +3877,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) }
ice_vfhw_mac_add(vf, &al->list[i]); - vf->num_mac++; break; }
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index aef0e9775a33..70dbf80f3bb7 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -143,6 +143,7 @@ enum idpf_vport_state { * @vport_id: Vport identifier * @link_speed_mbps: Link speed in mbps * @vport_idx: Relative vport index + * @max_tx_hdr_size: Max header length hardware can support * @state: See enum idpf_vport_state * @netstats: Packet and byte stats * @stats_lock: Lock to protect stats update @@ -153,6 +154,7 @@ struct idpf_netdev_priv { u32 vport_id; u32 link_speed_mbps; u16 vport_idx; + u16 max_tx_hdr_size; enum idpf_vport_state state; struct rtnl_link_stats64 netstats; spinlock_t stats_lock; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 5ce663d04de0..615e74d03845 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -723,6 +723,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->vport = vport; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); vport->netdev = netdev;
return idpf_init_mac_addr(vport, netdev); @@ -740,6 +741,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->adapter = adapter; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
spin_lock_init(&np->stats_lock);
@@ -2189,8 +2191,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { - struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_adapter *adapter = vport->adapter; + struct idpf_netdev_priv *np = netdev_priv(netdev); + u16 max_tx_hdr_size = np->max_tx_hdr_size; size_t len;
/* No point in doing any of this if neither checksum nor GSO are @@ -2213,7 +2215,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, goto unsupported;
len = skb_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported;
if (!skb->encapsulation) @@ -2226,7 +2228,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
/* IPLEN can support at most 127 dwords */ len = skb_inner_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported;
/* No need to validate L4LEN as TCP is the only protocol with a diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index afc902ae4763..623bf17f87f9 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -4022,6 +4022,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) return budget; }
+ /* Switch to poll mode in the tear-down path after sending disable + * queues virtchnl message, as the interrupts will be disabled after + * that. + */ + if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, + q_vector->tx[0]))) + return budget; + work_done = min_t(int, work_done, budget - 1);
/* Exit the polling mode, but don't re-enable interrupts if stack might @@ -4032,15 +4040,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) else idpf_vport_intr_set_wb_on_itr(q_vector);
- /* Switch to poll mode in the tear-down path after sending disable - * queues virtchnl message, as the interrupts will be disabled after - * that - */ - if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, - q_vector->tx[0]))) - return budget; - else - return work_done; + return work_done; }
/** diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index e43c4608d3ba..971993586fb4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); /* Supported devices */ static const struct pci_device_id cgx_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) }, - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) }, { 0, } /* end of table */ };
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 8555edbb1c8f..f94bf04788e9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -30,6 +30,8 @@
 #define PCI_SUBSYS_DEVID_CNF10K_A	0xBA00
 #define PCI_SUBSYS_DEVID_CNF10K_B	0xBC00
 #define PCI_SUBSYS_DEVID_CN10K_B	0xBD00
+#define PCI_SUBSYS_DEVID_CN20KA		0xC220
+#define PCI_SUBSYS_DEVID_CNF20KA	0xC320
 /* PCI BAR nos */
 #define PCI_AF_REG_BAR_NUM	0
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7fa98aeb3663..4a3370a40dd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -13,19 +13,26 @@
 /* RVU LMTST */
 #define LMT_TBL_OP_READ		0
 #define LMT_TBL_OP_WRITE	1
-#define LMT_MAP_TABLE_SIZE	(128 * 1024)
 #define LMT_MAPTBL_ENTRY_SIZE	16
+#define LMT_MAX_VFS		256
+
+#define LMT_MAP_ENTRY_ENA	BIT_ULL(20)
+#define LMT_MAP_ENTRY_LINES	GENMASK_ULL(18, 16)

 /* Function to perform operations (read/write) on lmtst map table */
 static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
			       int lmt_tbl_op)
 {
 	void __iomem *lmt_map_base;
-	u64 tbl_base;
+	u64 tbl_base, cfg;
+	int pfs, vfs;

 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+	cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+	vfs = 1 << (cfg & 0xF);
+	pfs = 1 << ((cfg >> 4) & 0x7);

-	lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+	lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
 	if (!lmt_map_base) {
 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
 		return -ENOMEM;
@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
 		*val = readq(lmt_map_base + index);
 	} else {
 		writeq((*val), (lmt_map_base + index));
+
+		cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
+		/* 2048 LMTLINES */
+		cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
+
+		writeq(cfg, (lmt_map_base + (index + 8)));
+
 		/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
 		 * changes effective. Write 1 for flush and read is being used as a
 		 * barrier and sets up a data dependency. Write to 0 after a write
@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
 #define LMT_MAP_TBL_W1_OFF  8
 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
 {
-	return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+	return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
 		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
 }
@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
 	mutex_lock(&rvu->rsrc_lock);
 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
-	pf = rvu_get_pf(pcifunc) & 0x1F;
+	pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
 	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
 	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 87ba77e5026a..e24accfecb3f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -580,6 +580,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
 	u64 lmt_addr, val, tbl_base;
 	int pf, vf, num_vfs, hw_vfs;
 	void __iomem *lmt_map_base;
+	int apr_pfs, apr_vfs;
 	int buf_size = 10240;
 	size_t off = 0;
 	int index = 0;
@@ -595,8 +596,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
 		return -ENOMEM;

 	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+	val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+	apr_vfs = 1 << (val & 0xF);
+	apr_pfs = 1 << ((val >> 4) & 0x7);

-	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+	lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
+				  LMT_MAPTBL_ENTRY_SIZE);
 	if (!lmt_map_base) {
 		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
 		kfree(buf);
@@ -618,7 +623,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
 		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d  \t\t\t", pf);

-		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+		index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
 		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index));
 		lmt_addr = readq(lmt_map_base + index);
@@ -631,7 +636,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
 		/* Reading num of VFs per PF */
 		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
 		for (vf = 0; vf < num_vfs; vf++) {
-			index = (pf * rvu->hw->total_vfs * 16) +
+			index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
 				((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
 			off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 7510a918d942..f75afcf5f5ae 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -988,6 +988,7 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 	int err, pool_id, non_xdp_queues;
 	struct nix_aq_enq_req *aq;
 	struct otx2_cq_queue *cq;
+	struct otx2_pool *pool;
 	cq = &qset->cq[qidx];
 	cq->cq_idx = qidx;
@@ -996,8 +997,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
 		cq->cq_type = CQ_RX;
 		cq->cint_idx = qidx;
 		cq->cqe_cnt = qset->rqe_cnt;
-		if (pfvf->xdp_prog)
+		if (pfvf->xdp_prog) {
+			pool = &qset->pool[qidx];
 			xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+			xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+						   MEM_TYPE_PAGE_POOL,
+						   pool->page_pool);
+		}
 	} else if (qidx < non_xdp_queues) {
 		cq->cq_type = CQ_TX;
 		cq->cint_idx = qidx - pfvf->hw.rx_queues;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index f20bb390df3a..c855fb799ce1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -34,8 +34,10 @@ struct mtk_flow_data {
 	u16 vlan_in;
 	struct {
-		u16 id;
-		__be16 proto;
+		struct {
+			u16 id;
+			__be16 proto;
+		} vlans[2];
 		u8 num;
 	} vlan;
 	struct {
@@ -349,18 +351,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
 		case FLOW_ACTION_CSUM:
 			break;
 		case FLOW_ACTION_VLAN_PUSH:
-			if (data.vlan.num == 1 ||
+			if (data.vlan.num + data.pppoe.num == 2 ||
 			    act->vlan.proto != htons(ETH_P_8021Q))
 				return -EOPNOTSUPP;

-			data.vlan.id = act->vlan.vid;
-			data.vlan.proto = act->vlan.proto;
+			data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
+			data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
 			data.vlan.num++;
 			break;
 		case FLOW_ACTION_VLAN_POP:
 			break;
 		case FLOW_ACTION_PPPOE_PUSH:
-			if (data.pppoe.num == 1)
+			if (data.pppoe.num == 1 ||
+			    data.vlan.num == 2)
 				return -EOPNOTSUPP;

 			data.pppoe.sid = act->pppoe.sid;
@@ -450,12 +453,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
 	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
 		foe.bridge.vlan = data.vlan_in;

-	if (data.vlan.num == 1) {
-		if (data.vlan.proto != htons(ETH_P_8021Q))
-			return -EOPNOTSUPP;
+	for (i = 0; i < data.vlan.num; i++)
+		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);

-		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
-	}
 	if (data.pppoe.num == 1)
 		mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b330020dc0d6..f2bded847e61 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -682,9 +682,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
 }

 static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
-				    struct mlx4_db *db, int order)
+				    struct mlx4_db *db, unsigned int order)
 {
-	int o;
+	unsigned int o;
 	int i;

 	for (o = order; o <= 1; ++o) {
@@ -712,7 +712,7 @@ static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
 	return 0;
 }

-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_db_pgdir *pgdir;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1ddb11cb25f9..6e077d202827 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -450,6 +450,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,

 	if (unlikely(!priv->port_up))
 		return 0;
+	if (unlikely(!napi_budget) && cq->type == TX_XDP)
+		return 0;

 	netdev_txq_bql_complete_prefetchw(ring->tx_queue);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 57b7298a0e79..e048a667e075 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -94,8 +94,6 @@ struct page_pool; #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \ MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18 - /* Keep in sync with mlx5e_mpwrq_log_wqe_sz. * These are theoretical maximums, which can be further restricted by * capabilities. These values are used for static resource allocations and @@ -385,7 +383,6 @@ enum { MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, MLX5E_SQ_STATE_PENDING_XSK_TX, MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, - MLX5E_SQ_STATE_XDP_MULTIBUF, MLX5E_NUM_SQ_STATES, /* Must be kept last */ };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 31eb99f09c63..58ec5e44aa7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -10,6 +10,9 @@ #include <net/page_pool/types.h> #include <net/xdp_sock_drv.h>
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18 +#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17 + static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev) { u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size); @@ -103,18 +106,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) { u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode); - u8 max_pages_per_wqe, max_log_mpwqe_size; + u8 max_pages_per_wqe, max_log_wqe_size_calc; + u8 max_log_wqe_size_cap; u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */ max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB; max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe), MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size; - max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift; + max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift; + + WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU); + max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ? + MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ); + return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap); }
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, @@ -1242,7 +1249,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev, mlx5e_build_sq_param_common(mdev, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE); - param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk); mlx5e_build_tx_cq_param(mdev, params, ¶m->cqp); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 3f8986f9d862..bd5877acc5b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -33,7 +33,6 @@ struct mlx5e_sq_param { struct mlx5_wq_param wq; bool is_mpw; bool is_tls; - bool is_xdp_mb; u16 stop_room; };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index c8adf309ecad..dbd9482359e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = { [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline", [MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx", [MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync", - [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf", };
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 4610621a340e..08ab0999f7b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -546,6 +546,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, bool inline_ok; bool linear; u16 pi; + int i;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -612,41 +613,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) { - int i; - - memset(&cseg->trailer, 0, sizeof(cseg->trailer)); - memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer)); - - eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); + memset(&cseg->trailer, 0, sizeof(cseg->trailer)); + memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
- for (i = 0; i < num_frags; i++) { - skb_frag_t *frag = &xdptxdf->sinfo->frags[i]; - dma_addr_t addr; + eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] : - page_pool_get_dma_addr(skb_frag_page(frag)) + - skb_frag_off(frag); + for (i = 0; i < num_frags; i++) { + skb_frag_t *frag = &xdptxdf->sinfo->frags[i]; + dma_addr_t addr;
- dseg->addr = cpu_to_be64(addr); - dseg->byte_count = cpu_to_be32(skb_frag_size(frag)); - dseg->lkey = sq->mkey_be; - dseg++; - } + addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] : + page_pool_get_dma_addr(skb_frag_page(frag)) + + skb_frag_off(frag);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); + dseg->addr = cpu_to_be64(addr); + dseg->byte_count = cpu_to_be32(skb_frag_size(frag)); + dseg->lkey = sq->mkey_be; + dseg++; + }
- sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) { - .num_wqebbs = num_wqebbs, - .num_pkts = 1, - }; + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->pc += num_wqebbs; - } else { - cseg->fm_ce_se = 0; + sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) { + .num_wqebbs = num_wqebbs, + .num_pkts = 1, + };
- sq->pc++; - } + sq->pc += num_wqebbs;
xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 57861d34d46f..59b9653f573c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -165,6 +165,25 @@ static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec, #endif }
+static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_spec *spec) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + + if (rx == ipsec->rx_esw) { + mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec); + } else { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_2); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_2, + sa_entry->ipsec_obj_id | BIT(31)); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + } +} + static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx) { @@ -200,11 +219,8 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome); MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); - MLX5_SET(fte_match_param, spec->match_value, - misc_parameters_2.metadata_reg_c_2, - sa_entry->ipsec_obj_id | BIT(31)); spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + ipsec_rx_rule_add_match_obj(sa_entry, rx, spec); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -281,10 +297,8 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2, - sa_entry->ipsec_obj_id | BIT(31)); spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + ipsec_rx_rule_add_match_obj(sa_entry, rx, spec); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3e9ad3cb8121..4a2f58a9d706 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2030,41 +2030,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, csp.min_inline_mode = sq->min_inline_mode; set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- if (param->is_xdp_mb) - set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state); - err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn); if (err) goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) { - unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1; - unsigned int inline_hdr_sz = 0; - int i; - - if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - inline_hdr_sz = MLX5E_XDP_MIN_INLINE; - ds_cnt++; - } - - /* Pre initialize fixed WQE fields */ - for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - - sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) { - .num_wqebbs = 1, - .num_pkts = 1, - }; - - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); - } - } - return 0;
err_free_xdpsq: @@ -3788,8 +3759,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, /* MQPRIO is another toplevel qdisc that can't be attached * simultaneously with the offloaded HTB. */ - if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq))) - return -EINVAL; + if (mlx5e_selq_is_htb_enabled(&priv->selq)) { + NL_SET_ERR_MSG_MOD(mqprio->extack, + "MQPRIO cannot be configured when HTB offload is enabled."); + return -EOPNOTSUPP; + }
switch (mqprio->mode) { case TC_MQPRIO_MODE_DCB: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 0657d1076535..18ec392d1740 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -65,6 +65,7 @@ #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1 +#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -854,6 +855,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */ mlx5e_build_rq_params(mdev, params); + if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev)) + params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* If netdev is already registered (e.g. move from nic profile to uplink, * RTNL lock must be held before triggering netdev notifiers. @@ -885,6 +888,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ; + if (mlx5_core_is_ecpf(mdev)) + netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 1d60465cc2ca..2f7a543feca6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -166,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, struct udphdr *udph; struct iphdr *iph;
+ if (skb_linearize(skb)) + goto out; + /* We are only going to peek, no need to clone the SKB */ if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb)) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c index ed977ae75fab..4bba2884c1c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c @@ -85,6 +85,19 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, return err; }
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_spec *spec) +{ + MLX5_SET(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_1, + ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_1, + sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; +} + void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5e_ipsec *ipsec = sa_entry->ipsec; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h index ac9c65b89166..514c15258b1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h @@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx_create_attr *attr); void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev); +void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_spec *spec); #else static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx_create_attr *attr) {} @@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx_create_attr *attr) {}
static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {} +static inline void +mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_spec *spec) {} #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESW_IPSEC_FS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index 8587cd572da5..bdb825aa8726 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) if (!flow_group_in) return -ENOMEM;
-	ft_attr.max_fte = POOL_NEXT_SIZE;
+	ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
 	ft_attr.prio = LEGACY_FDB_PRIO;
 	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
 	if (IS_ERR(fdb)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d91ea53eb394..fc6e56305cbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -163,11 +163,16 @@ static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
 	u64 value_msb;

 	value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+	/* bit 1-63 are not supported for NICs,
+	 * hence read only bit 0 (asic) from lsb.
+	 */
+	value_lsb &= 0x1;
 	value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);

-	mlx5_core_warn(events->dev,
-		       "High temperature on sensors with bit set %llx %llx",
-		       value_msb, value_lsb);
+	if (net_ratelimit())
+		mlx5_core_warn(events->dev,
+			       "High temperature on sensors with bit set %llx %llx",
+			       value_msb, value_lsb);

 	return NOTIFY_OK;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
index c14590acc772..f6abfd00d7e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
 	int i, found_i = -1;

 	for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
-		if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+		if (dev->priv.ft_pool->ft_left[i] &&
+		    (FT_POOLS[i] >= desired_size ||
+		     desired_size == MLX5_FS_MAX_POOL_SIZE) &&
 		    FT_POOLS[i] <= max_ft_size) {
 			found_i = i;
-			if (desired_size != POOL_NEXT_SIZE)
+			if (desired_size != MLX5_FS_MAX_POOL_SIZE)
 				break;
 		}
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
index 25f4274b372b..173e312db720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -7,8 +7,6 @@
 #include <linux/mlx5/driver.h>
 #include "fs_core.h"

-#define POOL_NEXT_SIZE 0
-
 int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
 void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index a6329ca2d9bf..52c8035547be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -799,6 +799,7 @@ static void poll_health(struct timer_list *t)
 	health->prev = count;
 	if (health->miss_counter == MAX_MISSES) {
 		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
+		health->synd = ioread8(&h->synd);
 		print_health_info(dev);
 		queue_work(health->wq, &health->report_work);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 711d14dea248..d313cb7f0ed8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
 		ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
 				  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

-	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+	     FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
 	ft_attr.max_fte = sz;

 	/* We use chains_default_ft(chains) as the table's next_ft till
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index a400616a24d4..79e94632533c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -544,6 +544,8 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
 	fbnic_rss_key_fill(fbn->rss_key);
 	fbnic_rss_init_en_mask(fbn);

+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
 	netdev->features |=
 		NETIF_F_RXHASH |
 		NETIF_F_SG |
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 547255ca1c4e..812ad9d61676 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -3466,6 +3466,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
 				 struct pci_dev *pdev)
 {
 	struct lan743x_tx *tx;
+	u32 sgmii_ctl;
 	int index;
 	int ret;
@@ -3478,6 +3479,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, spin_lock_init(&adapter->eth_syslock_spinlock); mutex_init(&adapter->sgmii_rw_lock); pci11x1x_set_rfe_rd_fifo_threshold(adapter); + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + if (adapter->is_sgmii_en) { + sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; + } else { + sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; + } + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); } else { adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; @@ -3526,7 +3536,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) { - u32 sgmii_ctl; int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); @@ -3538,10 +3547,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) adapter->mdiobus->priv = (void *)adapter; if (adapter->is_pci11x1x) { if (adapter->is_sgmii_en) { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "SGMII operation\n"); adapter->mdiobus->read = lan743x_mdiobus_read_c22; @@ -3552,10 +3557,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n"); } else { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "RGMII operation\n"); // Only C22 support when RGMII I/F diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 36802e0a8b57..9bac4083d8a0 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1044,7 +1044,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req, header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
 	if (oob_in_sgl) {
-		WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+		WARN_ON_ONCE(wqe_req->num_sge < 2);

 		header->client_oob_in_sgl = 1;

diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5ed2818bac25..85bb5121cd24 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2850,6 +2850,32 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
 		RTL_R32(tp, CSIDR) : ~0;
 }
+static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
+{
+	struct pci_dev *pdev = tp->pci_dev;
+	u32 csi;
+	int rc;
+	u8 val;
+
+#define RTL_GEN3_RELATED_OFF	0x0890
+#define RTL_GEN3_ZRXDC_NONCOMPL	0x1
+	if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
+		rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
+		if (rc == PCIBIOS_SUCCESSFUL) {
+			val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
+			rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
+						   val);
+			if (rc == PCIBIOS_SUCCESSFUL)
+				return;
+		}
+	}
+
+	netdev_notice_once(tp->dev,
+			   "No native access to PCI extended config space, falling back to CSI\n");
+	csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
+	rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
+}
+
 static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
 {
 	struct pci_dev *pdev = tp->pci_dev;
@@ -3816,6 +3842,7 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)

 static void rtl_hw_start_8126a(struct rtl8169_private *tp)
 {
+	rtl_disable_zrxdc_timeout(tp);
 	rtl_set_def_aspm_entry_latency(tp);
 	rtl_hw_start_8125_common(tp);
 }
@@ -5231,6 +5258,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
 	new_bus->priv = tp;
 	new_bus->parent = &pdev->dev;
 	new_bus->irq[0] = PHY_MAC_INTERRUPT;
+	new_bus->phy_mask = GENMASK(31, 1);
 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
 		 pci_domain_nr(pdev->bus), pci_dev_id(pdev));

diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index ab7c2750c104..702ea5a00b56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -590,6 +590,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	if (ret)
 		goto err_disable_device;
+	plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
+	plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
+
 	if (dev_of_node(&pdev->dev))
 		ret = loongson_dwmac_dt_config(pdev, plat, &res);
 	else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 50073bdade46..8f90eae93774 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,7 @@ struct rk_gmac_ops {
 	void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
 				    bool enable);
 	void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+	bool php_grf_required;
 	bool regs_valid;
 	u32 regs[];
 };
@@ -1263,6 +1264,7 @@ static const struct rk_gmac_ops rk3576_ops = {
 	.set_rgmii_speed = rk3576_set_gmac_speed,
 	.set_rmii_speed = rk3576_set_gmac_speed,
 	.set_clock_selection = rk3576_set_clock_selection,
+	.php_grf_required = true,
 	.regs_valid = true,
 	.regs = {
 		0x2a220000, /* gmac0 */
@@ -1410,6 +1412,7 @@ static const struct rk_gmac_ops rk3588_ops = {
 	.set_rgmii_speed = rk3588_set_gmac_speed,
 	.set_rmii_speed = rk3588_set_gmac_speed,
 	.set_clock_selection = rk3588_set_clock_selection,
+	.php_grf_required = true,
 	.regs_valid = true,
 	.regs = {
 		0xfe1b0000, /* gmac0 */
@@ -1830,8 +1833,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node, - "rockchip,php-grf"); + if (IS_ERR(bsp_priv->grf)) { + dev_err_probe(dev, PTR_ERR(bsp_priv->grf), + "failed to lookup rockchip,grf\n"); + return ERR_CAST(bsp_priv->grf); + } + + if (ops->php_grf_required) { + bsp_priv->php_grf = + syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,php-grf"); + if (IS_ERR(bsp_priv->php_grf)) { + dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf), + "failed to lookup rockchip,php-grf\n"); + return ERR_CAST(bsp_priv->php_grf); + } + }
if (plat->phy_node) { bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 4a0ae92b3055..ce8367b63823 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY * address. No need to mask it again. */ - reg |= 1 << H3_EPHY_ADDR_SHIFT; + reg |= ret << H3_EPHY_ADDR_SHIFT; } else { /* For SoCs without internal PHY the PHY selection bit should be * set to 0 (external PHY). diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c index 259bdac24cf2..558b791a97ed 100644 --- a/drivers/net/ethernet/tehuti/tn40.c +++ b/drivers/net/ethernet/tehuti/tn40.c @@ -1778,7 +1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ret = tn40_phy_register(priv); if (ret) { dev_err(&pdev->dev, "failed to set up PHY.\n"); - goto err_free_irq; + goto err_cleanup_swnodes; }
ret = tn40_priv_init(priv); @@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_unregister_phydev: tn40_phy_unregister(priv); +err_cleanup_swnodes: + tn40_swnodes_cleanup(priv); err_free_irq: pci_free_irq_vectors(pdev); err_unset_drvdata: @@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev) unregister_netdev(ndev);
tn40_phy_unregister(priv); + tn40_swnodes_cleanup(priv); pci_free_irq_vectors(priv->pdev); pci_set_drvdata(pdev, NULL); iounmap(priv->regs); @@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = { PCI_VENDOR_ID_ASUSTEK, 0x8709) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, PCI_VENDOR_ID_EDIMAX, 0x8103) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510, + PCI_VENDOR_ID_TEHUTI, 0x3015) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510, + PCI_VENDOR_ID_EDIMAX, 0x8102) }, { } };
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h index 490781fe5120..25da8686d469 100644 --- a/drivers/net/ethernet/tehuti/tn40.h +++ b/drivers/net/ethernet/tehuti/tn40.h @@ -4,10 +4,13 @@ #ifndef _TN40_H_ #define _TN40_H_
+#include <linux/property.h> #include "tn40_regs.h"
#define TN40_DRV_NAME "tn40xx"
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025 + #define TN40_MDIO_SPEED_1MHZ (1) #define TN40_MDIO_SPEED_6MHZ (6)
@@ -102,10 +105,39 @@ struct tn40_txdb { int size; /* Number of elements in the db */ };
+#define NODE_PROP(_NAME, _PROP) ( \ + (const struct software_node) { \ + .name = _NAME, \ + .properties = _PROP, \ + }) + +#define NODE_PAR_PROP(_NAME, _PAR, _PROP) ( \ + (const struct software_node) { \ + .name = _NAME, \ + .parent = _PAR, \ + .properties = _PROP, \ + }) + +enum tn40_swnodes { + SWNODE_MDIO, + SWNODE_PHY, + SWNODE_MAX +}; + +struct tn40_nodes { + char phy_name[32]; + char mdio_name[32]; + struct property_entry phy_props[3]; + struct software_node swnodes[SWNODE_MAX]; + const struct software_node *group[SWNODE_MAX + 1]; +}; + struct tn40_priv { struct net_device *ndev; struct pci_dev *pdev;
+ struct tn40_nodes nodes; + struct napi_struct napi; /* RX FIFOs: 1 for data (full) descs, and 2 for free descs */ struct tn40_rxd_fifo rxd_fifo0; @@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+void tn40_swnodes_cleanup(struct tn40_priv *priv); int tn40_mdiobus_init(struct tn40_priv *priv);
int tn40_phy_register(struct tn40_priv *priv); diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c index af18615d64a8..5bb0cbc87d06 100644 --- a/drivers/net/ethernet/tehuti/tn40_mdio.c +++ b/drivers/net/ethernet/tehuti/tn40_mdio.c @@ -14,6 +14,8 @@ (FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port)))) #define TN40_MDIO_CMD_READ BIT(15)
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld" + static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed) { void __iomem *regs = priv->regs; @@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum, return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val); }
+/* registers an mdio node and an aqr105 PHY at address 1 + * tn40_mdio-%id { + * ethernet-phy@1 { + * compatible = "ethernet-phy-id03a1.b4a3"; + * reg = <1>; + * firmware-name = AQR105_FIRMWARE; + * }; + * }; + */ +static int tn40_swnodes_register(struct tn40_priv *priv) +{ + struct tn40_nodes *nodes = &priv->nodes; + struct pci_dev *pdev = priv->pdev; + struct software_node *swnodes; + u32 id; + + id = pci_dev_id(pdev); + + snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1"); + snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x", + id); + + swnodes = nodes->swnodes; + + swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL); + + nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible", + "ethernet-phy-id03a1.b4a3"); + nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1); + nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name", + AQR105_FIRMWARE); + swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name, + &swnodes[SWNODE_MDIO], + nodes->phy_props); + + nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY]; + nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO]; + return software_node_register_node_group(nodes->group); +} + +void tn40_swnodes_cleanup(struct tn40_priv *priv) +{ + /* cleanup of swnodes is only needed for AQR105-based cards */ + if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) { + fwnode_handle_put(dev_fwnode(&priv->mdio->dev)); + device_remove_software_node(&priv->mdio->dev); + software_node_unregister_node_group(priv->nodes.group); + } +} + int tn40_mdiobus_init(struct tn40_priv *priv) { struct pci_dev *pdev = priv->pdev; @@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
bus->read_c45 = tn40_mdio_read_c45; bus->write_c45 = tn40_mdio_write_c45; + priv->mdio = bus; + + /* provide swnodes for AQR105-based cards only */ + if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) { + ret = tn40_swnodes_register(priv); + if (ret) { + pr_err("swnodes failed\n"); + return ret; + } + + ret = device_add_software_node(&bus->dev, + priv->nodes.group[SWNODE_MDIO]); + if (ret) { + dev_err(&pdev->dev, + "device_add_software_node failed: %d\n", ret); + goto err_swnodes_unregister; + } + }
ret = devm_mdiobus_register(&pdev->dev, bus); if (ret) { dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n", ret, bus->state, MDIOBUS_UNREGISTERED); - return ret; + goto err_swnodes_cleanup; } tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ); - priv->mdio = bus; return 0; + +err_swnodes_unregister: + software_node_unregister_node_group(priv->nodes.group); + return ret; +err_swnodes_cleanup: + tn40_swnodes_cleanup(priv); + return ret; } + +MODULE_FIRMWARE(AQR105_FIRMWARE); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 557cc71b9dd2..0eee1a0527b5 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1417,6 +1417,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; SET_NETDEV_DEV(ndev, dev); + ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) { /* CPSW Host port CPDMA interface is shared between diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 753215ebc67c..a036910f6082 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1446,8 +1446,7 @@ static u8 mcps_data_request( command.pdata.data_req.src_addr_mode = src_addr_mode; command.pdata.data_req.dst.mode = dst_address_mode; if (dst_address_mode != MAC_MODE_NO_ADDR) { - command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id); - command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id); + put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id); if (dst_address_mode == MAC_MODE_SHORT_ADDR) { command.pdata.data_req.dst.address[0] = LS_BYTE( dst_addr->short_address @@ -1795,12 +1794,12 @@ static int ca8210_skb_rx( } hdr.source.mode = data_ind[0]; dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode); - hdr.source.pan_id = *(u16 *)&data_ind[1]; + hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1])); dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id); memcpy(&hdr.source.extended_addr, &data_ind[3], 8); hdr.dest.mode = data_ind[11]; dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode); - hdr.dest.pan_id = *(u16 *)&data_ind[12]; + hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12])); dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id); memcpy(&hdr.dest.extended_addr, &data_ind[14], 8);
@@ -1927,7 +1926,7 @@ static int ca8210_skb_tx(
 	status = mcps_data_request(
 		header.source.mode,
 		header.dest.mode,
-		header.dest.pan_id,
+		le16_to_cpu(header.dest.pan_id),
 		(union macaddr *)&header.dest.extended_addr,
 		skb->len - mac_len,
 		&skb->data[mac_len],
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index 6622de48fc9e..503a9174321c 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -538,7 +538,7 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
 		rc = __i2c_transfer(midev->adapter, &msg, 1);

 		/* on tx errors, the flow can no longer be considered valid */
-		if (rc)
+		if (rc < 0)
 			mctp_i2c_invalidate_tx_flow(midev, skb);

 		break;
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 9788b820c6be..99a5eee77bec 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /* NXP C45 PHY driver
- * Copyright 2021-2023 NXP
+ * Copyright 2021-2025 NXP
  * Author: Radu Pirea radu-nicolae.pirea@oss.nxp.com
  */
@@ -18,6 +18,8 @@
#include "nxp-c45-tja11xx.h"
+#define PHY_ID_MASK			GENMASK(31, 4)
+/* Same id: TJA1103, TJA1104 */
 #define PHY_ID_TJA_1103			0x001BB010
 #define PHY_ID_TJA_1120			0x001BB031

@@ -1930,6 +1932,30 @@ static void tja1120_nmi_handler(struct phy_device *phydev,
 	}
 }
+static int nxp_c45_macsec_ability(struct phy_device *phydev) +{ + bool macsec_ability; + int phy_abilities; + + phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_ABILITIES); + macsec_ability = !!(phy_abilities & MACSEC_ABILITY); + + return macsec_ability; +} + +static int tja1103_match_phy_device(struct phy_device *phydev) +{ + return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) && + !nxp_c45_macsec_ability(phydev); +} + +static int tja1104_match_phy_device(struct phy_device *phydev) +{ + return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) && + nxp_c45_macsec_ability(phydev); +} + static const struct nxp_c45_regmap tja1120_regmap = { .vend1_ptp_clk_period = 0x1020, .vend1_event_msg_filt = 0x9010, @@ -2000,7 +2026,6 @@ static const struct nxp_c45_phy_data tja1120_phy_data = {
static struct phy_driver nxp_c45_driver[] = { { - PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103), .name = "NXP C45 TJA1103", .get_features = nxp_c45_get_features, .driver_data = &tja1103_phy_data, @@ -2022,6 +2047,31 @@ static struct phy_driver nxp_c45_driver[] = { .get_sqi = nxp_c45_get_sqi, .get_sqi_max = nxp_c45_get_sqi_max, .remove = nxp_c45_remove, + .match_phy_device = tja1103_match_phy_device, + }, + { + .name = "NXP C45 TJA1104", + .get_features = nxp_c45_get_features, + .driver_data = &tja1103_phy_data, + .probe = nxp_c45_probe, + .soft_reset = nxp_c45_soft_reset, + .config_aneg = genphy_c45_config_aneg, + .config_init = nxp_c45_config_init, + .config_intr = tja1103_config_intr, + .handle_interrupt = nxp_c45_handle_interrupt, + .read_status = genphy_c45_read_status, + .suspend = genphy_c45_pma_suspend, + .resume = genphy_c45_pma_resume, + .get_sset_count = nxp_c45_get_sset_count, + .get_strings = nxp_c45_get_strings, + .get_stats = nxp_c45_get_stats, + .cable_test_start = nxp_c45_cable_test_start, + .cable_test_get_status = nxp_c45_cable_test_get_status, + .set_loopback = genphy_c45_loopback, + .get_sqi = nxp_c45_get_sqi, + .get_sqi_max = nxp_c45_get_sqi_max, + .remove = nxp_c45_remove, + .match_phy_device = tja1104_match_phy_device, }, { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120), diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 3e9957b6aa14..b78dfcbec936 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1811,7 +1811,7 @@ bool phylink_expects_phy(struct phylink *pl) { if (pl->cfg_link_an_mode == MLO_AN_FIXED || (pl->cfg_link_an_mode == MLO_AN_INBAND && - phy_interface_mode_is_8023z(pl->link_config.interface))) + phy_interface_mode_is_8023z(pl->link_interface))) return false; return true; } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 96fa3857d8e2..2cab046749a9 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -10085,6 +10085,7 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) }, + { USB_DEVICE(VENDOR_ID_DELL, 0xb097) }, { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) }, {} }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index b70654c7ad34..151d7cdfc480 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3599,8 +3599,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) struct vmxnet3_adapter *adapter = netdev_priv(netdev); int err = 0;
-	WRITE_ONCE(netdev->mtu, new_mtu);
-
 	/*
 	 * Reset_work may be in the middle of resetting the device, wait for its
 	 * completion.
@@ -3614,6 +3612,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)

 		/* we need to re-create the rx queue based on the new mtu */
 		vmxnet3_rq_destroy_all(adapter);
+		WRITE_ONCE(netdev->mtu, new_mtu);
 		vmxnet3_adjust_rx_ring_size(adapter);
 		err = vmxnet3_rq_create_all(adapter);
 		if (err) {
@@ -3630,6 +3629,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
 				   "Closing it\n", err);
 			goto out;
 		}
+	} else {
+		WRITE_ONCE(netdev->mtu, new_mtu);
 	}

 out:
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 5e7cdd1b806f..474faccf75fd 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -227,9 +227,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
 			  be32_to_cpu(fdb->vni)))
 		goto nla_put_failure;
-	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
+	ci.ndm_used	 = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
 	ci.ndm_confirmed = 0;
-	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
+	ci.ndm_updated	 = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
 	ci.ndm_refcnt	 = 0;

 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
@@ -434,8 +434,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
 	struct vxlan_fdb *f;

 	f = __vxlan_find_mac(vxlan, mac, vni);
-	if (f && f->used != jiffies)
-		f->used = jiffies;
+	if (f && READ_ONCE(f->used) != jiffies)
+		WRITE_ONCE(f->used, jiffies);
return f; } @@ -1009,12 +1009,12 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, !(f->flags & NTF_VXLAN_ADDED_BY_USER)) { if (f->state != state) { f->state = state; - f->updated = jiffies; + WRITE_ONCE(f->updated, jiffies); notify = 1; } if (f->flags != fdb_flags) { f->flags = fdb_flags; - f->updated = jiffies; + WRITE_ONCE(f->updated, jiffies); notify = 1; } } @@ -1048,7 +1048,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, }
if (ndm_flags & NTF_USE) - f->used = jiffies; + WRITE_ONCE(f->used, jiffies);
if (notify) { if (rd == NULL) @@ -1477,7 +1477,7 @@ static bool vxlan_snoop(struct net_device *dev, src_mac, &rdst->remote_ip.sa, &src_ip->sa);
rdst->remote_ip = *src_ip; - f->updated = jiffies; + WRITE_ONCE(f->updated, jiffies); vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); } else { u32 hash_index = fdb_head_index(vxlan, src_mac, vni); @@ -2825,7 +2825,7 @@ static void vxlan_cleanup(struct timer_list *t) if (f->flags & NTF_EXT_LEARNED) continue;
- timeout = f->used + vxlan->cfg.age_interval * HZ; + timeout = READ_ONCE(f->used) + vxlan->cfg.age_interval * HZ; if (time_before_eq(timeout, jiffies)) { netdev_dbg(vxlan->dev, "garbage collect %pM\n", @@ -4340,6 +4340,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); + bool rem_ip_changed, change_igmp; struct net_device *lowerdev; struct vxlan_config conf; struct vxlan_rdst *dst; @@ -4363,8 +4364,13 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (err) return err;
+ rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip); + change_igmp = vxlan->dev->flags & IFF_UP && + (rem_ip_changed || + dst->remote_ifindex != conf.remote_ifindex); + /* handle default dst entry */ - if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) { + if (rem_ip_changed) { u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]); @@ -4408,6 +4414,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], } }
+ if (change_igmp && vxlan_addr_multicast(&dst->remote_ip)) + err = vxlan_multicast_leave(vxlan); + if (conf.age_interval != vxlan->cfg.age_interval) mod_timer(&vxlan->age_timer, jiffies);
@@ -4415,7 +4424,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (lowerdev && lowerdev != dst->remote_dev) dst->remote_dev = lowerdev; vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); - return 0; + + if (!err && change_igmp && + vxlan_addr_multicast(&dst->remote_ip)) + err = vxlan_multicast_join(vxlan); + + return err; }
static void vxlan_dellink(struct net_device *dev, struct list_head *head) diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index 65d2bc0687c8..eaf902c25e19 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved. */
 #ifndef ATH11K_DP_H
@@ -20,7 +20,6 @@ struct ath11k_ext_irq_grp;

 struct dp_rx_tid {
 	u8 tid;
-	u32 *vaddr;
 	dma_addr_t paddr;
 	u32 size;
 	u32 ba_win_sz;
@@ -37,6 +36,9 @@ struct dp_rx_tid {
 	/* Timer info related to fragments */
 	struct timer_list frag_timer;
 	struct ath11k_base *ab;
+	u32 *vaddr_unaligned;
+	dma_addr_t paddr_unaligned;
+	u32 unaligned_size;
 };

 #define DP_REO_DESC_FREE_THRESHOLD  64
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 40b52d12b432..bfb8e7b1a300 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
#include <linux/ieee80211.h> @@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { list_del(&cmd->list); rx_tid = &cmd->data; - if (rx_tid->vaddr) { - dma_unmap_single(ab->dev, rx_tid->paddr, - rx_tid->size, DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + if (rx_tid->vaddr_unaligned) { + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } kfree(cmd); } @@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) list_del(&cmd_cache->list); dp->reo_cmd_cache_flush_count--; rx_tid = &cmd_cache->data; - if (rx_tid->vaddr) { - dma_unmap_single(ab->dev, rx_tid->paddr, - rx_tid->size, DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + if (rx_tid->vaddr_unaligned) { + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } kfree(cmd_cache); } @@ -708,11 +708,11 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, if (status != HAL_REO_CMD_SUCCESS) ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", rx_tid->tid, status); - if (rx_tid->vaddr) { - dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + if (rx_tid->vaddr_unaligned) { + dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } }
@@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, if (ret) { ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", rx_tid->tid, ret); - dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } }
@@ -802,10 +802,10 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
return; free_desc: - dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; }
void ath11k_peer_rx_tid_delete(struct ath11k *ar, @@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar, if (ret != -ESHUTDOWN) ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", tid, ret); - dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; }
rx_tid->paddr = 0; + rx_tid->paddr_unaligned = 0; rx_tid->size = 0; + rx_tid->unaligned_size = 0; }
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, @@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, if (!rx_tid->active) goto unlock_exit;
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL;
rx_tid->active = false;
@@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; - u32 hw_desc_sz; - u32 *addr_aligned; - void *vaddr; + u32 hw_desc_sz, *vaddr; + void *vaddr_unaligned; dma_addr_t paddr; int ret;
@@ -1050,49 +1050,40 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, else hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); - if (!vaddr) { + rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1; + vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr, + DMA_BIDIRECTIONAL, GFP_ATOMIC); + if (!vaddr_unaligned) { spin_unlock_bh(&ab->base_lock); return -ENOMEM; }
- addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); - - ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, - ssn, pn_type); - - paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, - DMA_BIDIRECTIONAL); - - ret = dma_mapping_error(ab->dev, paddr); - if (ret) { - spin_unlock_bh(&ab->base_lock); - ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n", - peer_mac, tid, ret); - goto err_mem_free; - } - - rx_tid->vaddr = vaddr; - rx_tid->paddr = paddr; + rx_tid->vaddr_unaligned = vaddr_unaligned; + vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN); + rx_tid->paddr_unaligned = paddr; + rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr - + (unsigned long)rx_tid->vaddr_unaligned); + ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type); rx_tid->size = hw_desc_sz; rx_tid->active = true;
+ /* After dma_alloc_noncoherent, vaddr is being modified for reo qdesc setup. + * Since these changes are not reflected in the device, driver now needs to + * explicitly call dma_sync_single_for_device. + */ + dma_sync_single_for_device(ab->dev, rx_tid->paddr, + rx_tid->size, + DMA_TO_DEVICE); spin_unlock_bh(&ab->base_lock);
- ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, - paddr, tid, 1, ba_win_sz); + ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr, + tid, 1, ba_win_sz); if (ret) { ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n", peer_mac, tid, ret); ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); }
- return ret; - -err_mem_free: - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; - return ret; }
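The ath11k hunks above replace the kzalloc()+dma_map_single() REO queue descriptor with one obtained from dma_alloc_noncoherent(), aligned by hand and pushed to the device with dma_sync_single_for_device(), then released with dma_free_noncoherent() instead of unmap+kfree. A minimal sketch of that pattern, using hypothetical names (example_rx_tid, EXAMPLE_ALIGN) rather than the driver's own:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

#define EXAMPLE_ALIGN 128

struct example_rx_tid {
	void *vaddr_unaligned;
	dma_addr_t paddr_unaligned;
	dma_addr_t paddr;
	u32 unaligned_size;
	u32 size;
};

static int example_qdesc_alloc(struct device *dev, struct example_rx_tid *tid,
			       u32 desc_sz)
{
	void *aligned;

	tid->unaligned_size = desc_sz + EXAMPLE_ALIGN - 1;
	tid->vaddr_unaligned = dma_alloc_noncoherent(dev, tid->unaligned_size,
						     &tid->paddr_unaligned,
						     DMA_BIDIRECTIONAL,
						     GFP_ATOMIC);
	if (!tid->vaddr_unaligned)
		return -ENOMEM;

	aligned = PTR_ALIGN(tid->vaddr_unaligned, EXAMPLE_ALIGN);
	tid->paddr = tid->paddr_unaligned +
		     ((unsigned long)aligned -
		      (unsigned long)tid->vaddr_unaligned);
	tid->size = desc_sz;

	/* The CPU fills the descriptor through 'aligned' here, then makes
	 * those writes visible to the device; non-coherent memory is not
	 * synced automatically.
	 */
	dma_sync_single_for_device(dev, tid->paddr, tid->size, DMA_TO_DEVICE);
	return 0;
}

static void example_qdesc_free(struct device *dev, struct example_rx_tid *tid)
{
	dma_free_noncoherent(dev, tid->unaligned_size, tid->vaddr_unaligned,
			     tid->paddr_unaligned, DMA_BIDIRECTIONAL);
	tid->vaddr_unaligned = NULL;
}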
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 51252e8bc1ae..8bb8ee98188b 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -161,7 +161,7 @@ EXPORT_SYMBOL(ath12k_core_resume);
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name, size_t name_len, bool with_variant, - bool bus_type_mode) + bool bus_type_mode, bool with_default) { /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; @@ -192,7 +192,9 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name, "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", ath12k_bus_str(ab->hif.bus), ab->qmi.target.chip_id, - ab->qmi.target.board_id, variant); + with_default ? + ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id, + variant); break; }
@@ -204,19 +206,19 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name, static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name, size_t name_len) { - return __ath12k_core_create_board_name(ab, name, name_len, true, false); + return __ath12k_core_create_board_name(ab, name, name_len, true, false, false); }
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name, size_t name_len) { - return __ath12k_core_create_board_name(ab, name, name_len, false, false); + return __ath12k_core_create_board_name(ab, name, name_len, false, false, true); }
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name, size_t name_len) { - return __ath12k_core_create_board_name(ab, name, name_len, false, true); + return __ath12k_core_create_board_name(ab, name, name_len, false, true, true); }
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 7f2e9a9b4097..3faf3430effb 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -148,6 +148,7 @@ struct ath12k_ext_irq_grp { u32 num_irq; u32 grp_id; u64 timestamp; + bool napi_enabled; struct napi_struct napi; struct net_device *napi_ndev; }; diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 4c98b9de1e58..6a8874536944 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */
#include "dp_mon.h" @@ -666,6 +666,11 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, if (userid < HAL_MAX_UL_MU_USERS) { struct hal_rx_user_status *rxuser_stats = &ppdu_info->userstats[userid]; + + if (ppdu_info->num_mpdu_fcs_ok > 1 || + ppdu_info->num_mpdu_fcs_err > 1) + ppdu_info->userstats[userid].ampdu_present = true; + ppdu_info->num_users += 1;
ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats); @@ -783,8 +788,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, if (userid < HAL_MAX_UL_MU_USERS) { info[0] = __le32_to_cpu(mpdu_start->info0); ppdu_info->userid = userid; - ppdu_info->ampdu_id[userid] = - u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID); + ppdu_info->userstats[userid].ampdu_id = + u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID); }
mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); @@ -1020,15 +1025,14 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar, { struct ieee80211_supported_band *sband; u8 *ptr = NULL; - u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
rxs->flag |= RX_FLAG_MACTIME_START; rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; rxs->nss = ppduinfo->nss + 1;
- if (ampdu_id) { + if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) { rxs->flag |= RX_FLAG_AMPDU_DETAILS; - rxs->ampdu_reference = ampdu_id; + rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id; }
if (ppduinfo->he_mu_flags) { diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 44406e0b4a34..201ffdb8c44a 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -117,7 +117,7 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, le32_encode_bits(ti->data_len, HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
- tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | + tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | le32_encode_bits(ti->encap_type, HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) | le32_encode_bits(ti->encrypt_type, @@ -557,13 +557,13 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
switch (wbm_status) { case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK: - case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: - case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK); ts.ack_rssi = le32_get_bits(status_desc->info2, HTT_TX_WBM_COMP_INFO2_ACK_RSSI); ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts); break; + case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: + case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring); diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h index 739f73370015..4f745cfd7d8e 100644 --- a/drivers/net/wireless/ath/ath12k/hal_desc.h +++ b/drivers/net/wireless/ath/ath12k/hal_desc.h @@ -2966,7 +2966,7 @@ struct hal_mon_buf_ring {
#define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0)
-#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0) +#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0) #define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16) #define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17) #define HAL_MON_DEST_INFO0_INITIATOR BIT(18) diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h index 095216eabc01..8c37cbc01b1c 100644 --- a/drivers/net/wireless/ath/ath12k/hal_rx.h +++ b/drivers/net/wireless/ath/ath12k/hal_rx.h @@ -143,6 +143,8 @@ struct hal_rx_user_status { u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP]; u32 mpdu_ok_byte_count; u32 mpdu_err_byte_count; + bool ampdu_present; + u16 ampdu_id; };
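The hal_desc.h hunk above narrows HAL_MON_DEST_INFO0_END_OFFSET from bits 15:0 to the 12-bit field the hardware actually provides; with the wider mask, reserved neighbouring bits would be folded into the extracted offset. A small illustrative snippet (hypothetical names, not driver code) of how such fields are read:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical descriptor word layout in the style of the header above. */
#define EXAMPLE_INFO0_END_OFFSET	GENMASK(11, 0)
#define EXAMPLE_INFO0_FLUSH_DETECTED	BIT(16)
#define EXAMPLE_INFO0_END_OF_PPDU	BIT(17)

static inline u16 example_end_offset(u32 info0)
{
	/* u32_get_bits() masks and shifts according to the GENMASK, so the
	 * mask width must match the real field width exactly.
	 */
	return u32_get_bits(info0, EXAMPLE_INFO0_END_OFFSET);
}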
#define HAL_MAX_UL_MU_USERS 37 @@ -226,7 +228,6 @@ struct hal_rx_mon_ppdu_info { u8 addr4[ETH_ALEN]; struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS]; u8 userid; - u16 ampdu_id[HAL_MAX_UL_MU_USERS]; bool first_msdu_in_mpdu; bool is_ampdu; u8 medium_prot_type; diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 2ff866e1d7d5..45d537066345 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -481,8 +481,11 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
ath12k_pci_ext_grp_disable(irq_grp);
- napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } } }
@@ -1112,7 +1115,11 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
- napi_enable(&irq_grp->napi); + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } + ath12k_pci_ext_grp_enable(irq_grp); }
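Both pci.c hunks above hang off the new napi_enabled flag added to struct ath12k_ext_irq_grp, so napi_enable()/napi_disable() are only issued on real state transitions rather than repeated across successive enable/disable paths. A condensed sketch of the same bookkeeping around an illustrative structure of its own:

#include <linux/netdevice.h>

struct example_irq_grp {
	struct napi_struct napi;
	bool napi_enabled;
};

static void example_irq_grp_enable(struct example_irq_grp *grp)
{
	if (!grp->napi_enabled) {
		napi_enable(&grp->napi);
		grp->napi_enabled = true;
	}
}

static void example_irq_grp_disable(struct example_irq_grp *grp)
{
	if (grp->napi_enabled) {
		/* Wait for any in-flight poll before disabling. */
		napi_synchronize(&grp->napi);
		napi_disable(&grp->napi);
		grp->napi_enabled = false;
	}
}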
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index a6ba97949440..30836a09d550 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -2206,8 +2206,8 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar, arg->dwell_time_active = 50; arg->dwell_time_active_2g = 0; arg->dwell_time_passive = 150; - arg->dwell_time_active_6g = 40; - arg->dwell_time_passive_6g = 30; + arg->dwell_time_active_6g = 70; + arg->dwell_time_passive_6g = 70; arg->min_rest_time = 50; arg->max_rest_time = 500; arg->repeat_probe_time = 0; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 7fad7e75af6a..619bebd389bd 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -691,7 +691,9 @@ static int ath9k_of_init(struct ath_softc *sc) ah->ah_flags |= AH_NO_EEP_SWAP; }
- of_get_mac_address(np, common->macaddr); + ret = of_get_mac_address(np, common->macaddr); + if (ret == -EPROBE_DEFER) + return ret;
return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c index ab7c0f8d54f4..d3542af0f625 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c @@ -148,11 +148,9 @@ const struct iwl_cfg_trans_params iwl_br_trans_cfg = { .mq_rx_supported = true, .rf_id = true, .gen2 = true, - .integrated = true, .umac_prph_offset = 0x300000, .xtal_latency = 12000, .low_latency_xtal = true, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, };
const char iwl_br_name[] = "Intel(R) TBD Br device"; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 6594216f873c..cd284767ff4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -2691,7 +2691,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, } /* collect DRAM_IMR region in the last */ if (imr_reg_data.reg_tlv) - size += iwl_dump_ini_mem(fwrt, list, &reg_data, + size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data, &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index 834f7c9bb9e9..86d6286a1537 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2021-2024 Intel Corporation + * Copyright(c) 2021-2025 Intel Corporation */
#include "iwl-drv.h" @@ -673,8 +673,10 @@ int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk) struct uefi_cnv_var_eckv *data; int ret = 0;
- data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME, - "ECKV", sizeof(*data), NULL); + data = iwl_uefi_get_verified_variable_guid(fwrt->trans, + &IWL_EFI_WIFI_BT_GUID, + IWL_UEFI_ECKV_NAME, + "ECKV", sizeof(*data), NULL); if (IS_ERR(data)) return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index e525d449e656..c931f5cedb0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright(c) 2021-2024 Intel Corporation + * Copyright(c) 2021-2025 Intel Corporation */ #ifndef __iwl_fw_uefi__ #define __iwl_fw_uefi__ @@ -19,7 +19,7 @@ #define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS" #define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC" #define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD" -#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV" +#define IWL_UEFI_ECKV_NAME L"UefiCnvCommonECKV" #define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg" #define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM" #define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing" diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 08d990ba8a79..ce787326aa69 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include <linux/firmware.h> #include "iwl-drv.h" @@ -1372,15 +1372,15 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, switch (tp_id) { case IWL_FW_INI_TIME_POINT_EARLY: iwl_dbg_tlv_init_cfg(fwrt); - iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_update_drams(fwrt); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); + iwl_dbg_tlv_apply_config(fwrt, conf_list); break; case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: iwl_dbg_tlv_apply_buffers(fwrt); iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); + iwl_dbg_tlv_apply_config(fwrt, conf_list); break; case IWL_FW_INI_TIME_POINT_PERIODIC: iwl_dbg_tlv_set_periodic_trigs(fwrt); @@ -1390,14 +1390,14 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, iwl_dbg_tlv_check_fw_pkt); + iwl_dbg_tlv_apply_config(fwrt, conf_list); break; default: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); + iwl_dbg_tlv_apply_config(fwrt, conf_list); break; } } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 3b3dcaf33c9d..510e04b721da 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021, 2023-2024 Intel Corporation + * Copyright (C) 2019-2021, 2023-2025 Intel Corporation */ #include <linux/kernel.h> #include <linux/bsearch.h> @@ -419,6 +419,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_tx); void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs, bool is_flush) { + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return; + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "bad state = %d\n", trans->state)) return; @@ -451,6 +454,9 @@ 
IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue) { + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return -EIO; + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "bad state = %d\n", trans->state)) return -EIO; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 55245f913286..2ed7a0d77ef8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -773,7 +773,11 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
target.bssid = bssid; target.cipher = cipher; + target.tk = NULL; ieee80211_iter_keys(mvm->hw, vif, iter, &target); + + if (!WARN_ON(!target.tk)) + memcpy(tk, target.tk, TK_11AZ_LEN); } else { memcpy(tk, entry->tk, sizeof(entry->tk)); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d37d83d24635..3fd257f770ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4096,6 +4096,20 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm, return 0; }
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool update) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (!iwl_mvm_has_rlc_offload(mvm)) + return; + + mvmvif->ps_disabled = !vif->cfg.ps; + + if (update) + iwl_mvm_power_update_mac(mvm); +} + /* Common part for MLD and non-MLD modes */ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -4188,6 +4202,7 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, new_state == IEEE80211_STA_AUTHORIZED) { ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta, callbacks); + iwl_mvm_smps_workaround(mvm, vif, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index e252f0dcea20..04da02bdd953 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022-2024 Intel Corporation + * Copyright (C) 2022-2025 Intel Corporation */ #include "mvm.h"
@@ -961,6 +961,7 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, }
if (changes & BSS_CHANGED_PS) { + iwl_mvm_smps_workaround(mvm, vif, false); ret = iwl_mvm_power_update_mac(mvm); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ef07cff203b0..7d86d273092a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -3042,4 +3042,7 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, bool is_ap); + +void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool update); #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 9141ea57abfc..68989d183e82 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -587,6 +587,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name), diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 66f0f5377ac1..738bafc3749b 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -403,12 +403,14 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && bss_desc->bcn_ht_oper->ht_param & - IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) + IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) { + chan_list->chan_scan_param[0].radio_type |= + CHAN_BW_40MHZ << 2; SET_SECONDARYCHAN(chan_list->chan_scan_param[0]. radio_type, (bss_desc->bcn_ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET)); - + } *buffer += struct_size(chan_list, chan_scan_param, 1); ret_len += struct_size(chan_list, chan_scan_param, 1); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index e2e9b5ece74e..a6ac8e5512eb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -489,6 +489,7 @@ struct mt76_hw_cap { #define MT_DRV_RX_DMA_HDR BIT(3) #define MT_DRV_HW_MGMT_TXQ BIT(4) #define MT_DRV_AMSDU_OFFLOAD BIT(5) +#define MT_DRV_IGNORE_TXS_FAILED BIT(6)
struct mt76_driver_ops { u32 drv_flags; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h index db0c29e65185..487ad716f872 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -314,6 +314,9 @@ enum tx_frag_idx { #define MT_TXFREE_INFO_COUNT GENMASK(27, 24) #define MT_TXFREE_INFO_STAT GENMASK(29, 28)
+#define MT_TXS_HDR_SIZE 4 /* Unit: DW */ +#define MT_TXS_SIZE 12 /* Unit: DW */ + #define MT_TXS0_BW GENMASK(31, 29) #define MT_TXS0_TID GENMASK(28, 26) #define MT_TXS0_AMPDU BIT(25) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 1eb955f3ca13..911e162a4598 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -156,7 +156,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) static const struct mt76_driver_ops drv_ops = { .txwi_size = sizeof(struct mt76x02_txwi), .drv_flags = MT_DRV_TX_ALIGNED4_SKBS | - MT_DRV_SW_RX_AIRTIME, + MT_DRV_SW_RX_AIRTIME | + MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x0_set_channel, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index b031c500b741..90e5666c0857 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -214,7 +214,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { static const struct mt76_driver_ops drv_ops = { - .drv_flags = MT_DRV_SW_RX_AIRTIME, + .drv_flags = MT_DRV_SW_RX_AIRTIME | + MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x0_set_channel, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 67c9d1caa0bd..55f076231bdc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -22,7 +22,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) static const struct mt76_driver_ops drv_ops = { .txwi_size = sizeof(struct mt76x02_txwi), .drv_flags = MT_DRV_TX_ALIGNED4_SKBS | - MT_DRV_SW_RX_AIRTIME, + MT_DRV_SW_RX_AIRTIME | + MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x2e_set_channel, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index a4f4d12f904e..84ef80ab4afb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -30,7 +30,8 @@ static int mt76x2u_probe(struct usb_interface *intf, const struct usb_device_id *id) { static const struct mt76_driver_ops drv_ops = { - .drv_flags = MT_DRV_SW_RX_AIRTIME, + .drv_flags = MT_DRV_SW_RX_AIRTIME | + MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x2u_set_channel, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 8476f9caa98d..2396e1795fe1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -616,6 +616,54 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, return ret; }
+static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val) +{ + struct { + u8 rsv[4]; + + __le16 tag; + __le16 len; + + __le32 addr; + __le32 valid; + u8 data[MT7925_EEPROM_BLOCK_SIZE]; + } __packed req = { + .tag = cpu_to_le16(1), + .len = cpu_to_le16(sizeof(req) - 4), + .addr = cpu_to_le32(round_down(offset, + MT7925_EEPROM_BLOCK_SIZE)), + }; + struct evt { + u8 rsv[4]; + + __le16 tag; + __le16 len; + + __le32 ver; + __le32 addr; + __le32 valid; + __le32 size; + __le32 magic_num; + __le32 type; + __le32 rsv1[4]; + u8 data[32]; + } __packed *res; + struct sk_buff *skb; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + res = (struct evt *)skb->data; + *val = res->data[offset % MT7925_EEPROM_BLOCK_SIZE]; + + dev_kfree_skb(skb); + + return 0; +} + static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name) { const struct mt76_connac2_fw_trailer *hdr; @@ -624,13 +672,20 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name) struct mt76_dev *mdev = &dev->mt76; struct mt792x_phy *phy = &dev->phy; const struct firmware *fw; + u8 *clc_base = NULL, hw_encap = 0; int ret, i, len, offset = 0; - u8 *clc_base = NULL;
if (mt7925_disable_clc || mt76_is_usb(&dev->mt76)) return 0;
+ if (mt76_is_mmio(&dev->mt76)) { + ret = mt7925_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap); + if (ret) + return ret; + hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP); + } + ret = request_firmware(&fw, fw_name, mdev->dev); if (ret) return ret; @@ -675,6 +730,10 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name) if (phy->clc[clc->idx]) continue;
+ /* header content sanity */ + if (u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap) + continue; + phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc, le32_to_cpu(clc->len), GFP_KERNEL); @@ -3228,6 +3287,9 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, else uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ if (cmd == MCU_UNI_CMD(HIF_CTRL)) + uni_txd->option &= ~MCU_CMD_ACK; + goto exit; }
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h index df3c705d1cb3..4ad779329b8f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -147,9 +147,12 @@ enum mt7925_eeprom_field { MT_EE_CHIP_ID = 0x000, MT_EE_VERSION = 0x002, MT_EE_MAC_ADDR = 0x004, + MT_EE_HW_TYPE = 0xa71, __MT_EE_MAX = 0x9ff };
+#define MT_EE_HW_TYPE_ENCAP GENMASK(1, 0) + enum { TXPWR_USER, TXPWR_EEPROM, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index f590902fdeea..ef2d7eaaaffd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -1399,7 +1399,7 @@ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len) mt7996_mac_tx_free(dev, data, len); return false; case PKT_TYPE_TXS: - for (rxd += 4; rxd + 8 <= end; rxd += 8) + for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE) mt7996_mac_add_txs(dev, rxd); return false; case PKT_TYPE_RX_FW_MONITOR: @@ -1442,7 +1442,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, mt7996_mcu_rx_event(dev, skb); break; case PKT_TYPE_TXS: - for (rxd += 4; rxd + 8 <= end; rxd += 8) + for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE) mt7996_mac_add_txs(dev, rxd); dev_kfree_skb(skb); break; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h index 43468bcaffc6..a75e1c9435bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h @@ -908,7 +908,8 @@ enum { UNI_CMD_SER_SET_RECOVER_L3_TX_DISABLE, UNI_CMD_SER_SET_RECOVER_L3_BF, UNI_CMD_SER_SET_RECOVER_L4_MDP, - UNI_CMD_SER_SET_RECOVER_FULL, + UNI_CMD_SER_SET_RECOVER_FROM_ETH, + UNI_CMD_SER_SET_RECOVER_FULL = 8, UNI_CMD_SER_SET_SYSTEM_ASSERT, /* action */ UNI_CMD_SER_ENABLE = 1, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index 442f72450352..b6209ed1cfe0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -281,7 +281,7 @@ static int mt7996_mmio_wed_reset(struct mtk_wed_device *wed) if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state)) return -EBUSY;
- ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_L1, + ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_FROM_ETH, mphy->band_idx); if (ret) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index ce193e625666..065a1e453745 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -100,7 +100,8 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags, return;
/* Tx status can be unreliable. if it fails, mark the frame as ACKed */ - if (flags & MT_TX_CB_TXS_FAILED) { + if (flags & MT_TX_CB_TXS_FAILED && + (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) { info->status.rates[0].count = 0; info->status.rates[0].idx = -1; info->flags |= IEEE80211_TX_STAT_ACK; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c index 4ce0c05c5129..569856ca677f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c @@ -860,9 +860,10 @@ rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len) return len;
write_error: - dev_info(&udev->dev, - "%s: Failed to write block at addr: %04x size: %04x\n", - __func__, addr, blocksize); + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE) + dev_info(&udev->dev, + "%s: Failed to write block at addr: %04x size: %04x\n", + __func__, addr, blocksize); return -EAGAIN; }
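With the hunk above, a block-write failure is logged only when RTL8XXXU_DEBUG_REG_WRITE is enabled while the function keeps returning -EAGAIN; the next hunk then retries the whole firmware download a bounded number of times instead of giving up on the first failure. A stand-alone sketch of that retry shape (helper names here are hypothetical):

#include <linux/device.h>
#include <linux/errno.h>

static int example_download_firmware_retry(struct device *dev,
					   int (*download)(struct device *dev))
{
	int ret = -EAGAIN;

	for (int retry = 5; retry >= 0; retry--) {
		ret = download(dev);
		if (ret != -EAGAIN)
			break;	/* success or a non-retryable error */
		if (retry)
			dev_dbg(dev, "retrying firmware download (%d left)\n",
				retry);
	}

	return ret;
}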
@@ -4064,8 +4065,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) */ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);
- ret = rtl8xxxu_download_firmware(priv); - dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret); + for (int retry = 5; retry >= 0 ; retry--) { + ret = rtl8xxxu_download_firmware(priv); + dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret); + if (ret != -EAGAIN) + break; + if (retry) + dev_dbg(dev, "%s: retry firmware download\n", __func__); + } if (ret) goto exit; ret = rtl8xxxu_start_firmware(priv); diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index 564f5988ee82..d1c4f5cdcb21 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -783,7 +783,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev, if (!check_firmware_size(data, size)) return -EINVAL;
- if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp)) + if (rtwdev->chip->ltecoex_addr && + !ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp)) return -EBUSY;
wlan_cpu_enable(rtwdev, false); @@ -801,7 +802,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
wlan_cpu_enable(rtwdev, true);
- if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) { + if (rtwdev->chip->ltecoex_addr && + !ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) { ret = -EBUSY; goto dlfw_fail; } diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index bbdef38c7e34..a808af2f085e 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1543,6 +1543,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; + int i;
ht_cap->ht_supported = true; ht_cap->cap = 0; @@ -1562,25 +1563,20 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap->ampdu_density = chip->ampdu_density; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (efuse->hw_cap.nss > 1) { - ht_cap->mcs.rx_mask[0] = 0xFF; - ht_cap->mcs.rx_mask[1] = 0xFF; - ht_cap->mcs.rx_mask[4] = 0x01; - ht_cap->mcs.rx_highest = cpu_to_le16(300); - } else { - ht_cap->mcs.rx_mask[0] = 0xFF; - ht_cap->mcs.rx_mask[1] = 0x00; - ht_cap->mcs.rx_mask[4] = 0x01; - ht_cap->mcs.rx_highest = cpu_to_le16(150); - } + + for (i = 0; i < efuse->hw_cap.nss; i++) + ht_cap->mcs.rx_mask[i] = 0xFF; + ht_cap->mcs.rx_mask[4] = 0x01; + ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss); }
static void rtw_init_vht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_vht_cap *vht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; - u16 mcs_map; + u16 mcs_map = 0; __le16 highest; + int i;
if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE && efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT) @@ -1603,21 +1599,15 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev, if (rtw_chip_has_rx_ldpc(rtwdev)) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; - if (efuse->hw_cap.nss > 1) { - highest = cpu_to_le16(780); - mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2; - } else { - highest = cpu_to_le16(390); - mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2; + for (i = 0; i < 8; i++) { + if (i < efuse->hw_cap.nss) + mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); + else + mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); }
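The two rtw88 hunks above drop the hard-coded 1SS/2SS tables and derive the HT RX mask and VHT MCS map from efuse->hw_cap.nss. As a rough illustration (a plain helper, not the driver code), the VHT map packs one 2-bit capability per spatial stream and the advertised highest long-GI rate scales at 390 Mbps per stream:

#include <linux/ieee80211.h>

/* Build a VHT RX MCS map for 'nss' spatial streams: MCS 0-9 supported for
 * each present stream, the remaining of the 8 stream slots marked not
 * supported (2 bits per stream).
 */
static u16 example_vht_mcs_map(unsigned int nss)
{
	u16 mcs_map = 0;
	int i;

	for (i = 0; i < 8; i++) {
		if (i < nss)
			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
		else
			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
	}

	return mcs_map;
}

/* Highest supported long-GI rate in Mbps: 390 for 1SS, 780 for 2SS, ... */
static u16 example_vht_highest_rate(unsigned int nss)
{
	return 390 * nss;
}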
+ highest = cpu_to_le16(390 * efuse->hw_cap.nss); + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.rx_highest = highest; diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 4d9b8668e8b0..2708ee2f12a4 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -109,6 +109,7 @@ #define BIT_SHIFT_ROM_PGE 16 #define BIT_FW_INIT_RDY BIT(15) #define BIT_FW_DW_RDY BIT(14) +#define BIT_CPU_CLK_SEL (BIT(12) | BIT(13)) #define BIT_RPWM_TOGGLE BIT(7) #define BIT_RAM_DL_SEL BIT(7) /* legacy only */ #define BIT_DMEM_CHKSUM_OK BIT(6) @@ -126,7 +127,7 @@ BIT_CHECK_SUM_OK) #define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \ BIT_WINTINI_RDY | BIT_RAM_DL_SEL) -#define FW_READY_MASK 0xffff +#define FW_READY_MASK (0xffff & ~BIT_CPU_CLK_SEL)
#define REG_MCU_TST_CFG 0x84 #define VAL_FW_TRIGGER 0x1 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 6edb17aea90e..4a6c0a9266a0 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -976,11 +976,11 @@ static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc, }
static void -rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) +rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, + u8 rs, u32 *phy_pwr_idx) { struct rtw_hal *hal = &rtwdev->hal; static const u32 offset_txagc[2] = {0x1d00, 0x1d80}; - static u32 phy_pwr_idx; u8 rate, rate_idx, pwr_index, shift; int j;
@@ -988,12 +988,12 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) rate = rtw_rate_section[rs][j]; pwr_index = hal->tx_pwr_tbl[path][rate]; shift = rate & 0x3; - phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); + *phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); if (shift == 0x3) { rate_idx = rate & 0xfc; rtw_write32(rtwdev, offset_txagc[path] + rate_idx, - phy_pwr_idx); - phy_pwr_idx = 0; + *phy_pwr_idx); + *phy_pwr_idx = 0; } } } @@ -1001,11 +1001,13 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev) { struct rtw_hal *hal = &rtwdev->hal; + u32 phy_pwr_idx = 0; int rs, path;
for (path = 0; path < hal->rf_path_num; path++) { for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) - rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs); + rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs, + &phy_pwr_idx); } }
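The rtw8822b fix above removes a function-local static accumulator, which silently shared state across RF paths and callers, and has the caller own the value instead. A generic sketch of the caller-owned form, with hypothetical names:

#include <linux/types.h>

/* Accumulate four 8-bit power indexes into one 32-bit register value; the
 * accumulator lives in the caller, so each invocation starts from zero
 * instead of inheriting a hidden 'static u32' from the previous path.
 */
static void example_accumulate_rate(u32 pwr_index, u8 rate, u32 *acc)
{
	u8 shift = rate & 0x3;

	*acc |= pwr_index << (shift * 8);
	if (shift == 0x3) {
		/* ...write *acc to the TX AGC register here... */
		*acc = 0;
	}
}

static void example_set_rates(const u8 *rates, int n, const u32 *pwr_tbl)
{
	u32 acc = 0;	/* fresh per call */
	int i;

	for (i = 0; i < n; i++)
		example_accumulate_rate(pwr_tbl[rates[i]], rates[i], &acc);
}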
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c index e222d3c01a77..66819f694405 100644 --- a/drivers/net/wireless/realtek/rtw88/util.c +++ b/drivers/net/wireless/realtek/rtw88/util.c @@ -101,7 +101,8 @@ void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss) *nss = 4; *mcs = rate - DESC_RATEVHT4SS_MCS0; } else if (rate >= DESC_RATEMCS0 && - rate <= DESC_RATEMCS15) { + rate <= DESC_RATEMCS31) { + *nss = 0; *mcs = rate - DESC_RATEMCS0; } } diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index 8d54d71fcf53..6cdbf02f405a 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -89,10 +89,10 @@ static const struct rtw89_btc_fbtc_slot s_def[] = { [CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX), [CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO), [CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX), - [CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX), - [CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO), + [CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX), + [CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO), [CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX), - [CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO), + [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO), [CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX), [CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO), [CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO), @@ -5356,7 +5356,8 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { + if (btc->cx.state_map != BTC_WLINKING && + RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { _action_wl_25g_mcc(rtwdev); rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n"); } else if (rtwdev->dbcc_en) { @@ -7178,6 +7179,8 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx) _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC); }
+ btc->dm.tdma_instant_excute = 1; + _run_coex(rtwdev, BTC_RSN_NTFY_SCAN_FINISH); }
@@ -7630,7 +7633,8 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, else wl->status.map.connecting = 0;
- if (state == BTC_ROLE_MSTS_STA_DIS_CONN) + if (state == BTC_ROLE_MSTS_STA_DIS_CONN || + state == BTC_ROLE_MSTS_STA_CONN_END) wl->status.map._4way = false;
_run_coex(rtwdev, BTC_RSN_NTFY_ROLE_INFO); diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index f82a26be6fa8..83b22bd0ce81 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -4862,8 +4862,6 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
rtw89_hci_mac_pre_deinit(rtwdev);
- rtw89_mac_pwr_off(rtwdev); - return 0; }
@@ -4944,36 +4942,45 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
rtw89_read_chip_ver(rtwdev);
+ ret = rtw89_mac_pwr_on(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to power on\n"); + return ret; + } + ret = rtw89_wait_firmware_completion(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to wait firmware completion\n"); - return ret; + goto out; }
ret = rtw89_fw_recognize(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to recognize firmware\n"); - return ret; + goto out; }
ret = rtw89_chip_efuse_info_setup(rtwdev); if (ret) - return ret; + goto out;
ret = rtw89_fw_recognize_elements(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to recognize firmware elements\n"); - return ret; + goto out; }
ret = rtw89_chip_board_info_setup(rtwdev); if (ret) - return ret; + goto out;
rtw89_core_setup_rfe_parms(rtwdev); rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
- return 0; +out: + rtw89_mac_pwr_off(rtwdev); + + return ret; } EXPORT_SYMBOL(rtw89_chip_info_setup);
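The core.c refactor above powers the MAC on once at the top of rtw89_chip_info_setup() and funnels every exit, success or failure, through a single label that powers it back off. A minimal sketch of that shape with placeholder setup steps:

#include <linux/errno.h>

struct example_dev { int dummy; };

static int example_pwr_on(struct example_dev *d) { return 0; }
static void example_pwr_off(struct example_dev *d) { }
static int example_step_a(struct example_dev *d) { return 0; }
static int example_step_b(struct example_dev *d) { return 0; }

static int example_chip_info_setup(struct example_dev *d)
{
	int ret;

	ret = example_pwr_on(d);
	if (ret)
		return ret;	/* nothing to undo yet */

	ret = example_step_a(d);
	if (ret)
		goto out;

	ret = example_step_b(d);

out:
	/* Runs on success and on every failure after power-on. */
	example_pwr_off(d);
	return ret;
}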
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index ff3048d2489f..4f64ea392e6c 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -17,6 +17,7 @@ struct rtw89_dev; struct rtw89_pci_info; struct rtw89_mac_gen_def; struct rtw89_phy_gen_def; +struct rtw89_fw_blacklist; struct rtw89_efuse_block_cfg; struct rtw89_h2c_rf_tssi; struct rtw89_fw_txpwr_track_cfg; @@ -4232,6 +4233,7 @@ struct rtw89_chip_info { bool try_ce_fw; u8 bbmcu_nr; u32 needed_fw_elms; + const struct rtw89_fw_blacklist *fw_blacklist; u32 fifo_size; bool small_fifo_size; u32 dle_scc_rsvd_size; diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 620e076d1b59..e5c90050e711 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -38,6 +38,16 @@ struct rtw89_arp_rsp {
static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = { + .ver = 0x00, + .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, +}; +EXPORT_SYMBOL(rtw89_fw_blacklist_default); + union rtw89_fw_element_arg { size_t offset; enum rtw89_rf_path rf_path; @@ -285,7 +295,7 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, if (!sec->secure_boot) goto out;
- sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); + sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v); if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) goto ignore;
@@ -315,6 +325,46 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, return 0; }
+static int __check_secure_blacklist(struct rtw89_dev *rtwdev, + struct rtw89_fw_bin_info *info, + struct rtw89_fw_hdr_section_info *section_info, + const void *content) +{ + const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist; + const union rtw89_fw_section_mssc_content *section_content = content; + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; + u8 byte_idx; + u8 bit_mask; + + if (!sec->secure_boot) + return 0; + + if (!info->secure_section_exist || section_info->ignore) + return 0; + + if (!chip_blacklist) { + rtw89_err(rtwdev, "chip no blacklist for secure firmware\n"); + return -ENOENT; + } + + byte_idx = section_content->blacklist.bit_in_chip_list >> 3; + bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7); + + if (section_content->blacklist.ver > chip_blacklist->ver) { + rtw89_err(rtwdev, "chip blacklist out of date (%u, %u)\n", + section_content->blacklist.ver, chip_blacklist->ver); + return -EINVAL; + } + + if (chip_blacklist->list[byte_idx] & bit_mask) { + rtw89_err(rtwdev, "firmware %u in chip blacklist\n", + section_content->blacklist.ver); + return -EPERM; + } + + return 0; +} + static int __parse_security_section(struct rtw89_dev *rtwdev, struct rtw89_fw_bin_info *info, struct rtw89_fw_hdr_section_info *section_info, @@ -340,7 +390,7 @@ static int __parse_security_section(struct rtw89_dev *rtwdev, info->secure_section_exist = true; }
- return 0; + return __check_secure_blacklist(rtwdev, info, section_info, content); }
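The new __check_secure_blacklist() above treats the chip's 32-byte list as a 256-bit revocation map and tests one firmware-supplied bit in it. The byte/bit arithmetic is the usual bitmap lookup, sketched here with hypothetical names:

#include <linux/bits.h>
#include <linux/types.h>

struct example_blacklist {
	u8 ver;
	u8 list[32];	/* 256 bits: bit N set means entry N is revoked */
};

static bool example_is_blacklisted(const struct example_blacklist *bl,
				   u8 bit_in_list)
{
	u8 byte_idx = bit_in_list >> 3;		/* which of the 32 bytes */
	u8 bit_mask = BIT(bit_in_list & 0x7);	/* which bit within it   */

	return bl->list[byte_idx] & bit_mask;
}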
static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, @@ -451,6 +501,30 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, } }
+static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev, + const struct firmware *firmware, + const struct rtw89_mfw_hdr *mfw_hdr) +{ + const void *mfw = firmware->data; + u32 mfw_len = firmware->size; + u8 fw_nr = mfw_hdr->fw_nr; + const void *ptr; + + if (fw_nr == 0) { + rtw89_err(rtwdev, "mfw header has no fw entry\n"); + return -ENOENT; + } + + ptr = &mfw_hdr->info[fw_nr]; + + if (ptr > mfw + mfw_len) { + rtw89_err(rtwdev, "mfw header out of address\n"); + return -EFAULT; + } + + return 0; +} + static int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, struct rtw89_fw_suit *fw_suit, bool nowarn) @@ -461,6 +535,7 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, u32 mfw_len = firmware->size; const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; const struct rtw89_mfw_info *mfw_info = NULL, *tmp; + int ret; int i;
if (mfw_hdr->sig != RTW89_MFW_SIG) { @@ -473,6 +548,10 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, return 0; }
+ ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); + if (ret) + return ret; + for (i = 0; i < mfw_hdr->fw_nr; i++) { tmp = &mfw_hdr->info[i]; if (tmp->type != type) @@ -502,6 +581,12 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, found: fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); fw_suit->size = le32_to_cpu(mfw_info->size); + + if (fw_suit->data + fw_suit->size > mfw + mfw_len) { + rtw89_err(rtwdev, "fw_suit %d out of address\n", type); + return -EFAULT; + } + return 0; }
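The two hunks above stop trusting the multi-firmware (mfw) header blindly: the header must carry at least one entry, the entry table must fit inside the firmware blob, and the selected image's offset plus size must not run past its end. A rough standalone sketch of those bounds checks; the container layout and names here are illustrative, not the rtw89 format:

#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_fw_entry {
	__le32 shift;	/* offset of the image inside the blob */
	__le32 size;
} __packed;

struct example_fw_hdr {
	u8 fw_nr;
	u8 rsv[3];
	struct example_fw_entry info[];
} __packed;

static int example_fw_lookup(const u8 *blob, size_t blob_len, int idx,
			     const u8 **image, u32 *image_len)
{
	const struct example_fw_hdr *hdr = (const void *)blob;
	const struct example_fw_entry *ent;
	u32 shift, size;

	if (blob_len < sizeof(*hdr) || hdr->fw_nr == 0 ||
	    idx < 0 || idx >= hdr->fw_nr)
		return -ENOENT;

	/* The entry table itself must lie inside the blob. */
	if ((const u8 *)&hdr->info[hdr->fw_nr] > blob + blob_len)
		return -EFAULT;

	ent = &hdr->info[idx];
	shift = le32_to_cpu(ent->shift);
	size = le32_to_cpu(ent->size);

	/* The image the entry points at must not run past the end. */
	if (shift > blob_len || size > blob_len - shift)
		return -EFAULT;

	*image = blob + shift;
	*image_len = size;
	return 0;
}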
@@ -513,12 +598,17 @@ static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) (const struct rtw89_mfw_hdr *)firmware->data; const struct rtw89_mfw_info *mfw_info; u32 size; + int ret;
if (mfw_hdr->sig != RTW89_MFW_SIG) { rtw89_warn(rtwdev, "not mfw format\n"); return 0; }
+ ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); + if (ret) + return ret; + mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
@@ -1234,7 +1324,6 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, ret = rtw89_h2c_tx(rtwdev, skb, false); if (ret) { rtw89_err(rtwdev, "failed to send h2c\n"); - ret = -1; goto fail; }
@@ -1311,7 +1400,6 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, ret = rtw89_h2c_tx(rtwdev, skb, true); if (ret) { rtw89_err(rtwdev, "failed to send h2c\n"); - ret = -1; goto fail; }
@@ -3080,9 +3168,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
- h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0, + h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) | + le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0, CCTLINFO_G7_W6_ULDL); - h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); + h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);
if (rtwsta_link) { h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index ccbbc43f33fe..502ece540b9d 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -639,6 +639,11 @@ struct rtw89_fw_mss_pool_hdr { } __packed;
union rtw89_fw_section_mssc_content { + struct { + u8 pad[0x20]; + u8 bit_in_chip_list; + u8 ver; + } __packed blacklist; struct { u8 pad[58]; __le32 v; @@ -649,6 +654,13 @@ union rtw89_fw_section_mssc_content { } __packed key_sign_len; } __packed;
+struct rtw89_fw_blacklist { + u8 ver; + u8 list[32]; +}; + +extern const struct rtw89_fw_blacklist rtw89_fw_blacklist_default; + static inline void SET_CTRL_INFO_MACID(void *table, u32 val) { le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)); diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 4574aa62839b..9b09d4b7dea5 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1491,6 +1491,21 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) #undef PWR_ACT }
+int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = rtw89_mac_power_switch(rtwdev, true); + if (ret) { + rtw89_mac_power_switch(rtwdev, false); + ret = rtw89_mac_power_switch(rtwdev, true); + if (ret) + return ret; + } + + return 0; +} + void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev) { rtw89_mac_power_switch(rtwdev, false); @@ -3918,14 +3933,6 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb) { int ret;
- ret = rtw89_mac_power_switch(rtwdev, true); - if (ret) { - rtw89_mac_power_switch(rtwdev, false); - ret = rtw89_mac_power_switch(rtwdev, true); - if (ret) - return ret; - } - rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
if (include_bb) { @@ -3958,6 +3965,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev) bool include_bb = !!chip->bbmcu_nr; int ret;
+ ret = rtw89_mac_pwr_on(rtwdev); + if (ret) + return ret; + ret = rtw89_mac_partial_init(rtwdev, include_bb); if (ret) goto fail; @@ -3989,7 +4000,7 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
return ret; fail: - rtw89_mac_power_switch(rtwdev, false); + rtw89_mac_pwr_off(rtwdev);
return ret; } @@ -4745,6 +4756,32 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask); }
+void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link) +{ + struct ieee80211_bss_conf *bss_conf; + bool set; + u32 reg; + + if (rtwdev->chip->chip_gen != RTW89_CHIP_BE) + return; + + rcu_read_lock(); + + bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); + set = bss_conf->he_support && !bss_conf->eht_support; + + rcu_read_unlock(); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CLIENT_OM_CTRL, + rtwvif_link->mac_idx); + + if (set) + rtw89_write32_set(rtwdev, reg, B_BE_TRIG_DIS_EHTTB); + else + rtw89_write32_clr(rtwdev, reg, B_BE_TRIG_DIS_EHTTB); +} + void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) { rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link); diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 0c269961a573..7974849f41e2 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -1120,6 +1120,7 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l rtw89_write32_set(rtwdev, reg, bit); }
+int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev); void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev); int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb); int rtw89_mac_init(struct rtw89_dev *rtwdev); @@ -1160,6 +1161,8 @@ void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, bool en); void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); +void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link); void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en); int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif); diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 8351a70d325d..3a1a2b243adf 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -669,6 +669,7 @@ static void __rtw89_ops_bss_link_assoc(struct rtw89_dev *rtwdev, rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, rtwvif_link); rtw89_mac_port_update(rtwdev, rtwvif_link); rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, rtwvif_link); + rtw89_mac_set_he_tb(rtwdev, rtwvif_link); }
static void __rtw89_ops_bss_assoc(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 69678eab2309..9fbcc7fee290 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -7093,6 +7093,10 @@ #define B_BE_MACLBK_RDY_NUM_MASK GENMASK(7, 3) #define B_BE_MACLBK_EN BIT(0)
+#define R_BE_CLIENT_OM_CTRL 0x11040 +#define R_BE_CLIENT_OM_CTRL_C1 0x15040 +#define B_BE_TRIG_DIS_EHTTB BIT(24) + #define R_BE_WMAC_NAV_CTL 0x11080 #define R_BE_WMAC_NAV_CTL_C1 0x15080 #define B_BE_WMAC_NAV_UPPER_EN BIT(26) diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index bb064a086970..e8df68818da0 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -695,6 +695,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rtw89_dev *rtwdev = hw->priv;
+ wiphy_lock(wiphy); mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev);
@@ -712,6 +713,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
exit: mutex_unlock(&rtwdev->mutex); + wiphy_unlock(wiphy); }
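Both the regd.c hunk above and the ser.c hunks below wrap the existing rtwdev->mutex critical sections with wiphy_lock()/wiphy_unlock(), keeping a single lock order (wiphy lock outer, driver mutex inner), presumably to match the order in effect when mac80211 calls into the driver. A schematic sketch of that ordering, with the actual work elided and hypothetical names:

#include <net/cfg80211.h>
#include <linux/mutex.h>

struct example_priv {
	struct wiphy *wiphy;
	struct mutex mutex;
};

static void example_do_locked_work(struct example_priv *priv,
				   void (*work)(struct example_priv *priv))
{
	/* Always wiphy lock first, driver mutex second. */
	wiphy_lock(priv->wiphy);
	mutex_lock(&priv->mutex);

	work(priv);

	mutex_unlock(&priv->mutex);
	wiphy_unlock(priv->wiphy);
}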
/* Maximum Transmit Power field (@raw) can be EIRP or PSD. diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index f9766bf30e71..0d2a1e712b34 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -2443,6 +2443,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .try_ce_fw = true, .bbmcu_nr = 0, .needed_fw_elms = 0, + .fw_blacklist = NULL, .fifo_size = 196608, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 42d369d2e916..5f08207936c6 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2159,6 +2159,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .try_ce_fw = false, .bbmcu_nr = 0, .needed_fw_elms = 0, + .fw_blacklist = NULL, .fifo_size = 458752, .small_fifo_size = false, .dle_scc_rsvd_size = 0, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index 364aa21cbd44..0e03d97ba1cf 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -797,6 +797,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .try_ce_fw = true, .bbmcu_nr = 0, .needed_fw_elms = 0, + .fw_blacklist = &rtw89_fw_blacklist_default, .fifo_size = 196608, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c index dab7e71ec6a1..1dd3e51bab9f 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c @@ -731,6 +731,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = { .try_ce_fw = true, .bbmcu_nr = 0, .needed_fw_elms = RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ, + .fw_blacklist = &rtw89_fw_blacklist_default, .fifo_size = 458752, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index dbe77abb2c48..5e2592cf1a9f 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2936,6 +2936,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .try_ce_fw = false, .bbmcu_nr = 0, .needed_fw_elms = 0, + .fw_blacklist = &rtw89_fw_blacklist_default, .fifo_size = 458752, .small_fifo_size = false, .dle_scc_rsvd_size = 0, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c index ef7747adbcc2..64a41f24b2ad 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c @@ -2632,6 +2632,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .try_ce_fw = false, .bbmcu_nr = 1, .needed_fw_elms = RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS, + .fw_blacklist = &rtw89_fw_blacklist_default, .fifo_size = 589824, .small_fifo_size = false, .dle_scc_rsvd_size = 0, diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 7b203bb7f151..02c2ac12f197 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -156,9 +156,11 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt) rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n", ser_st_name(ser), ser_ev_name(ser, evt));
+ wiphy_lock(rtwdev->hw->wiphy); mutex_lock(&rtwdev->mutex); rtw89_leave_lps(rtwdev); mutex_unlock(&rtwdev->mutex); + wiphy_unlock(rtwdev->hw->wiphy);
ser->st_tbl[ser->state].st_func(ser, evt); } @@ -707,9 +709,11 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
switch (evt) { case SER_EV_STATE_IN: + wiphy_lock(rtwdev->hw->wiphy); mutex_lock(&rtwdev->mutex); ser_l2_reset_st_pre_hdl(ser); mutex_unlock(&rtwdev->mutex); + wiphy_unlock(rtwdev->hw->wiphy);
ieee80211_restart_hw(rtwdev->hw); ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index 3f424f14de4e..4a2b7c9921bc 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -4,7 +4,7 @@ * Copyright (c) 2008, Jouni Malinen j@w1.fi * Copyright (c) 2011, Javier Lopez jlopex@gmail.com * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2025 Intel Corporation */
/* @@ -1983,11 +1983,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, return; }
- if (sta && sta->mlo) { - if (WARN_ON(!link_sta)) { - ieee80211_free_txskb(hw, skb); - return; - } + /* Do address translations only between shared links. It is + * possible that while a non-AP MLD station and an AP MLD + * station have shared links, the frame is intended to be sent + * on a link which is not shared (for example when sending a + * probe response). + */ + if (sta && sta->mlo && link_sta) { /* address translation to link addresses on TX */ ether_addr_copy(hdr->addr1, link_sta->addr); ether_addr_copy(hdr->addr2, bss_conf->addr); diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 082253a3a956..04f4a049599a 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -442,7 +442,8 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd) if (ndd->data) return 0;
- if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) { + if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 || + ndd->nsarea.config_size == 0) { dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n", ndd->nsarea.max_xfer, ndd->nsarea.config_size); return -ENXIO; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 265b3608ae26..cd8a10f6accf 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3587,6 +3587,9 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, }, + { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_BOGUS_NID, }, @@ -3610,6 +3613,9 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ .driver_data = NVME_QUIRK_BROKEN_MSI }, + { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */ + .driver_data = NVME_QUIRK_BROKEN_MSI | + NVME_QUIRK_NO_DEEPEST_PS }, { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 4f9cac8a5abe..259ad77c03c5 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1560,6 +1560,9 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock;
+ if (!queue->state_change) + return; + write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_data_ready = queue->data_ready; sock->sk->sk_state_change = queue->state_change; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d00a3b015635..d1869e6de384 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -581,9 +581,11 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, cell->nbits = info->nbits; cell->np = info->np;
- if (cell->nbits) + if (cell->nbits) { cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, BITS_PER_BYTE); + cell->raw_len = ALIGN(cell->bytes, nvmem->word_size); + }
if (!IS_ALIGNED(cell->offset, nvmem->stride)) { dev_err(&nvmem->dev, @@ -592,6 +594,18 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem, return -EINVAL; }
+ if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) { + dev_err(&nvmem->dev, + "cell %s raw len %zd unaligned to nvmem word size %d\n", + cell->name ?: "<unknown>", cell->raw_len, + nvmem->word_size); + + if (info->raw_len) + return -EINVAL; + + cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size); + } + return 0; }
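The checks above size a cell from its bit width and then pad the raw read length up to the device word size. A standalone sketch of that arithmetic; DIV_ROUND_UP and ALIGN are re-implemented locally (this ALIGN works for any positive alignment) and the sample numbers are invented:

#include <stdio.h>

#define BITS_PER_BYTE		8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(DIV_ROUND_UP(x, a) * (a))

int main(void)
{
	unsigned int nbits = 11, bit_offset = 3, word_size = 4;

	/* Bytes needed to cover bit_offset + nbits bits of the cell. */
	unsigned int bytes = DIV_ROUND_UP(nbits + bit_offset, BITS_PER_BYTE);

	/* Raw read length padded to a whole number of device words. */
	unsigned int raw_len = ALIGN(bytes, word_size);

	printf("bytes=%u raw_len=%u\n", bytes, raw_len);	/* bytes=2 raw_len=4 */
	return 0;
}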
@@ -824,7 +838,9 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod if (addr && len == (2 * sizeof(u32))) { info.bit_offset = be32_to_cpup(addr++); info.nbits = be32_to_cpup(addr); - if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) { + if (info.bit_offset >= BITS_PER_BYTE * info.bytes || + info.nbits < 1 || + info.bit_offset + info.nbits > BITS_PER_BYTE * info.bytes) { dev_err(dev, "nvmem: invalid bits on %pOF\n", child); of_node_put(child); return -EINVAL; @@ -1617,21 +1633,29 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put); static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf) { u8 *p, *b; - int i, extra, bit_offset = cell->bit_offset; + int i, extra, bytes_offset; + int bit_offset = cell->bit_offset;
p = b = buf; - if (bit_offset) { + + bytes_offset = bit_offset / BITS_PER_BYTE; + b += bytes_offset; + bit_offset %= BITS_PER_BYTE; + + if (bit_offset % BITS_PER_BYTE) { /* First shift */ - *b++ >>= bit_offset; + *p = *b++ >> bit_offset;
/* setup rest of the bytes if any */ for (i = 1; i < cell->bytes; i++) { /* Get bits from next byte and shift them towards msb */ - *p |= *b << (BITS_PER_BYTE - bit_offset); + *p++ |= *b << (BITS_PER_BYTE - bit_offset);
- p = b; - *b++ >>= bit_offset; + *p = *b++ >> bit_offset; } + } else if (p != b) { + memmove(p, b, cell->bytes - bytes_offset); + p += cell->bytes - 1; } else { /* point to the msb */ p += cell->bytes - 1; diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index 116a39e804c7..a872c640b8c5 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c @@ -321,19 +321,32 @@ static int qfprom_reg_read(void *context, unsigned int reg, void *_val, size_t bytes) { struct qfprom_priv *priv = context; - u8 *val = _val; - int i = 0, words = bytes; + u32 *val = _val; void __iomem *base = priv->qfpcorrected; + int words = DIV_ROUND_UP(bytes, sizeof(u32)); + int i;
if (read_raw_data && priv->qfpraw) base = priv->qfpraw;
- while (words--) - *val++ = readb(base + reg + i++); + for (i = 0; i < words; i++) + *val++ = readl(base + reg + i * sizeof(u32));
return 0; }
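The reworked read loop above fetches whole 32-bit words with readl() and rounds the requested byte count up to a word count. A userspace sketch of the same loop against an in-memory array standing in for the fuse block (readl() is simulated; the destination buffer must be padded to a whole number of words, which is what the core's raw_len provides):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* In-memory stand-in for the corrected-fuse region. */
static const uint32_t fuse[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };

/* Simulated readl(): one aligned 32-bit load per call. */
static uint32_t sim_readl(size_t off)
{
	uint32_t v;

	memcpy(&v, (const uint8_t *)fuse + off, sizeof(v));
	return v;
}

/* Read @bytes starting at word-aligned @reg using only whole-word loads. */
static void reg_read(unsigned int reg, void *_val, size_t bytes)
{
	uint32_t *val = _val;
	size_t words = DIV_ROUND_UP(bytes, sizeof(uint32_t));

	for (size_t i = 0; i < words; i++)
		*val++ = sim_readl(reg + i * sizeof(uint32_t));
}

int main(void)
{
	uint32_t out[2] = { 0 };

	reg_read(4, out, 6);	/* 6 bytes -> two word reads at offsets 4 and 8 */
	printf("%08x %08x\n", out[0], out[1]);
	return 0;
}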
+/* Align reads to word boundary */ +static void qfprom_fixup_dt_cell_info(struct nvmem_device *nvmem, + struct nvmem_cell_info *cell) +{ + unsigned int byte_offset = cell->offset % sizeof(u32); + + cell->bit_offset += byte_offset * BITS_PER_BYTE; + cell->offset -= byte_offset; + if (byte_offset && !cell->nbits) + cell->nbits = cell->bytes * BITS_PER_BYTE; +} + static void qfprom_runtime_disable(void *data) { pm_runtime_disable(data); @@ -358,10 +371,11 @@ static int qfprom_probe(struct platform_device *pdev) struct nvmem_config econfig = { .name = "qfprom", .add_legacy_fixed_of_cells = true, - .stride = 1, - .word_size = 1, + .stride = 4, + .word_size = 4, .id = NVMEM_DEVID_AUTO, .reg_read = qfprom_reg_read, + .fixup_dt_cell_info = qfprom_fixup_dt_cell_info, }; struct device *dev = &pdev->dev; struct resource *res; diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c index ebc3f0b24166..d88f12c53242 100644 --- a/drivers/nvmem/rockchip-otp.c +++ b/drivers/nvmem/rockchip-otp.c @@ -59,7 +59,6 @@ #define RK3588_OTPC_AUTO_EN 0x08 #define RK3588_OTPC_INT_ST 0x84 #define RK3588_OTPC_DOUT0 0x20 -#define RK3588_NO_SECURE_OFFSET 0x300 #define RK3588_NBYTES 4 #define RK3588_BURST_NUM 1 #define RK3588_BURST_SHIFT 8 @@ -69,6 +68,7 @@
struct rockchip_data { int size; + int read_offset; const char * const *clks; int num_clks; nvmem_reg_read_t reg_read; @@ -196,7 +196,7 @@ static int rk3588_otp_read(void *context, unsigned int offset, addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES; addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES; addr_len = addr_end - addr_start; - addr_start += RK3588_NO_SECURE_OFFSET; + addr_start += otp->data->read_offset / RK3588_NBYTES;
buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL); if (!buf) @@ -274,12 +274,21 @@ static const struct rockchip_data px30_data = { .reg_read = px30_otp_read, };
+static const struct rockchip_data rk3576_data = { + .size = 0x100, + .read_offset = 0x700, + .clks = px30_otp_clocks, + .num_clks = ARRAY_SIZE(px30_otp_clocks), + .reg_read = rk3588_otp_read, +}; + static const char * const rk3588_otp_clocks[] = { "otp", "apb_pclk", "phy", "arb", };
static const struct rockchip_data rk3588_data = { .size = 0x400, + .read_offset = 0xc00, .clks = rk3588_otp_clocks, .num_clks = ARRAY_SIZE(rk3588_otp_clocks), .reg_read = rk3588_otp_read, @@ -294,6 +303,10 @@ static const struct of_device_id rockchip_otp_match[] = { .compatible = "rockchip,rk3308-otp", .data = &px30_data, }, + { + .compatible = "rockchip,rk3576-otp", + .data = &rk3576_data, + }, { .compatible = "rockchip,rk3588-otp", .data = &rk3588_data, diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 0d94e4a967d8..7cef00d9d7ab 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -194,6 +194,12 @@ config PCI_P2PDMA P2P DMA transactions must be between devices behind the same root port.
+ Enabling this option will reduce the entropy of x86 KASLR memory + regions. For example, on a 46-bit system, the entropy goes down + from 16 bits to 15 bits. The actual reduction in entropy depends + on the physical address bits, processor features, the kernel config + (5-level page tables) and the physical memory present on the system. + If unsure, say N.
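The 16-to-15-bit example follows from how KASLR entropy is counted: bits of entropy = log2(number of candidate base addresses), so halving the candidate space costs exactly one bit. A small illustration of that relationship only (the slot counts below are invented, not derived from the kernel; build with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical counts of usable KASLR base positions before and
	 * after reserving extra direct-map space; values are invented. */
	double slots_before = 65536.0;	/* 2^16 -> 16 bits of entropy */
	double slots_after  = 32768.0;	/* half as many -> 15 bits */

	printf("before: %.0f bits\n", log2(slots_before));
	printf("after:  %.0f bits\n", log2(slots_after));
	return 0;
}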
config PCI_LABEL diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 6afff1f1b143..c331b108e71d 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -538,4 +538,37 @@ int pci_max_pasids(struct pci_dev *pdev) return (1 << FIELD_GET(PCI_PASID_CAP_WIDTH, supported)); } EXPORT_SYMBOL_GPL(pci_max_pasids); + +/** + * pci_pasid_status - Check the PASID status + * @pdev: PCI device structure + * + * Returns a negative value when no PASID capability is present. + * Otherwise the value of the control register is returned. + * Status reported are: + * + * PCI_PASID_CTRL_ENABLE - PASID enabled + * PCI_PASID_CTRL_EXEC - Execute permission enabled + * PCI_PASID_CTRL_PRIV - Privileged mode enabled + */ +int pci_pasid_status(struct pci_dev *pdev) +{ + int pasid; + u16 ctrl; + + if (pdev->is_virtfn) + pdev = pci_physfn(pdev); + + pasid = pdev->pasid_cap; + if (!pasid) + return -EINVAL; + + pci_read_config_word(pdev, pasid + PCI_PASID_CTRL, &ctrl); + + ctrl &= PCI_PASID_CTRL_ENABLE | PCI_PASID_CTRL_EXEC | + PCI_PASID_CTRL_PRIV; + + return ctrl; +} +EXPORT_SYMBOL_GPL(pci_pasid_status); #endif /* CONFIG_PCI_PASID */ diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index dea19250598a..9e7e94f32b43 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -280,7 +280,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, u32 index; struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) { + for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) { if (ep->outbound_addr[index] != addr) continue; *atu_index = index; diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 120e2aca5164..d428457d9c43 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -902,7 +902,7 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci) if (ret) return ret;
- mem = ioremap(atu.cpu_addr, pci->region_align); + mem = ioremap(pci->pp.msg_res->start, pci->region_align); if (!mem) return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 792d24cea574..81f085cebf62 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -309,8 +309,8 @@ static int brcm_pcie_encode_ibar_size(u64 size) if (log2_in >= 12 && log2_in <= 15) /* Covers 4KB to 32KB (inclusive) */ return (log2_in - 12) + 0x1c; - else if (log2_in >= 16 && log2_in <= 35) - /* Covers 64KB to 32GB, (inclusive) */ + else if (log2_in >= 16 && log2_in <= 36) + /* Covers 64KB to 64GB, (inclusive) */ return log2_in - 15; /* Something is awry so disable */ return 0; @@ -1947,3 +1947,4 @@ module_platform_driver(brcm_pcie_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom STB PCIe RC driver"); MODULE_AUTHOR("Broadcom"); +MODULE_SOFTDEP("pre: irq_bcm2712_mip"); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 94ceec50a2b9..8df064b62a2f 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -17,6 +17,8 @@ #include <linux/rculist.h> #include <linux/rcupdate.h>
+#include <xen/xen.h> + #include <asm/irqdomain.h>
#define VMD_CFGBAR 0 @@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) struct vmd_dev *vmd; int err;
+ if (xen_domain()) { + /* + * Xen doesn't have knowledge about devices in the VMD bus + * because the config space of devices behind the VMD bridge is + * not known to Xen, and hence Xen cannot discover or configure + * them in any way. + * + * Bypass of MSI remapping won't work in that case as direct + * write by Linux to the MSI entries won't result in functional + * interrupts, as Xen is the entity that manages the host + * interrupt controller and must configure interrupts. However + * multiplexing of interrupts by the VMD bridge will work under + * Xen, so force the usage of that mode which must always be + * supported by VMD bridges. + */ + features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP; + } + if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) return -ENOMEM;
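The Xen branch above only clears one capability bit from the feature mask early in probe; every later check of that bit then falls through to the interrupt-multiplexing path. A tiny standalone sketch of that clear-a-flag-up-front pattern (flag names and the hypervisor check are illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define FEAT_CAN_BYPASS_MSI_REMAP	(1u << 0)	/* illustrative bits */
#define FEAT_OFFSET_FIRST_VECTOR	(1u << 1)

/* Stand-in for the "hypervisor owns the interrupt controller" test. */
static bool hypervisor_owns_interrupts(void)
{
	return true;
}

int main(void)
{
	unsigned long features = FEAT_CAN_BYPASS_MSI_REMAP | FEAT_OFFSET_FIRST_VECTOR;

	/* Drop the bypass capability up front; later code only tests the bit. */
	if (hypervisor_owns_interrupts())
		features &= ~FEAT_CAN_BYPASS_MSI_REMAP;

	if (features & FEAT_CAN_BYPASS_MSI_REMAP)
		printf("direct MSI programming\n");
	else
		printf("interrupts multiplexed by the bridge\n");
	return 0;
}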
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c index 54286a40bdfb..6643a88c7a0c 100644 --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c @@ -125,7 +125,7 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
static struct pci_epf_header sa8775p_header = { .vendorid = PCI_VENDOR_ID_QCOM, - .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */ + .deviceid = 0x0116, .baseclass_code = PCI_CLASS_OTHERS, .interrupt_pin = PCI_INTERRUPT_INTA, }; diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 14b4c68ab4e1..21aa3709e257 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -703,6 +703,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) if (ret) { pci_epf_free_space(epf, epf_test->reg[bar], bar, PRIMARY_INTERFACE); + epf_test->reg[bar] = NULL; dev_err(dev, "Failed to set BAR%d\n", bar); if (bar == test_reg_bar) return ret; @@ -878,6 +879,7 @@ static void pci_epf_test_free_space(struct pci_epf *epf)
pci_epf_free_space(epf, epf_test->reg[bar], bar, PRIMARY_INTERFACE); + epf_test->reg[bar] = NULL; } }
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index f16c7ce3bf3f..1eceabef9e84 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -814,11 +814,9 @@ static resource_size_t calculate_iosize(resource_size_t size, size = (size & 0xff) + ((size & ~0xffUL) << 2); #endif size = size + size1; - if (size < old_size) - size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align); - return size; + size = max(size, add_size) + children_add_size; + return ALIGN(max(size, old_size), align); }
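The rewritten return statement folds the old-size clamp into the final alignment: optional and child space are added first, the result is never allowed to shrink below what was already assigned, and only then is it rounded up. A standalone sketch of that window-sizing arithmetic (power-of-two alignment assumed, sample numbers invented):

#include <stdio.h>

typedef unsigned long long resource_size_t;

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((resource_size_t)(a) - 1))	/* power-of-two a */
#define MAX(a, b)	((a) > (b) ? (a) : (b))

static resource_size_t window_size(resource_size_t size, resource_size_t old_size,
				   resource_size_t add_size,
				   resource_size_t children_add_size,
				   resource_size_t align)
{
	/* Required space plus optional space requested further down the tree... */
	size = MAX(size, add_size) + children_add_size;
	/* ...never smaller than what was already assigned, then rounded up. */
	return ALIGN(MAX(size, old_size), align);
}

int main(void)
{
	/* 0x900 needed now, 0x1000 assigned previously, 4K window alignment. */
	printf("%#llx\n", window_size(0x900, 0x1000, 0, 0x100, 0x1000));
	return 0;
}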
static resource_size_t calculate_memsize(resource_size_t size, diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c index 0afe02f879b4..d9b56f0f9094 100644 --- a/drivers/perf/arm_pmuv3.c +++ b/drivers/perf/arm_pmuv3.c @@ -816,10 +816,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu) else armv8pmu_disable_user_access();
+ kvm_vcpu_pmu_resync_el0(); + /* Enable all counters */ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); - - kvm_vcpu_pmu_resync_el0(); }
static void armv8pmu_stop(struct arm_pmu *cpu_pmu) diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 413f76e2d174..e0a6a272f571 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -405,13 +405,14 @@ EXPORT_SYMBOL_GPL(phy_power_off);
int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode) { - int ret; + int ret = 0;
- if (!phy || !phy->ops->set_mode) + if (!phy) return 0;
mutex_lock(&phy->mutex); - ret = phy->ops->set_mode(phy, mode, submode); + if (phy->ops->set_mode) + ret = phy->ops->set_mode(phy, mode, submode); if (!ret) phy->attrs.mode = mode; mutex_unlock(&phy->mutex); diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index 513fd35dcaa9..b45aee8f5964 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -9,6 +9,7 @@ * Copyright (C) 2014 Cogent Embedded, Inc. */
+#include <linux/cleanup.h> #include <linux/extcon-provider.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -118,9 +119,8 @@ struct rcar_gen3_chan { struct regulator *vbus; struct reset_control *rstc; struct work_struct work; - struct mutex lock; /* protects rphys[...].powered */ + spinlock_t lock; /* protects access to hardware and driver data structure. */ enum usb_dr_mode dr_mode; - int irq; u32 obint_enable_bits; bool extcon_host; bool is_otg_channel; @@ -349,6 +349,8 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, bool is_b_device; enum phy_mode cur_mode, new_mode;
+ guard(spinlock_irqsave)(&ch->lock); + if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) return -EIO;
@@ -416,7 +418,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) val = readl(usb2_base + USB2_ADPCTRL); writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); } - msleep(20); + mdelay(20);
writel(0xffffffff, usb2_base + USB2_OBINTSTA); writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN); @@ -428,16 +430,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) { struct rcar_gen3_chan *ch = _ch; void __iomem *usb2_base = ch->base; - u32 status = readl(usb2_base + USB2_OBINTSTA); + struct device *dev = ch->dev; irqreturn_t ret = IRQ_NONE; + u32 status; + + pm_runtime_get_noresume(dev);
- if (status & ch->obint_enable_bits) { - dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); - writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); - rcar_gen3_device_recognition(ch); - ret = IRQ_HANDLED; + if (pm_runtime_suspended(dev)) + goto rpm_put; + + scoped_guard(spinlock, &ch->lock) { + status = readl(usb2_base + USB2_OBINTSTA); + if (status & ch->obint_enable_bits) { + dev_vdbg(dev, "%s: %08x\n", __func__, status); + writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); + rcar_gen3_device_recognition(ch); + ret = IRQ_HANDLED; + } }
+rpm_put: + pm_runtime_put_noidle(dev); return ret; }
@@ -447,17 +460,8 @@ static int rcar_gen3_phy_usb2_init(struct phy *p) struct rcar_gen3_chan *channel = rphy->ch; void __iomem *usb2_base = channel->base; u32 val; - int ret;
- if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) { - INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); - ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq, - IRQF_SHARED, dev_name(channel->dev), channel); - if (ret < 0) { - dev_err(channel->dev, "No irq handler (%d)\n", channel->irq); - return ret; - } - } + guard(spinlock_irqsave)(&channel->lock);
/* Initialize USB2 part */ val = readl(usb2_base + USB2_INT_ENABLE); @@ -485,6 +489,8 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) void __iomem *usb2_base = channel->base; u32 val;
+ guard(spinlock_irqsave)(&channel->lock); + rphy->initialized = false;
val = readl(usb2_base + USB2_INT_ENABLE); @@ -493,9 +499,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) val &= ~USB2_INT_ENABLE_UCOM_INTEN; writel(val, usb2_base + USB2_INT_ENABLE);
- if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel)) - free_irq(channel->irq, channel); - return 0; }
@@ -507,16 +510,17 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) u32 val; int ret = 0;
- mutex_lock(&channel->lock); - if (!rcar_gen3_are_all_rphys_power_off(channel)) - goto out; - if (channel->vbus) { ret = regulator_enable(channel->vbus); if (ret) - goto out; + return ret; }
+ guard(spinlock_irqsave)(&channel->lock); + + if (!rcar_gen3_are_all_rphys_power_off(channel)) + goto out; + val = readl(usb2_base + USB2_USBCTR); val |= USB2_USBCTR_PLL_RST; writel(val, usb2_base + USB2_USBCTR); @@ -526,7 +530,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) out: /* The powered flag should be set for any other phys anyway */ rphy->powered = true; - mutex_unlock(&channel->lock);
return 0; } @@ -537,18 +540,20 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p) struct rcar_gen3_chan *channel = rphy->ch; int ret = 0;
- mutex_lock(&channel->lock); - rphy->powered = false; + scoped_guard(spinlock_irqsave, &channel->lock) { + rphy->powered = false;
- if (!rcar_gen3_are_all_rphys_power_off(channel)) - goto out; + if (rcar_gen3_are_all_rphys_power_off(channel)) { + u32 val = readl(channel->base + USB2_USBCTR); + + val |= USB2_USBCTR_PLL_RST; + writel(val, channel->base + USB2_USBCTR); + } + }
if (channel->vbus) ret = regulator_disable(channel->vbus);
-out: - mutex_unlock(&channel->lock); - return ret; }
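The conversion above leans on linux/cleanup.h guards, which drop the spinlock automatically when the guarded scope ends, so early returns no longer need unlock labels. Outside the kernel the same shape can be sketched with the compiler cleanup attribute (a GCC/Clang extension; the pthread mutex is purely a stand-in):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when the annotated variable leaves scope, much
 * like the unlock a kernel guard()/scoped_guard() arranges. */
static void unlock_cb(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
	printf("auto-unlocked\n");
}

static int do_power_off(int all_phys_off)
{
	pthread_mutex_lock(&lock);
	pthread_mutex_t *guard __attribute__((cleanup(unlock_cb))) = &lock;
	(void)guard;

	if (!all_phys_off)
		return 0;		/* early return: unlock still runs */

	printf("resetting PLL with the lock held\n");
	return 0;
}

int main(void)
{
	do_power_off(0);
	do_power_off(1);
	return 0;
}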
@@ -701,7 +706,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct rcar_gen3_chan *channel; struct phy_provider *provider; - int ret = 0, i; + int ret = 0, i, irq;
if (!dev->of_node) { dev_err(dev, "This driver needs device tree\n"); @@ -717,8 +722,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) return PTR_ERR(channel->base);
channel->obint_enable_bits = USB2_OBINT_BITS; - /* get irq number here and request_irq for OTG in phy_init */ - channel->irq = platform_get_irq_optional(pdev, 0); channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); if (channel->dr_mode != USB_DR_MODE_UNKNOWN) { channel->is_otg_channel = true; @@ -761,7 +764,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) if (phy_data->no_adp_ctrl) channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
- mutex_init(&channel->lock); + spin_lock_init(&channel->lock); for (i = 0; i < NUM_OF_PHYS; i++) { channel->rphys[i].phy = devm_phy_create(dev, NULL, phy_data->phy_usb2_ops); @@ -787,6 +790,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) channel->vbus = NULL; }
+ irq = platform_get_irq_optional(pdev, 0); + if (irq < 0 && irq != -ENXIO) { + ret = irq; + goto error; + } else if (irq > 0) { + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); + ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, + IRQF_SHARED, dev_name(dev), channel); + if (ret < 0) { + dev_err(dev, "Failed to request irq (%d)\n", irq); + goto error; + } + } + provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate); if (IS_ERR(provider)) { dev_err(dev, "Failed to register PHY provider\n"); diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c index be6f1ca9095a..dc6e01dff5c7 100644 --- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c +++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c @@ -94,8 +94,8 @@ #define LCPLL_ALONE_MODE BIT(1) /* CMN_REG(0097) */ #define DIG_CLK_SEL BIT(1) -#define ROPLL_REF BIT(1) -#define LCPLL_REF 0 +#define LCPLL_REF BIT(1) +#define ROPLL_REF 0 /* CMN_REG(0099) */ #define CMN_ROPLL_ALONE_MODE BIT(2) #define ROPLL_ALONE_MODE BIT(2) diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c index f5c6d264d89e..d2021f7941e3 100644 --- a/drivers/phy/rockchip/phy-rockchip-usbdp.c +++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c @@ -187,6 +187,8 @@ struct rk_udphy { u32 dp_aux_din_sel; bool dp_sink_hpd_sel; bool dp_sink_hpd_cfg; + unsigned int link_rate; + unsigned int lanes; u8 bw; int id;
@@ -1102,15 +1104,19 @@ static int rk_udphy_dp_phy_power_off(struct phy *phy) return 0; }
-static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate) +/* + * Verify link rate + */ +static int rk_udphy_dp_phy_verify_link_rate(struct rk_udphy *udphy, + struct phy_configure_opts_dp *dp) { - switch (link_rate) { + switch (dp->link_rate) { case 1620: case 2700: case 5400: case 8100: + udphy->link_rate = dp->link_rate; break; - default: return -EINVAL; } @@ -1118,45 +1124,44 @@ static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate) return 0; }
-static int rk_udphy_dp_phy_verify_config(struct rk_udphy *udphy, - struct phy_configure_opts_dp *dp) +static int rk_udphy_dp_phy_verify_lanes(struct rk_udphy *udphy, + struct phy_configure_opts_dp *dp) { - int i, ret; - - /* If changing link rate was required, verify it's supported. */ - ret = rk_udphy_dp_phy_verify_link_rate(dp->link_rate); - if (ret) - return ret; - - /* Verify lane count. */ switch (dp->lanes) { case 1: case 2: case 4: /* valid lane count. */ + udphy->lanes = dp->lanes; break;
default: return -EINVAL; }
- /* - * If changing voltages is required, check swing and pre-emphasis - * levels, per-lane. - */ - if (dp->set_voltages) { - /* Lane count verified previously. */ - for (i = 0; i < dp->lanes; i++) { - if (dp->voltage[i] > 3 || dp->pre[i] > 3) - return -EINVAL; + return 0; +}
- /* - * Sum of voltage swing and pre-emphasis levels cannot - * exceed 3. - */ - if (dp->voltage[i] + dp->pre[i] > 3) - return -EINVAL; - } +/* + * If changing voltages is required, check swing and pre-emphasis + * levels, per-lane. + */ +static int rk_udphy_dp_phy_verify_voltages(struct rk_udphy *udphy, + struct phy_configure_opts_dp *dp) +{ + int i; + + /* Lane count verified previously. */ + for (i = 0; i < udphy->lanes; i++) { + if (dp->voltage[i] > 3 || dp->pre[i] > 3) + return -EINVAL; + + /* + * Sum of voltage swing and pre-emphasis levels cannot + * exceed 3. + */ + if (dp->voltage[i] + dp->pre[i] > 3) + return -EINVAL; }
return 0; @@ -1196,9 +1201,23 @@ static int rk_udphy_dp_phy_configure(struct phy *phy, u32 i, val, lane; int ret;
- ret = rk_udphy_dp_phy_verify_config(udphy, dp); - if (ret) - return ret; + if (dp->set_rate) { + ret = rk_udphy_dp_phy_verify_link_rate(udphy, dp); + if (ret) + return ret; + } + + if (dp->set_lanes) { + ret = rk_udphy_dp_phy_verify_lanes(udphy, dp); + if (ret) + return ret; + } + + if (dp->set_voltages) { + ret = rk_udphy_dp_phy_verify_voltages(udphy, dp); + if (ret) + return ret; + }
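After the split above, each field of the DP configure request is validated only when its set_* flag asks for it, and the accepted link rate and lane count are cached for the later voltage programming. A standalone sketch of the three checks (the struct is a local stand-in for the DP configure options, not the kernel type):

#include <stdio.h>

struct dp_opts {
	unsigned int link_rate;		/* 1620, 2700, 5400 or 8100 */
	unsigned int lanes;		/* 1, 2 or 4 */
	unsigned int voltage[4];	/* swing level per lane, 0..3 */
	unsigned int pre[4];		/* pre-emphasis per lane, 0..3 */
	int set_rate, set_lanes, set_voltages;
};

static int verify(const struct dp_opts *dp, unsigned int *rate, unsigned int *lanes)
{
	if (dp->set_rate) {
		switch (dp->link_rate) {
		case 1620: case 2700: case 5400: case 8100:
			*rate = dp->link_rate;
			break;
		default:
			return -1;
		}
	}

	if (dp->set_lanes) {
		switch (dp->lanes) {
		case 1: case 2: case 4:
			*lanes = dp->lanes;
			break;
		default:
			return -1;
		}
	}

	if (dp->set_voltages) {
		/* Swing and pre-emphasis capped at 3, individually and combined. */
		for (unsigned int i = 0; i < *lanes; i++)
			if (dp->voltage[i] > 3 || dp->pre[i] > 3 ||
			    dp->voltage[i] + dp->pre[i] > 3)
				return -1;
	}

	return 0;
}

int main(void)
{
	unsigned int rate = 0, lanes = 0;
	struct dp_opts dp = {
		.link_rate = 2700, .lanes = 2,
		.voltage = { 2, 1 }, .pre = { 1, 2 },
		.set_rate = 1, .set_lanes = 1, .set_voltages = 1,
	};

	printf("verify=%d rate=%u lanes=%u\n", verify(&dp, &rate, &lanes), rate, lanes);
	return 0;
}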
if (dp->set_rate) { regmap_update_bits(udphy->pma_regmap, CMN_DP_RSTN_OFFSET, @@ -1243,9 +1262,9 @@ static int rk_udphy_dp_phy_configure(struct phy *phy, }
if (dp->set_voltages) { - for (i = 0; i < dp->lanes; i++) { + for (i = 0; i < udphy->lanes; i++) { lane = udphy->dp_lane_sel[i]; - switch (dp->link_rate) { + switch (udphy->link_rate) { case 1620: case 2700: regmap_update_bits(udphy->pma_regmap, diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index 46b8f6987c62..28d02ae60cc1 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -1513,8 +1513,11 @@ static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_preinit[] = { PHY_TUNING_ENTRY_PMA(0x09e0, -1, 0x00), PHY_TUNING_ENTRY_PMA(0x09e4, -1, 0x36), PHY_TUNING_ENTRY_PMA(0x1e7c, -1, 0x06), - PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x00), - PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x36), + PHY_TUNING_ENTRY_PMA(0x19e0, -1, 0x00), + PHY_TUNING_ENTRY_PMA(0x19e4, -1, 0x36), + /* fix bootloader bug */ + PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x02), + PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x0b), /* improve LVCC */ PHY_TUNING_ENTRY_PMA(0x08f0, -1, 0x30), PHY_TUNING_ENTRY_PMA(0x18f0, -1, 0x30), diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c index cf6efa9c0364..a039b490cdb8 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c @@ -72,7 +72,7 @@ static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI; struct bcm281xx_pin_function { const char *name; const char * const *groups; - const unsigned ngroups; + const unsigned int ngroups; };
/* @@ -84,10 +84,10 @@ struct bcm281xx_pinctrl_data {
/* List of all pins */ const struct pinctrl_pin_desc *pins; - const unsigned npins; + const unsigned int npins;
const struct bcm281xx_pin_function *functions; - const unsigned nfunctions; + const unsigned int nfunctions;
struct regmap *regmap; }; @@ -941,7 +941,7 @@ static struct bcm281xx_pinctrl_data bcm281xx_pinctrl = { };
static inline enum bcm281xx_pin_type pin_type_get(struct pinctrl_dev *pctldev, - unsigned pin) + unsigned int pin) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -985,7 +985,7 @@ static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) }
static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev, - unsigned group) + unsigned int group) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -993,9 +993,9 @@ static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev, }
static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, - unsigned group, + unsigned int group, const unsigned **pins, - unsigned *num_pins) + unsigned int *num_pins) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -1007,7 +1007,7 @@ static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
static void bcm281xx_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, - unsigned offset) + unsigned int offset) { seq_printf(s, " %s", dev_name(pctldev->dev)); } @@ -1029,7 +1029,7 @@ static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev) }
static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev, - unsigned function) + unsigned int function) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -1037,9 +1037,9 @@ static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev, }
static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev, - unsigned function, + unsigned int function, const char * const **groups, - unsigned * const num_groups) + unsigned int * const num_groups) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -1050,8 +1050,8 @@ static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev, }
static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev, - unsigned function, - unsigned group) + unsigned int function, + unsigned int group) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); const struct bcm281xx_pin_function *f = &pdata->functions[function]; @@ -1082,7 +1082,7 @@ static const struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = { };
static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *config) { return -ENOTSUPP; @@ -1091,9 +1091,9 @@ static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
/* Goes through the configs and update register val/mask */ static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs, + unsigned int num_configs, u32 *val, u32 *mask) { @@ -1207,9 +1207,9 @@ static const u16 bcm281xx_pullup_map[] = {
/* Goes through the configs and update register val/mask */ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs, + unsigned int num_configs, u32 *val, u32 *mask) { @@ -1277,9 +1277,9 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
/* Goes through the configs and update register val/mask */ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs, + unsigned int num_configs, u32 *val, u32 *mask) { @@ -1321,9 +1321,9 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev, }
static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs) + unsigned int num_configs) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); enum bcm281xx_pin_type pin_type; diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 6a94ecd6a8de..0b7f74beb6a6 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -143,10 +143,14 @@ static int dt_to_map_one_config(struct pinctrl *p, pctldev = get_pinctrl_dev_from_of_node(np_pctldev); if (pctldev) break; - /* Do not defer probing of hogs (circular loop) */ + /* + * Do not defer probing of hogs (circular loop) + * + * Return 1 to let the caller catch the case. + */ if (np_pctldev == p->dev->of_node) { of_node_put(np_pctldev); - return -ENODEV; + return 1; } } of_node_put(np_pctldev); @@ -265,6 +269,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev) ret = dt_to_map_one_config(p, pctldev, statename, np_config); of_node_put(np_config); + if (ret == 1) + continue; if (ret < 0) goto err; } diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 253a0cc57e39..e5a32a0532ee 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -487,7 +487,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin, case PIN_CONFIG_BIAS_PULL_DOWN: case PIN_CONFIG_BIAS_PULL_UP: if (meson_pinconf_get_pull(pc, pin) == param) - arg = 1; + arg = 60000; else return -EINVAL; break; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index a6bdff7a0bb2..018e96d921c0 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -43,7 +43,6 @@ * @pctrl: pinctrl handle. * @chip: gpiochip handle. * @desc: pin controller descriptor - * @restart_nb: restart notifier block. * @irq: parent irq for the TLMM irq_chip. * @intr_target_use_scm: route irq to application cpu using scm calls * @lock: Spinlock to protect register resources as well @@ -63,7 +62,6 @@ struct msm_pinctrl { struct pinctrl_dev *pctrl; struct gpio_chip chip; struct pinctrl_desc desc; - struct notifier_block restart_nb;
int irq;
@@ -1470,10 +1468,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) return 0; }
-static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, - void *data) +static int msm_ps_hold_restart(struct sys_off_data *data) { - struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb); + struct msm_pinctrl *pctrl = data->cb_data;
writel(0, pctrl->regs[0] + PS_HOLD_OFFSET); mdelay(1000); @@ -1484,7 +1481,11 @@ static struct msm_pinctrl *poweroff_pctrl;
static void msm_ps_hold_poweroff(void) { - msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL); + struct sys_off_data data = { + .cb_data = poweroff_pctrl, + }; + + msm_ps_hold_restart(&data); }
static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) @@ -1494,9 +1495,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
for (i = 0; i < pctrl->soc->nfunctions; i++) if (!strcmp(func[i].name, "ps_hold")) { - pctrl->restart_nb.notifier_call = msm_ps_hold_restart; - pctrl->restart_nb.priority = 128; - if (register_restart_handler(&pctrl->restart_nb)) + if (devm_register_sys_off_handler(pctrl->dev, + SYS_OFF_MODE_RESTART, + 128, + msm_ps_hold_restart, + pctrl)) dev_err(pctrl->dev, "failed to setup restart handler.\n"); poweroff_pctrl = pctrl; @@ -1598,8 +1601,6 @@ void msm_pinctrl_remove(struct platform_device *pdev) struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip); - - unregister_restart_handler(&pctrl->restart_nb); } EXPORT_SYMBOL(msm_pinctrl_remove);
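The restart hook now gets its driver context through the sys-off handler's cb_data instead of a notifier block embedded in the driver struct, and the devm registration removes the manual unregister from remove(). A minimal sketch of that callback-with-context shape (the registry below is a plain function-pointer pair and the types are local stand-ins, not the kernel API):

#include <stdio.h>

/* Local stand-ins, not the kernel types. */
struct sys_off_data { void *cb_data; };
struct pinctrl { const char *name; };

/* The handler receives its driver context through cb_data. */
static int ps_hold_restart(struct sys_off_data *data)
{
	struct pinctrl *pctrl = data->cb_data;

	printf("writing 0 to PS_HOLD of %s\n", pctrl->name);
	return 0;
}

/* Very small stand-in for a restart-handler registry. */
static int (*registered_cb)(struct sys_off_data *);
static struct sys_off_data registered_arg;

static void register_restart(int (*cb)(struct sys_off_data *), void *cb_data)
{
	registered_cb = cb;
	registered_arg.cb_data = cb_data;
}

int main(void)
{
	struct pinctrl tlmm = { .name = "tlmm" };

	register_restart(ps_hold_restart, &tlmm);
	registered_cb(&registered_arg);	/* what a restart would invoke */
	return 0;
}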
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index d90685cfe2e1..bde58f5a743c 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -311,6 +311,7 @@ struct rzg2l_pinctrl_pin_settings { * @pmc: PMC registers cache * @pfc: PFC registers cache * @iolh: IOLH registers cache + * @pupd: PUPD registers cache * @ien: IEN registers cache * @sd_ch: SD_CH registers cache * @eth_poc: ET_POC registers cache @@ -324,6 +325,7 @@ struct rzg2l_pinctrl_reg_cache { u32 *pfc; u32 *iolh[2]; u32 *ien[2]; + u32 *pupd[2]; u8 sd_ch[2]; u8 eth_poc[2]; u8 eth_mode; @@ -2539,6 +2541,11 @@ static int rzg2l_pinctrl_reg_cache_alloc(struct rzg2l_pinctrl *pctrl) if (!cache->ien[i]) return -ENOMEM;
+ cache->pupd[i] = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pupd[i]), + GFP_KERNEL); + if (!cache->pupd[i]) + return -ENOMEM; + /* Allocate dedicated cache. */ dedicated_cache->iolh[i] = devm_kcalloc(pctrl->dev, n_dedicated_pins, sizeof(*dedicated_cache->iolh[i]), @@ -2779,7 +2786,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
for (u32 port = 0; port < nports; port++) { - bool has_iolh, has_ien; + bool has_iolh, has_ien, has_pupd; u32 off, caps; u8 pincnt; u64 cfg; @@ -2791,6 +2798,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen caps = FIELD_GET(PIN_CFG_MASK, cfg); has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C)); has_ien = !!(caps & PIN_CFG_IEN); + has_pupd = !!(caps & PIN_CFG_PUPD);
if (suspend) RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PFC(off), cache->pfc[port]); @@ -2809,6 +2817,15 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen } }
+ if (has_pupd) { + RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PUPD(off), + cache->pupd[0][port]); + if (pincnt >= 4) { + RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PUPD(off), + cache->pupd[1][port]); + } + } + RZG2L_PCTRL_REG_ACCESS16(suspend, pctrl->base + PM(off), cache->pm[port]); RZG2L_PCTRL_REG_ACCESS8(suspend, pctrl->base + P(off), cache->p[port]);
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c index 57f2674e75d6..84b4850771ce 100644 --- a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c +++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c @@ -574,10 +574,10 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl, struct cv1800_pin *pin, unsigned long *configs, unsigned int num_configs, - u32 *value) + u32 *value, u32 *mask) { int i; - u32 v = 0; + u32 v = 0, m = 0; enum cv1800_pin_io_type type; int ret;
@@ -596,10 +596,12 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl, case PIN_CONFIG_BIAS_PULL_DOWN: v &= ~PIN_IO_PULLDOWN; v |= FIELD_PREP(PIN_IO_PULLDOWN, arg); + m |= PIN_IO_PULLDOWN; break; case PIN_CONFIG_BIAS_PULL_UP: v &= ~PIN_IO_PULLUP; v |= FIELD_PREP(PIN_IO_PULLUP, arg); + m |= PIN_IO_PULLUP; break; case PIN_CONFIG_DRIVE_STRENGTH_UA: ret = cv1800_pinctrl_oc2reg(pctrl, pin, arg); @@ -607,6 +609,7 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl, return ret; v &= ~PIN_IO_DRIVE; v |= FIELD_PREP(PIN_IO_DRIVE, ret); + m |= PIN_IO_DRIVE; break; case PIN_CONFIG_INPUT_SCHMITT_UV: ret = cv1800_pinctrl_schmitt2reg(pctrl, pin, arg); @@ -614,6 +617,7 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl, return ret; v &= ~PIN_IO_SCHMITT; v |= FIELD_PREP(PIN_IO_SCHMITT, ret); + m |= PIN_IO_SCHMITT; break; case PIN_CONFIG_POWER_SOURCE: /* Ignore power source as it is always fixed */ @@ -621,10 +625,12 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl, case PIN_CONFIG_SLEW_RATE: v &= ~PIN_IO_OUT_FAST_SLEW; v |= FIELD_PREP(PIN_IO_OUT_FAST_SLEW, arg); + m |= PIN_IO_OUT_FAST_SLEW; break; case PIN_CONFIG_BIAS_BUS_HOLD: v &= ~PIN_IO_BUS_HOLD; v |= FIELD_PREP(PIN_IO_BUS_HOLD, arg); + m |= PIN_IO_BUS_HOLD; break; default: return -ENOTSUPP; @@ -632,17 +638,19 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl, }
*value = v; + *mask = m;
return 0; }
static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl, unsigned int pin_id, - u32 value) + u32 value, u32 mask) { struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id); unsigned long flags; void __iomem *addr; + u32 reg;
if (!pin) return -EINVAL; @@ -650,7 +658,10 @@ static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl, addr = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
raw_spin_lock_irqsave(&pctrl->lock, flags); - writel(value, addr); + reg = readl(addr); + reg &= ~mask; + reg |= value; + writel(reg, addr); raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0; @@ -662,16 +673,17 @@ static int cv1800_pconf_set(struct pinctrl_dev *pctldev, { struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id); - u32 value; + u32 value, mask;
if (!pin) return -ENODEV;
if (cv1800_pinconf_compute_config(pctrl, pin, - configs, num_configs, &value)) + configs, num_configs, + &value, &mask)) return -ENOTSUPP;
- return cv1800_pin_set_config(pctrl, pin_id, value); + return cv1800_pin_set_config(pctrl, pin_id, value, mask); }
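With a mask computed alongside the value, the register update becomes a read-modify-write that only touches the fields a pinconf request actually named. A standalone sketch of applying such a value/mask pair (field positions are invented; the register is simulated by a plain variable):

#include <stdint.h>
#include <stdio.h>

#define PIN_PULLUP	(1u << 2)	/* illustrative field positions */
#define PIN_DRIVE	(0x7u << 4)

/* Apply only the fields named in @mask, preserving everything else. */
static uint32_t apply_config(uint32_t reg, uint32_t value, uint32_t mask)
{
	reg &= ~mask;
	reg |= value;
	return reg;
}

int main(void)
{
	uint32_t reg = 0x3u << 4;	/* drive level 3 already programmed */
	uint32_t value = PIN_PULLUP;	/* request: enable the pull-up */
	uint32_t mask  = PIN_PULLUP;	/* ...and touch nothing else */

	printf("before %#x after %#x\n", reg, apply_config(reg, value, mask));
	return 0;
}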
static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev, @@ -682,7 +694,7 @@ static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev, struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); const struct group_desc *group; const struct cv1800_pin_mux_config *pinmuxs; - u32 value; + u32 value, mask; int i;
group = pinctrl_generic_get_group(pctldev, gsel); @@ -692,11 +704,12 @@ static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev, pinmuxs = group->data;
if (cv1800_pinconf_compute_config(pctrl, pinmuxs[0].pin, - configs, num_configs, &value)) + configs, num_configs, + &value, &mask)) return -ENOTSUPP;
for (i = 0; i < group->grp.npins; i++) - cv1800_pin_set_config(pctrl, group->grp.pins[i], value); + cv1800_pin_set_config(pctrl, group->grp.pins[i], value, mask);
return 0; } diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 3b046450bd3f..edcc78ebce45 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -278,8 +278,8 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, return 0; }
-static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, - unsigned int offset) +static int tegra_pinctrl_get_group_index(struct pinctrl_dev *pctldev, + unsigned int offset) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); unsigned int group, num_pins, j; @@ -292,12 +292,35 @@ static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev * continue; for (j = 0; j < num_pins; j++) { if (offset == pins[j]) - return &pmx->soc->groups[group]; + return group; } }
- dev_err(pctldev->dev, "Pingroup not found for pin %u\n", offset); - return NULL; + return -EINVAL; +} + +static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, + unsigned int offset, + int group_index) +{ + struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + + if (group_index < 0 || group_index >= pmx->soc->ngroups) + return NULL; + + return &pmx->soc->groups[group_index]; +} + +static struct tegra_pingroup_config *tegra_pinctrl_get_group_config(struct pinctrl_dev *pctldev, + unsigned int offset, + int group_index) +{ + struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + + if (group_index < 0) + return NULL; + + return &pmx->pingroup_configs[group_index]; }
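Splitting the lookup into an index search plus accessors lets the driver keep a per-group side array in step with the SoC group table, so per-group state such as the saved SFSEL bit has an obvious home. A small sketch of that find-index-then-parallel-array pattern (group and pin numbers invented):

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct group {
	const char *name;
	int pins[4];
	int npins;
};

static const struct group groups[] = {
	{ "sdmmc1", { 10, 11, 12, 13 }, 4 },
	{ "uarta",  { 20, 21 },         2 },
};

/* Parallel array: one saved flag per entry of the table above. */
static struct { bool is_sfsel; } configs[ARRAY_SIZE(groups)];

static int group_index_for_pin(int pin)
{
	for (unsigned int g = 0; g < ARRAY_SIZE(groups); g++)
		for (int j = 0; j < groups[g].npins; j++)
			if (groups[g].pins[j] == pin)
				return g;
	return -1;
}

int main(void)
{
	int idx = group_index_for_pin(21);

	if (idx < 0)
		return 1;

	configs[idx].is_sfsel = true;	/* saved on request_enable... */
	printf("pin 21 -> group %s, saved sfsel=%d\n",
	       groups[idx].name, configs[idx].is_sfsel);
	/* ...and consulted again on disable_free to restore the bit. */
	return 0;
}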
static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, @@ -306,12 +329,15 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); const struct tegra_pingroup *group; + struct tegra_pingroup_config *config; + int group_index; u32 value;
if (!pmx->soc->sfsel_in_mux) return 0;
- group = tegra_pinctrl_get_group(pctldev, offset); + group_index = tegra_pinctrl_get_group_index(pctldev, offset); + group = tegra_pinctrl_get_group(pctldev, offset, group_index);
if (!group) return -EINVAL; @@ -319,7 +345,11 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, if (group->mux_reg < 0 || group->sfsel_bit < 0) return -EINVAL;
+ config = tegra_pinctrl_get_group_config(pctldev, offset, group_index); + if (!config) + return -EINVAL; value = pmx_readl(pmx, group->mux_bank, group->mux_reg); + config->is_sfsel = (value & BIT(group->sfsel_bit)) != 0; value &= ~BIT(group->sfsel_bit); pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
@@ -332,12 +362,15 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev, { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); const struct tegra_pingroup *group; + struct tegra_pingroup_config *config; + int group_index; u32 value;
if (!pmx->soc->sfsel_in_mux) return;
- group = tegra_pinctrl_get_group(pctldev, offset); + group_index = tegra_pinctrl_get_group_index(pctldev, offset); + group = tegra_pinctrl_get_group(pctldev, offset, group_index);
if (!group) return; @@ -345,8 +378,12 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev, if (group->mux_reg < 0 || group->sfsel_bit < 0) return;
+ config = tegra_pinctrl_get_group_config(pctldev, offset, group_index); + if (!config) + return; value = pmx_readl(pmx, group->mux_bank, group->mux_reg); - value |= BIT(group->sfsel_bit); + if (config->is_sfsel) + value |= BIT(group->sfsel_bit); pmx_writel(pmx, value, group->mux_bank, group->mux_reg); }
@@ -791,6 +828,12 @@ int tegra_pinctrl_probe(struct platform_device *pdev, pmx->dev = &pdev->dev; pmx->soc = soc_data;
+ pmx->pingroup_configs = devm_kcalloc(&pdev->dev, + pmx->soc->ngroups, sizeof(*pmx->pingroup_configs), + GFP_KERNEL); + if (!pmx->pingroup_configs) + return -ENOMEM; + /* * Each mux group will appear in 4 functions' list of groups. * This over-allocates slightly, since not all groups are mux groups. diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h index b3289bdf727d..b97136685f7a 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.h +++ b/drivers/pinctrl/tegra/pinctrl-tegra.h @@ -8,6 +8,10 @@ #ifndef __PINMUX_TEGRA_H__ #define __PINMUX_TEGRA_H__
+struct tegra_pingroup_config { + bool is_sfsel; +}; + struct tegra_pmx { struct device *dev; struct pinctrl_dev *pctl; @@ -21,6 +25,8 @@ struct tegra_pmx { int nbanks; void __iomem **regs; u32 *backup_regs; + /* Array of size soc->ngroups */ + struct tegra_pingroup_config *pingroup_configs; };
enum tegra_pinconf_param { diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index a1cff9ff35a9..9d79c5ea8b49 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -304,6 +304,7 @@ struct asus_wmi {
u32 kbd_rgb_dev; bool kbd_rgb_state_available; + bool oobe_state_available;
u8 throttle_thermal_policy_mode; u32 throttle_thermal_policy_dev; @@ -1826,7 +1827,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus) goto error; }
- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) { + if (asus->oobe_state_available) { /* * Disable OOBE state, so that e.g. the keyboard backlight * works. @@ -4741,6 +4742,7 @@ static int asus_wmi_add(struct platform_device *pdev) asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU); asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU); asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE); + asus->oobe_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE); asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE) && dmi_check_system(asus_ally_mcu_quirk);
@@ -4994,6 +4996,13 @@ static int asus_hotk_restore(struct device *device) } if (!IS_ERR_OR_NULL(asus->kbd_led.dev)) kbd_led_update(asus); + if (asus->oobe_state_available) { + /* + * Disable OOBE state, so that e.g. the keyboard backlight + * works. + */ + asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL); + }
if (asus_wmi_has_fnlock_key(asus)) asus_wmi_fnlock_update(asus); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index 230e6ee96636..d8f1bf5e58a0 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -45,7 +45,7 @@ static ssize_t current_password_store(struct kobject *kobj, int length;
length = strlen(buf); - if (buf[length-1] == '\n') + if (length && buf[length - 1] == '\n') length--;
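The added length check keeps an empty write from reading buf[-1] before trimming the trailing newline. A standalone sketch of the safe version:

#include <stdio.h>
#include <string.h>

/* Length of @buf with a single trailing newline trimmed; never reads
 * buf[-1] when the string is empty. */
static size_t strip_trailing_newline(const char *buf)
{
	size_t len = strlen(buf);

	if (len && buf[len - 1] == '\n')
		len--;
	return len;
}

int main(void)
{
	printf("%zu\n", strip_trailing_newline("secret\n"));	/* 6 */
	printf("%zu\n", strip_trailing_newline(""));		/* 0, no underflow */
	return 0;
}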
/* firmware does verifiation of min/max password length, diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index c908f52ed717..bdb4cbee4205 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1297,6 +1297,16 @@ static const struct key_entry ideapad_keymap[] = { /* Specific to some newer models */ { KE_KEY, 0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } }, { KE_KEY, 0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } }, + /* Star- (User Assignable Key) */ + { KE_KEY, 0x44 | IDEAPAD_WMI_KEY, { KEY_PROG1 } }, + /* Eye */ + { KE_KEY, 0x45 | IDEAPAD_WMI_KEY, { KEY_PROG3 } }, + /* Performance toggle also Fn+Q, handled inside ideapad_wmi_notify() */ + { KE_KEY, 0x3d | IDEAPAD_WMI_KEY, { KEY_PROG4 } }, + /* shift + prtsc */ + { KE_KEY, 0x2d | IDEAPAD_WMI_KEY, { KEY_CUT } }, + { KE_KEY, 0x29 | IDEAPAD_WMI_KEY, { KEY_TOUCHPAD_TOGGLE } }, + { KE_KEY, 0x2a | IDEAPAD_WMI_KEY, { KEY_ROOT_MENU } },
{ KE_END }, }; @@ -2083,6 +2093,12 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data) dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n", data->integer.value);
+ /* performance button triggered by 0x3d */ + if (data->integer.value == 0x3d && priv->dytc) { + platform_profile_cycle(); + break; + } + /* 0x02 FnLock, 0x03 Esc */ if (data->integer.value == 0x02 || data->integer.value == 0x03) ideapad_fn_lock_led_notify(priv, data->integer.value == 0x02); diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 9a609358956f..59392f1a0d8a 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -44,16 +44,17 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = { - {"INT33D5", 0}, - {"INTC1051", 0}, - {"INTC1054", 0}, - {"INTC1070", 0}, - {"INTC1076", 0}, - {"INTC1077", 0}, - {"INTC1078", 0}, - {"INTC107B", 0}, - {"INTC10CB", 0}, - {"", 0}, + { "INT33D5" }, + { "INTC1051" }, + { "INTC1054" }, + { "INTC1070" }, + { "INTC1076" }, + { "INTC1077" }, + { "INTC1078" }, + { "INTC107B" }, + { "INTC10CB" }, + { "INTC10CC" }, + { } }; MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 4cfb53206cb8..1abd8378f158 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -995,8 +995,8 @@ static ssize_t current_value_store(struct kobject *kobj, ret = -EINVAL; goto out; } - set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name, - new_setting, tlmi_priv.pwd_admin->signature); + set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name, + new_setting, tlmi_priv.pwd_admin->signature); if (!set_str) { ret = -ENOMEM; goto out; @@ -1026,7 +1026,7 @@ static ssize_t current_value_store(struct kobject *kobj, goto out; }
- set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name, + set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name, new_setting); if (!set_str) { ret = -ENOMEM; @@ -1054,11 +1054,11 @@ static ssize_t current_value_store(struct kobject *kobj, }
if (auth_str) - set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name, - new_setting, auth_str); + set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name, + new_setting, auth_str); else - set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name, - new_setting); + set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name, + new_setting); if (!set_str) { ret = -ENOMEM; goto out; @@ -1568,9 +1568,6 @@ static int tlmi_analyze(void) continue; }
- /* It is not allowed to have '/' for file name. Convert it into ''. */ - strreplace(item, '/', '\'); - /* Remove the value part */ strreplace(item, ',', '\0');
@@ -1582,11 +1579,16 @@ static int tlmi_analyze(void) goto fail_clear_attr; } setting->index = i; + + strscpy(setting->name, item); + /* It is not allowed to have '/' for file name. Convert it into ''. */ + strreplace(item, '/', '\'); strscpy(setting->display_name, item); + /* If BIOS selections supported, load those */ if (tlmi_priv.can_get_bios_selections) { - ret = tlmi_get_bios_selections(setting->display_name, - &setting->possible_values); + ret = tlmi_get_bios_selections(setting->name, + &setting->possible_values); if (ret || !setting->possible_values) pr_info("Error retrieving possible values for %d : %s\n", i, setting->display_name); diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h index e1975ffebeb4..7f9632a53736 100644 --- a/drivers/platform/x86/think-lmi.h +++ b/drivers/platform/x86/think-lmi.h @@ -84,6 +84,7 @@ struct tlmi_pwd_setting { struct tlmi_attr_setting { struct kobject kobj; int index; + char name[TLMI_SETTINGS_MAXLEN]; char display_name[TLMI_SETTINGS_MAXLEN]; char *possible_values; }; diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c index 88819659df83..05913e9fe082 100644 --- a/drivers/pmdomain/core.c +++ b/drivers/pmdomain/core.c @@ -3043,7 +3043,7 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev, /* Verify that the index is within a valid range. */ num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", "#power-domain-cells"); - if (index >= num_domains) + if (num_domains < 0 || index >= num_domains) return NULL;
/* Allocate and register device on the genpd bus. */ diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c index e03c2cb39a69..0dbf1893abfa 100644 --- a/drivers/pmdomain/imx/gpcv2.c +++ b/drivers/pmdomain/imx/gpcv2.c @@ -1361,7 +1361,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev) }
if (IS_ENABLED(CONFIG_LOCKDEP) && - of_property_read_bool(domain->dev->of_node, "power-domains")) + of_property_present(domain->dev->of_node, "power-domains")) lockdep_set_subclass(&domain->genpd.mlock, 1);
ret = of_genpd_add_provider_simple(domain->dev->of_node, diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c index 66409cff2083..e001b5c25bed 100644 --- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c +++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c @@ -338,11 +338,6 @@ static int __init rcar_gen4_sysc_pd_init(void) struct rcar_gen4_sysc_pd *pd; size_t n;
- if (!area->name) { - /* Skip NULLified area */ - continue; - } - n = strlen(area->name) + 1; pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); if (!pd) { diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c index b99326917330..1e2948523789 100644 --- a/drivers/pmdomain/renesas/rcar-sysc.c +++ b/drivers/pmdomain/renesas/rcar-sysc.c @@ -396,11 +396,6 @@ static int __init rcar_sysc_pd_init(void) struct rcar_sysc_pd *pd; size_t n;
- if (!area->name) { - /* Skip NULLified area */ - continue; - } - n = strlen(area->name) + 1; pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); if (!pd) { diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index 57eba1ddb17b..88fbae613e8b 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -89,6 +89,8 @@ #define AXP717_BAT_CC_MIN_UA 0 #define AXP717_BAT_CC_MAX_UA 3008000
+#define AXP717_TS_PIN_DISABLE BIT(4) + struct axp20x_batt_ps;
struct axp_data { @@ -117,6 +119,7 @@ struct axp20x_batt_ps { /* Maximum constant charge current */ unsigned int max_ccc; const struct axp_data *data; + bool ts_disable; };
static int axp20x_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt, @@ -983,6 +986,24 @@ static void axp717_set_battery_info(struct platform_device *pdev, int ccc = info->constant_charge_current_max_ua; int val;
+ axp_batt->ts_disable = (device_property_read_bool(axp_batt->dev, + "x-powers,no-thermistor")); + + /* + * Under rare conditions an incorrectly programmed efuse for + * the temp sensor on the PMIC may trigger a fault condition. + * Allow users to hard-code if the ts pin is not used to work + * around this problem. Note that this requires the battery + * be correctly defined in the device tree with a monitored + * battery node. + */ + if (axp_batt->ts_disable) { + regmap_update_bits(axp_batt->regmap, + AXP717_TS_PIN_CFG, + AXP717_TS_PIN_DISABLE, + AXP717_TS_PIN_DISABLE); + } + if (vmin > 0 && axp717_set_voltage_min_design(axp_batt, vmin)) dev_err(&pdev->dev, "couldn't set voltage_min_design\n"); diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 1a936829975e..efbd80db778d 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -315,6 +315,8 @@ struct ptp_ocp_serial_port { #define OCP_BOARD_ID_LEN 13 #define OCP_SERIAL_LEN 6 #define OCP_SMA_NUM 4 +#define OCP_SIGNAL_NUM 4 +#define OCP_FREQ_NUM 4
enum { PORT_GNSS, @@ -342,8 +344,8 @@ struct ptp_ocp { struct dcf_master_reg __iomem *dcf_out; struct dcf_slave_reg __iomem *dcf_in; struct tod_reg __iomem *nmea_out; - struct frequency_reg __iomem *freq_in[4]; - struct ptp_ocp_ext_src *signal_out[4]; + struct frequency_reg __iomem *freq_in[OCP_FREQ_NUM]; + struct ptp_ocp_ext_src *signal_out[OCP_SIGNAL_NUM]; struct ptp_ocp_ext_src *pps; struct ptp_ocp_ext_src *ts0; struct ptp_ocp_ext_src *ts1; @@ -378,10 +380,12 @@ struct ptp_ocp { u32 utc_tai_offset; u32 ts_window_adjust; u64 fw_cap; - struct ptp_ocp_signal signal[4]; + struct ptp_ocp_signal signal[OCP_SIGNAL_NUM]; struct ptp_ocp_sma_connector sma[OCP_SMA_NUM]; const struct ocp_sma_op *sma_op; struct dpll_device *dpll; + int signals_nr; + int freq_in_nr; };
#define OCP_REQ_TIMESTAMP BIT(0) @@ -2693,6 +2697,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->eeprom_map = fb_eeprom_map; bp->fw_version = ioread32(&bp->image->version); bp->sma_op = &ocp_fb_sma_op; + bp->signals_nr = 4; + bp->freq_in_nr = 4;
ptp_ocp_fb_set_version(bp);
@@ -2858,6 +2864,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->fw_version = ioread32(&bp->reg->version); bp->fw_tag = 2; bp->sma_op = &ocp_art_sma_op; + bp->signals_nr = 4; + bp->freq_in_nr = 4;
/* Enable MAC serial port during initialisation */ iowrite32(1, &bp->board_config->mro50_serial_activate); @@ -2884,6 +2892,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->flash_start = 0xA00000; bp->eeprom_map = fb_eeprom_map; bp->sma_op = &ocp_adva_sma_op; + bp->signals_nr = 2; + bp->freq_in_nr = 2;
version = ioread32(&bp->image->version); /* if lower 16 bits are empty, this is the fw loader. */ @@ -4004,7 +4014,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr) { struct signal_reg __iomem *reg = bp->signal_out[nr]->mem; struct ptp_ocp_signal *signal = &bp->signal[nr]; - char label[8]; + char label[16]; bool on; u32 val;
@@ -4030,7 +4040,7 @@ static void _frequency_summary_show(struct seq_file *s, int nr, struct frequency_reg __iomem *reg) { - char label[8]; + char label[16]; bool on; u32 val;
@@ -4174,11 +4184,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) }
if (bp->fw_cap & OCP_CAP_SIGNAL) - for (i = 0; i < 4; i++) + for (i = 0; i < bp->signals_nr; i++) _signal_summary_show(s, bp, i);
if (bp->fw_cap & OCP_CAP_FREQ) - for (i = 0; i < 4; i++) + for (i = 0; i < bp->freq_in_nr; i++) _frequency_summary_show(s, i, bp->freq_in[i]);
if (bp->irig_out) { diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c index 40f7dba42b5a..404cbe32711e 100644 --- a/drivers/regulator/ad5398.c +++ b/drivers/regulator/ad5398.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h>
#define AD5398_CURRENT_EN_MASK 0x8000
@@ -221,15 +222,20 @@ static int ad5398_probe(struct i2c_client *client) const struct ad5398_current_data_format *df = (struct ad5398_current_data_format *)id->driver_data;
- if (!init_data) - return -EINVAL; - chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM;
config.dev = &client->dev; + if (client->dev.of_node) + init_data = of_get_regulator_init_data(&client->dev, + client->dev.of_node, + &ad5398_reg); + if (!init_data) + return -EINVAL; + config.init_data = init_data; + config.of_node = client->dev.of_node; config.driver_data = chip;
chip->client = client; diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index a7bb9da27029..a2ae6adf0053 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -117,10 +117,10 @@ static const struct wcnss_data pronto_v1_data = { .pmu_offset = 0x1004, .spare_offset = 0x1088,
- .pd_names = { "mx", "cx" }, + .pd_names = { "cx", "mx" }, .vregs = (struct wcnss_vreg_info[]) { - { "vddmx", 950000, 1150000, 0 }, { "vddcx", .super_turbo = true}, + { "vddmx", 950000, 1150000, 0 }, { "vddpx", 1800000, 1800000, 0 }, }, .num_pd_vregs = 2, @@ -131,10 +131,10 @@ static const struct wcnss_data pronto_v2_data = { .pmu_offset = 0x1004, .spare_offset = 0x1088,
- .pd_names = { "mx", "cx" }, + .pd_names = { "cx", "mx" }, .vregs = (struct wcnss_vreg_info[]) { - { "vddmx", 1287500, 1287500, 0 }, { "vddcx", .super_turbo = true }, + { "vddmx", 1287500, 1287500, 0 }, { "vddpx", 1800000, 1800000, 0 }, }, .num_pd_vregs = 2, @@ -397,8 +397,17 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev) static int wcnss_init_pds(struct qcom_wcnss *wcnss, const char * const pd_names[WCNSS_MAX_PDS]) { + struct device *dev = wcnss->dev; int i, ret;
+ /* Handle single power domain */ + if (dev->pm_domain) { + wcnss->pds[0] = dev; + wcnss->num_pds = 1; + pm_runtime_enable(dev); + return 0; + } + for (i = 0; i < WCNSS_MAX_PDS; i++) { if (!pd_names[i]) break; @@ -418,8 +427,15 @@ static int wcnss_init_pds(struct qcom_wcnss *wcnss,
static void wcnss_release_pds(struct qcom_wcnss *wcnss) { + struct device *dev = wcnss->dev; int i;
+ /* Handle single power domain */ + if (wcnss->num_pds == 1 && dev->pm_domain) { + pm_runtime_disable(dev); + return; + } + for (i = 0; i < wcnss->num_pds; i++) dev_pm_domain_detach(wcnss->pds[i], false); } @@ -437,10 +453,14 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss, * the regulators for the power domains. For old device trees we need to * reserve extra space to manage them through the regulator interface. */ - if (wcnss->num_pds) - info += num_pd_vregs; - else + if (wcnss->num_pds) { + info += wcnss->num_pds; + /* Handle single power domain case */ + if (wcnss->num_pds < num_pd_vregs) + num_vregs += num_pd_vregs - wcnss->num_pds; + } else { num_vregs += num_pd_vregs; + }
bulk = devm_kcalloc(wcnss->dev, num_vregs, sizeof(struct regulator_bulk_data), diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 872e0b679be4..5efbe69bf5ca 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -1807,10 +1807,8 @@ static int ds1307_probe(struct i2c_client *client) * For some variants, be sure alarms can trigger when we're * running on Vbackup (BBSQI/BBSQW) */ - if (want_irq || ds1307_can_wakeup_device) { + if (want_irq || ds1307_can_wakeup_device) regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit; - regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE); - }
regmap_write(ds1307->regmap, DS1337_REG_CONTROL, regs[0]); diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c index 35b2e36b426a..cb01038a2e27 100644 --- a/drivers/rtc/rtc-rv3032.c +++ b/drivers/rtc/rtc-rv3032.c @@ -69,7 +69,7 @@ #define RV3032_CLKOUT2_FD_MSK GENMASK(6, 5) #define RV3032_CLKOUT2_OS BIT(7)
-#define RV3032_CTRL1_EERD BIT(3) +#define RV3032_CTRL1_EERD BIT(2) #define RV3032_CTRL1_WADA BIT(5)
#define RV3032_CTRL2_STOP BIT(0) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 9f76f2d7b66e..3bac163057d8 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -851,48 +851,66 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev) vfio_put_device(&matrix_mdev->vdev); }
-#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \ - "already assigned to %s" +#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s"
-static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev, - unsigned long *apm, - unsigned long *aqm) +#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev" + +static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee, + struct ap_matrix_mdev *assigned_to, + unsigned long *apm, unsigned long *aqm) { unsigned long apid, apqi; - const struct device *dev = mdev_dev(matrix_mdev->mdev); - const char *mdev_name = dev_name(dev);
- for_each_set_bit_inv(apid, apm, AP_DEVICES) + for_each_set_bit_inv(apid, apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { + dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR, + apid, apqi, dev_name(mdev_dev(assigned_to->mdev))); + } + } +} + +static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee, + unsigned long *apm, unsigned long *aqm) +{ + unsigned long apid, apqi; + + for_each_set_bit_inv(apid, apm, AP_DEVICES) { for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) - dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name); + dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi); + } }
/** * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs * + * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being + * assigned; or, NULL if this function was called by the AP bus + * driver in_use callback to verify none of the APQNs being reserved + * for the host device driver are in use by a vfio_ap mediated device * @mdev_apm: mask indicating the APIDs of the APQNs to be verified * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified * - * Verifies that each APQN derived from the Cartesian product of a bitmap of - * AP adapter IDs and AP queue indexes is not configured for any matrix - * mediated device. AP queue sharing is not allowed. + * Verifies that each APQN derived from the Cartesian product of APIDs + * represented by the bits set in @mdev_apm and the APQIs of the bits set in + * @mdev_aqm is not assigned to a mediated device other than the mdev to which + * the APQN is being assigned (@assignee). AP queue sharing is not allowed. * * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE. */ -static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, +static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee, + unsigned long *mdev_apm, unsigned long *mdev_aqm) { - struct ap_matrix_mdev *matrix_mdev; + struct ap_matrix_mdev *assigned_to; DECLARE_BITMAP(apm, AP_DEVICES); DECLARE_BITMAP(aqm, AP_DOMAINS);
- list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) { /* - * If the input apm and aqm are fields of the matrix_mdev - * object, then move on to the next matrix_mdev. + * If the mdev to which the mdev_apm and mdev_aqm is being + * assigned is the same as the mdev being verified */ - if (mdev_apm == matrix_mdev->matrix.apm && - mdev_aqm == matrix_mdev->matrix.aqm) + if (assignee == assigned_to) continue;
memset(apm, 0, sizeof(apm)); @@ -902,15 +920,16 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, * We work on full longs, as we can only exclude the leftover * bits in non-inverse order. The leftover is all zeros. */ - if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm, - AP_DEVICES)) + if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES)) continue;
- if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm, - AP_DOMAINS)) + if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS)) continue;
- vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm); + if (assignee) + vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm); + else + vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm);
return -EADDRINUSE; } @@ -939,7 +958,8 @@ static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev) matrix_mdev->matrix.aqm)) return -EADDRNOTAVAIL;
- return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm, + return vfio_ap_mdev_verify_no_sharing(matrix_mdev, + matrix_mdev->matrix.apm, matrix_mdev->matrix.aqm); }
@@ -2467,7 +2487,7 @@ int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
mutex_lock(&matrix_dev->guests_lock); mutex_lock(&matrix_dev->mdevs_lock); - ret = vfio_ap_mdev_verify_no_sharing(apm, aqm); + ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm); mutex_unlock(&matrix_dev->mdevs_lock); mutex_unlock(&matrix_dev->guests_lock);
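A side note on the vfio_ap rework above: vfio_ap_mdev_verify_no_sharing() now serves both the mdev assignment path and the AP bus in_use callback, and the only behavioural difference is which warning is logged. The conflict test itself is an intersection of the adapter (APM) and domain (AQM) bitmaps; an APQN is shared only when both intersections are non-empty. A minimal stand-alone sketch, using hypothetical masks and plain unsigned long words instead of the kernel bitmap API:

    #include <stdbool.h>
    #include <stdio.h>

    /* One word per mask is enough for illustration; the driver uses
     * DECLARE_BITMAP(..., AP_DEVICES) / DECLARE_BITMAP(..., AP_DOMAINS). */
    static bool apqns_shared(unsigned long apm_a, unsigned long aqm_a,
                             unsigned long apm_b, unsigned long aqm_b)
    {
            /* The Cartesian products overlap only if an adapter AND a
             * domain are common to both masks. */
            return (apm_a & apm_b) && (aqm_a & aqm_b);
    }

    int main(void)
    {
            /* Hypothetical: A owns adapter 1, domains 0-1; B owns adapters
             * 1-2, domain 2 -> no shared APQN. */
            printf("%d\n", apqns_shared(0x2, 0x3, 0x6, 0x4));  /* prints 0 */
            /* Give B domain 1 as well -> APQN (1,1) is now shared. */
            printf("%d\n", apqns_shared(0x2, 0x3, 0x6, 0x6));  /* prints 1 */
            return 0;
    }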
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 6e8d8a96c54f..f2e4237ff3d9 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -227,10 +227,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) return;
- /* check for recovered fabric node */ - if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && - ndlp->nlp_DID == Fabric_DID) + /* Ignore callback for a mismatched (stale) rport */ + if (ndlp->rport != rport) { + lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE, + "6788 fc rport mismatch: d_id x%06x ndlp x%px " + "fc rport x%px node rport x%px state x%x " + "refcnt %u\n", + ndlp->nlp_DID, ndlp, rport, ndlp->rport, + ndlp->nlp_state, kref_read(&ndlp->kref)); return; + }
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -5622,6 +5628,7 @@ static struct lpfc_nodelist * __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *np = NULL; uint32_t data1;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { @@ -5636,14 +5643,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); - return ndlp; + + /* Check for new or potentially stale node */ + if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) + return ndlp; + np = ndlp; } }
- /* FIND node did <did> NOT FOUND */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "0932 FIND node did x%x NOT FOUND.\n", did); - return NULL; + if (!np) + /* FIND node did <did> NOT FOUND */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0932 FIND node did x%x NOT FOUND.\n", did); + + return np; }
struct lpfc_nodelist * diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a3658ef1141b..50c761991191 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -13190,6 +13190,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) eqhdl = lpfc_get_eq_hdl(0); rc = pci_irq_vector(phba->pcidev, 0); if (rc < 0) { + free_irq(phba->pcidev->irq, phba); pci_free_irq_vectors(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0496 MSI pci_irq_vec failed (%d)\n", rc); @@ -13270,6 +13271,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) eqhdl = lpfc_get_eq_hdl(0); retval = pci_irq_vector(phba->pcidev, 0); if (retval < 0) { + free_irq(phba->pcidev->irq, phba); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0502 INTR pci_irq_vec failed (%d)\n", retval); diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index c0a372868e1d..604f37e5c0c3 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, char *desc = NULL; u16 event;
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT)) + return; + event = event_reply->event;
switch (event) { @@ -2744,7 +2747,10 @@ static void mpi3mr_watchdog_work(struct work_struct *work) return; }
- if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) { + if (!(mrioc->facts.ioc_capabilities & + MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) && + (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) { + mrioc->ts_update_counter = 0; mpi3mr_sync_timestamp(mrioc); } diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 87784c96249a..47faa27bc355 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -679,6 +679,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, size_t data_in_sz = 0; long ret; u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; + int tm_ret;
issue_reset = 0;
@@ -1120,18 +1121,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, if (pcie_device && (!ioc->tm_custom_handling) && (!(mpt3sas_scsih_is_pcie_scsi_device( pcie_device->device_info)))) - mpt3sas_scsih_issue_locked_tm(ioc, + tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, pcie_device->reset_timeout, MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); else - mpt3sas_scsih_issue_locked_tm(ioc, + tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + + if (tm_ret != SUCCESS) { + ioc_info(ioc, + "target reset failed, issue hard reset: handle (0x%04x)\n", + le16_to_cpu(mpi_request->FunctionDependent1)); + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + } } else mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 680ba180a672..89a2aaccdcfc 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -173,6 +173,10 @@ static const char *sdebug_version_date = "20210520"; #define DEF_ZBC_MAX_OPEN_ZONES 8 #define DEF_ZBC_NR_CONV_ZONES 1
+/* Default parameters for tape drives */ +#define TAPE_DEF_DENSITY 0x0 +#define TAPE_DEF_BLKSIZE 0 + #define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */ @@ -363,6 +367,10 @@ struct sdebug_dev_info { ktime_t create_ts; /* time since bootup that this device was created */ struct sdeb_zone_state *zstate;
+ /* For tapes */ + unsigned int tape_blksize; + unsigned int tape_density; + struct dentry *debugfs_entry; struct spinlock list_lock; struct list_head inject_err_list; @@ -773,7 +781,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = { /* 20 */ {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */ {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */ + {0, 0x1, 0, 0, NULL, NULL, /* REWIND ?? */ {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, @@ -2742,7 +2750,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp, unsigned char *ap; unsigned char *arr __free(kfree); unsigned char *cmd = scp->cmnd; - bool dbd, llbaa, msense_6, is_disk, is_zbc; + bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC); if (!arr) @@ -2755,7 +2763,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp, llbaa = msense_6 ? false : !!(cmd[1] & 0x10); is_disk = (sdebug_ptype == TYPE_DISK); is_zbc = devip->zoned; - if ((is_disk || is_zbc) && !dbd) + is_tape = (sdebug_ptype == TYPE_TAPE); + if ((is_disk || is_zbc || is_tape) && !dbd) bd_len = llbaa ? 16 : 8; else bd_len = 0; @@ -2793,15 +2802,25 @@ static int resp_mode_sense(struct scsi_cmnd *scp, put_unaligned_be32(0xffffffff, ap + 0); else put_unaligned_be32(sdebug_capacity, ap + 0); - put_unaligned_be16(sdebug_sector_size, ap + 6); + if (is_tape) { + ap[0] = devip->tape_density; + put_unaligned_be16(devip->tape_blksize, ap + 6); + } else + put_unaligned_be16(sdebug_sector_size, ap + 6); offset += bd_len; ap = arr + offset; } else if (16 == bd_len) { + if (is_tape) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4); + return check_condition_result; + } put_unaligned_be64((u64)sdebug_capacity, ap + 0); put_unaligned_be32(sdebug_sector_size, ap + 12); offset += bd_len; ap = arr + offset; } + if (cmd[2] == 0) + goto only_bd; /* Only block descriptor requested */
/* * N.B. If len>0 before resp_*_pg() call, then form of that call should be: @@ -2902,6 +2921,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp, default: goto bad_pcode; } +only_bd: if (msense_6) arr[0] = offset - 1; else @@ -2945,8 +2965,27 @@ static int resp_mode_select(struct scsi_cmnd *scp, __func__, param_len, res); md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); - off = bd_len + (mselect6 ? 4 : 8); - if (md_len > 2 || off >= res) { + off = (mselect6 ? 4 : 8); + if (sdebug_ptype == TYPE_TAPE) { + int blksize; + + if (bd_len != 8) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, + mselect6 ? 3 : 6, -1); + return check_condition_result; + } + blksize = get_unaligned_be16(arr + off + 6); + if ((blksize % 4) != 0) { + mk_sense_invalid_fld(scp, SDEB_IN_DATA, off + 6, -1); + return check_condition_result; + } + devip->tape_density = arr[off]; + devip->tape_blksize = blksize; + } + off += bd_len; + if (off >= res) + return 0; /* No page written, just descriptors */ + if (md_len > 2) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); return check_condition_result; } @@ -5835,6 +5874,10 @@ static struct sdebug_dev_info *sdebug_device_create( } else { devip->zoned = false; } + if (sdebug_ptype == TYPE_TAPE) { + devip->tape_density = TAPE_DEF_DENSITY; + devip->tape_blksize = TAPE_DEF_BLKSIZE; + } devip->create_ts = ktime_get_boottime(); atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0)); spin_lock_init(&devip->list_lock); diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c index 093774d77534..daa160459c9b 100644 --- a/drivers/scsi/scsi_sysctl.c +++ b/drivers/scsi/scsi_sysctl.c @@ -17,7 +17,9 @@ static struct ctl_table scsi_table[] = { .data = &scsi_logging_level, .maxlen = sizeof(scsi_logging_level), .mode = 0644, - .proc_handler = proc_dointvec }, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX }, };
static struct ctl_table_header *scsi_table_header; diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index a17441635ff3..3e982c166baf 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -952,7 +952,6 @@ static void reset_state(struct scsi_tape *STp) STp->partition = find_partition(STp); if (STp->partition < 0) STp->partition = 0; - STp->new_partition = STp->partition; } } @@ -2894,7 +2893,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon timeout = STp->long_timeout * 8;
DEBC_printk(STp, "Erasing tape.\n"); - fileno = blkno = at_sm = 0; break; case MTSETBLK: /* Set block length */ case MTSETDENSITY: /* Set tape density */ @@ -2927,14 +2925,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon if (cmd_in == MTSETDENSITY) { (STp->buffer)->b_data[4] = arg; STp->density_changed = 1; /* At least we tried ;-) */ + STp->changed_density = arg; } else if (cmd_in == SET_DENS_AND_BLK) (STp->buffer)->b_data[4] = arg >> 24; else (STp->buffer)->b_data[4] = STp->density; if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { ltmp = arg & MT_ST_BLKSIZE_MASK; - if (cmd_in == MTSETBLK) + if (cmd_in == MTSETBLK) { STp->blksize_changed = 1; /* At least we tried ;-) */ + STp->changed_blksize = arg; + } } else ltmp = STp->block_size; (STp->buffer)->b_data[9] = (ltmp >> 16); @@ -3081,7 +3082,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon cmd_in == MTSETDRVBUFFER || cmd_in == SET_DENS_AND_BLK) { if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST && - !(STp->use_pf & PF_TESTED)) { + cmdstatp->sense_hdr.asc == 0x24 && + (STp->device)->scsi_level <= SCSI_2 && + !(STp->use_pf & PF_TESTED)) { /* Try the other possible state of Page Format if not already tried */ STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; @@ -3633,9 +3636,25 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) retval = (-EIO); goto out; } - reset_state(STp); + reset_state(STp); /* Clears pos_unknown */ /* remove this when the midlevel properly clears was_reset */ STp->device->was_reset = 0; + + /* Fix the device settings after reset, ignore errors */ + if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK || + mtc.mt_op == MTEOM) { + if (STp->can_partitions) { + /* STp->new_partition contains the + * latest partition set + */ + STp->partition = 0; + switch_partition(STp); + } + if (STp->density_changed) + st_int_ioctl(STp, MTSETDENSITY, STp->changed_density); + if (STp->blksize_changed) + st_int_ioctl(STp, MTSETBLK, STp->changed_blksize); + } }
if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK && diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index 1aaaf5369a40..6d31b894ee84 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h @@ -165,6 +165,7 @@ struct scsi_tape { unsigned char compression_changed; unsigned char drv_buffer; unsigned char density; + unsigned char changed_density; unsigned char door_locked; unsigned char autorew_dev; /* auto-rewind device */ unsigned char rew_at_close; /* rewind necessary at close */ @@ -172,6 +173,7 @@ struct scsi_tape { unsigned char cleaning_req; /* cleaning requested? */ unsigned char first_tur; /* first TEST UNIT READY */ int block_size; + int changed_blksize; int min_block; int max_block; int recover_count; /* From tape opening */ diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h index 27c9fa745fd5..b8d5244678f0 100644 --- a/drivers/soc/apple/rtkit-internal.h +++ b/drivers/soc/apple/rtkit-internal.h @@ -44,6 +44,7 @@ struct apple_rtkit {
struct apple_rtkit_shmem ioreport_buffer; struct apple_rtkit_shmem crashlog_buffer; + struct apple_rtkit_shmem oslog_buffer;
struct apple_rtkit_shmem syslog_buffer; char *syslog_msg_buffer; diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c index e6d940292c9f..45ccbe2cbcd6 100644 --- a/drivers/soc/apple/rtkit.c +++ b/drivers/soc/apple/rtkit.c @@ -66,8 +66,9 @@ enum { #define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56) -#define APPLE_RTKIT_OSLOG_INIT 1 -#define APPLE_RTKIT_OSLOG_ACK 3 +#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1 +#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36) +#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11 #define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12 @@ -251,15 +252,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, struct apple_rtkit_shmem *buffer, u8 ep, u64 msg) { - size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg); u64 reply; int err;
+ /* The different size vs. IOVA shifts look odd but are indeed correct this way */ + if (ep == APPLE_RTKIT_EP_OSLOG) { + buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg); + buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12; + } else { + buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12; + buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); + } + buffer->buffer = NULL; buffer->iomem = NULL; buffer->is_mapped = false; - buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); - buffer->size = n_4kpages << 12;
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n", buffer->size, &buffer->iova); @@ -284,11 +291,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, }
if (!buffer->is_mapped) { - reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE, - APPLE_RTKIT_BUFFER_REQUEST); - reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages); - reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA, - buffer->iova); + /* oslog uses different fields and needs a shifted IOVA instead of size */ + if (ep == APPLE_RTKIT_EP_OSLOG) { + reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, + APPLE_RTKIT_OSLOG_BUFFER_REQUEST); + reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size); + reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA, + buffer->iova >> 12); + } else { + reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE, + APPLE_RTKIT_BUFFER_REQUEST); + reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, + buffer->size >> 12); + reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA, + buffer->iova); + } apple_rtkit_send_message(rtk, ep, reply, NULL, false); }
@@ -482,25 +499,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg) } }
-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg) -{ - u64 ack; - - dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg); - ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK); - apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false); -} - static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg) { u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
switch (type) { - case APPLE_RTKIT_OSLOG_INIT: - apple_rtkit_oslog_rx_init(rtk, msg); + case APPLE_RTKIT_OSLOG_BUFFER_REQUEST: + apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer, + APPLE_RTKIT_EP_OSLOG, msg); break; default: - dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg); + dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", + msg); } }
@@ -667,7 +677,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie, rtk->mbox->rx = apple_rtkit_rx; rtk->mbox->cookie = rtk;
- rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM, + rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM, dev_name(rtk->dev)); if (!rtk->wq) { ret = -ENOMEM; @@ -710,6 +720,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer); apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer); @@ -890,6 +901,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer); apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer); diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index 5250c1d702eb..aaa965d4b050 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -155,6 +155,7 @@ #define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23 #define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24 #define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30 +#define MT8188_MUTEX_MOD_DISP1_DPI1 38 #define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39
#define MT8195_MUTEX_MOD_DISP_OVL0 0 @@ -289,6 +290,7 @@ #define MT8188_MUTEX_SOF_DSI0 1 #define MT8188_MUTEX_SOF_DP_INTF0 3 #define MT8188_MUTEX_SOF_DP_INTF1 4 +#define MT8188_MUTEX_SOF_DPI1 5 #define MT8195_MUTEX_SOF_DSI0 1 #define MT8195_MUTEX_SOF_DSI1 2 #define MT8195_MUTEX_SOF_DP_INTF0 3 @@ -301,6 +303,7 @@ #define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7) #define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7) #define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7) +#define MT8188_MUTEX_EOF_DPI1 (MT8188_MUTEX_SOF_DPI1 << 7) #define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7) #define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7) #define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7) @@ -472,6 +475,7 @@ static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0, [DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0, [DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1, + [DDP_COMPONENT_DPI1] = MT8188_MUTEX_MOD_DISP1_DPI1, [DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER, [DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0, [DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1, @@ -686,6 +690,8 @@ static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0, + [MUTEX_SOF_DPI1] = + MT8188_MUTEX_SOF_DPI1 | MT8188_MUTEX_EOF_DPI1, [MUTEX_SOF_DP_INTF0] = MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0, [MUTEX_SOF_DP_INTF1] = diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c index 97006cc3b946..8e681f519526 100644 --- a/drivers/soc/samsung/exynos-asv.c +++ b/drivers/soc/samsung/exynos-asv.c @@ -9,6 +9,7 @@ * Samsung Exynos SoC Adaptive Supply Voltage support */
+#include <linux/array_size.h> #include <linux/cpu.h> #include <linux/device.h> #include <linux/energy_model.h> diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index bba8d86ae1bb..dedfe6d0fb3f 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -12,6 +12,7 @@ * Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support */
+#include <linux/array_size.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/mfd/syscon.h> diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index dd5256e5aae1..c40313886a01 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -5,6 +5,7 @@ // // Exynos - CPU PMU(Power Management Unit) support
+#include <linux/array_size.h> #include <linux/arm-smccc.h> #include <linux/of.h> #include <linux/of_address.h> diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c index 114352695ac2..5a93a68dba87 100644 --- a/drivers/soc/samsung/exynos-usi.c +++ b/drivers/soc/samsung/exynos-usi.c @@ -6,6 +6,7 @@ * Samsung Exynos USI driver (Universal Serial Interface). */
+#include <linux/array_size.h> #include <linux/clk.h> #include <linux/mfd/syscon.h> #include <linux/module.h> diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c index 30f230ed1769..4bad12a99542 100644 --- a/drivers/soc/samsung/exynos3250-pmu.c +++ b/drivers/soc/samsung/exynos3250-pmu.c @@ -5,6 +5,7 @@ // // Exynos3250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h> #include <linux/soc/samsung/exynos-regs-pmu.h> #include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c index 7a2d50be6b4a..2ae5c3e1b07a 100644 --- a/drivers/soc/samsung/exynos5250-pmu.c +++ b/drivers/soc/samsung/exynos5250-pmu.c @@ -5,6 +5,7 @@ // // Exynos5250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h> #include <linux/soc/samsung/exynos-regs-pmu.h> #include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c index 6fedcd78cb45..58a2209795f7 100644 --- a/drivers/soc/samsung/exynos5420-pmu.c +++ b/drivers/soc/samsung/exynos5420-pmu.c @@ -5,6 +5,7 @@ // // Exynos5420 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h> #include <linux/pm.h> #include <linux/soc/samsung/exynos-regs-pmu.h> #include <linux/soc/samsung/exynos-pmu.h> diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c index 4fb0f0a24828..704039eb3c07 100644 --- a/drivers/soc/ti/k3-socinfo.c +++ b/drivers/soc/ti/k3-socinfo.c @@ -105,6 +105,12 @@ k3_chipinfo_variant_to_sr(unsigned int partno, unsigned int variant, return -ENODEV; }
+static const struct regmap_config k3_chipinfo_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + static int k3_chipinfo_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; @@ -112,13 +118,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct soc_device *soc_dev; struct regmap *regmap; + void __iomem *base; u32 partno_id; u32 variant; u32 jtag_id; u32 mfg; int ret;
- regmap = device_node_to_regmap(node); + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg); if (IS_ERR(regmap)) return PTR_ERR(regmap);
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index 0d01849c3586..e3d5e6c1d582 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -1110,6 +1110,7 @@ static int __maybe_unused amd_suspend(struct device *dev) amd_sdw_wake_enable(amd_manager, false); return amd_sdw_clock_stop(amd_manager); } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) { + amd_sdw_wake_enable(amd_manager, false); /* * As per hardware programming sequence on AMD platforms, * clock stop should be invoked first before powering-off @@ -1137,6 +1138,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev) amd_sdw_wake_enable(amd_manager, true); return amd_sdw_clock_stop(amd_manager); } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) { + amd_sdw_wake_enable(amd_manager, true); ret = amd_sdw_clock_stop(amd_manager); if (ret) return ret; diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 263ca32f0c5c..6ca06cce41d3 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -121,6 +121,10 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, set_bit(SDW_GROUP13_DEV_NUM, bus->assigned); set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
+ ret = sdw_irq_create(bus, fwnode); + if (ret) + return ret; + /* * SDW is an enumerable bus, but devices can be powered off. So, * they won't be able to report as present. @@ -137,6 +141,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
if (ret < 0) { dev_err(bus->dev, "Finding slaves failed:%d\n", ret); + sdw_irq_delete(bus); return ret; }
@@ -155,10 +160,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, bus->params.curr_bank = SDW_BANK0; bus->params.next_bank = SDW_BANK1;
- ret = sdw_irq_create(bus, fwnode); - if (ret) - return ret; - return 0; } EXPORT_SYMBOL(sdw_bus_master_add); diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 05652e983539..6f2b5ec5c87c 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -1341,7 +1341,7 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols) return val; }
-static void cdns_init_clock_ctrl(struct sdw_cdns *cdns) +static int cdns_init_clock_ctrl(struct sdw_cdns *cdns) { struct sdw_bus *bus = &cdns->bus; struct sdw_master_prop *prop = &bus->prop; @@ -1355,14 +1355,25 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns) prop->default_row, prop->default_col);
+ if (!prop->default_frame_rate || !prop->default_row) { + dev_err(cdns->dev, "Default frame_rate %d or row %d is invalid\n", + prop->default_frame_rate, prop->default_row); + return -EINVAL; + } + /* Set clock divider */ - divider = (prop->mclk_freq / prop->max_clk_freq) - 1; + divider = (prop->mclk_freq * SDW_DOUBLE_RATE_FACTOR / + bus->params.curr_dr_freq) - 1;
cdns_updatel(cdns, CDNS_MCP_CLK_CTRL0, CDNS_MCP_CLK_MCLKD_MASK, divider); cdns_updatel(cdns, CDNS_MCP_CLK_CTRL1, CDNS_MCP_CLK_MCLKD_MASK, divider);
+ /* Set frame shape base on the actual bus frequency. */ + prop->default_col = bus->params.curr_dr_freq / + prop->default_frame_rate / prop->default_row; + /* * Frame shape changes after initialization have to be done * with the bank switch mechanism @@ -1375,6 +1386,8 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns) ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ; cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval); cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval); + + return 0; }
/** @@ -1383,9 +1396,12 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns) */ int sdw_cdns_init(struct sdw_cdns *cdns) { + int ret; u32 val;
- cdns_init_clock_ctrl(cdns); + ret = cdns_init_clock_ctrl(cdns); + if (ret) + return ret;
sdw_cdns_check_self_clearing_bits(cdns, __func__, false, 0);
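For reference on the cadence_master.c change above: the MCLK divider is now derived from the double-rate bus frequency actually in use rather than from max_clk_freq, and the column count is recomputed so that rows * columns still matches the requested frame rate. With purely illustrative numbers (mclk_freq = 38.4 MHz, curr_dr_freq = 9.6 MHz, default_frame_rate = 16000, default_row = 50, SDW_DOUBLE_RATE_FACTOR = 2):

    divider     = 38400000 * 2 / 9600000 - 1 = 7
    default_col = 9600000 / 16000 / 50       = 12

That is, the divider matches the old mclk_freq / max_clk_freq - 1 result whenever the bus runs at its maximum rate (the two computations only diverge when curr_dr_freq is lower), and 50 rows * 12 columns at 9.6 MHz gives back the 16 kHz frame rate.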
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 3fa990fb59c7..7c43df252328 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. -// Copyright 2020 NXP +// Copyright 2020-2025 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -62,6 +62,7 @@ #define SPI_SR_TFIWF BIT(18) #define SPI_SR_RFDF BIT(17) #define SPI_SR_CMDFFF BIT(16) +#define SPI_SR_TXRXS BIT(30) #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ SPI_SR_TFUF | SPI_SR_TFFF | \ SPI_SR_CMDTCF | SPI_SR_SPEF | \ @@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, struct spi_transfer *transfer; bool cs = false; int status = 0; + u32 val = 0; + bool cs_change = false;
message->actual_length = 0;
+ /* Put DSPI in running mode if halted. */ + regmap_read(dspi->regmap, SPI_MCR, &val); + if (val & SPI_MCR_HALT) { + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + !(val & SPI_SR_TXRXS)) + ; + } + list_for_each_entry(transfer, &message->transfers, transfer_list) { dspi->cur_transfer = transfer; dspi->cur_msg = message; @@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; }
+ cs_change = transfer->cs_change; dspi->tx = transfer->tx_buf; dspi->rx = transfer->rx_buf; dspi->len = transfer->len; @@ -962,6 +975,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); + spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, dspi->progress, !dspi->irq);
@@ -988,6 +1003,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi_deassert_cs(spi, &cs); }
+ if (status || !cs_change) { + /* Put DSPI in stop mode */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_HALT, SPI_MCR_HALT); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + val & SPI_SR_TXRXS) + ; + } + message->status = status; spi_finalize_current_message(ctlr);
@@ -1167,6 +1191,20 @@ static int dspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+static const struct regmap_range dspi_yes_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_MCR), + regmap_reg_range(SPI_TCR, SPI_CTAR(3)), + regmap_reg_range(SPI_SR, SPI_TXFR3), + regmap_reg_range(SPI_RXFR0, SPI_RXFR3), + regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), + regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_access_table dspi_access_table = { + .yes_ranges = dspi_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), +}; + static const struct regmap_range dspi_volatile_ranges[] = { regmap_reg_range(SPI_MCR, SPI_TCR), regmap_reg_range(SPI_SR, SPI_SR), @@ -1184,6 +1222,8 @@ static const struct regmap_config dspi_regmap_config = { .reg_stride = 4, .max_register = 0x88, .volatile_table = &dspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, };
static const struct regmap_range dspi_xspi_volatile_ranges[] = { @@ -1205,6 +1245,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { .reg_stride = 4, .max_register = 0x13c, .volatile_table = &dspi_xspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }, { .name = "pushr", @@ -1227,6 +1269,8 @@ static int dspi_init(struct fsl_dspi *dspi) if (!spi_controller_is_target(dspi->ctlr)) mcr |= SPI_MCR_HOST;
+ mcr |= SPI_MCR_HALT; + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index c02c4204442f..0eb35c4e3987 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi)
priv->current_cs = spi_get_chipselect(spi, 0);
- spi_setup(priv->spi); - - return 0; + return spi_setup(priv->spi); }
static int spi_mux_setup(struct spi_device *spi) diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 40a64a598a74..5008489d6fac 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -547,7 +547,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs, cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET; if (spi->mode & SPI_LSB_FIRST) cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET; - if (spi->mode & SPI_CS_HIGH) + if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0))) cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
if (xfer->rx_buf && xfer->tx_buf) diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index b9df39e06e7c..4b091b4d4ff3 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -799,7 +799,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) { struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id; - irqreturn_t ret = IRQ_NONE; u32 status, mask, dma_status = 0;
status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST); @@ -814,27 +813,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) dma_status); }
- if (mask & GQSPI_ISR_TXNOT_FULL_MASK) { + if (!mask && !dma_status) + return IRQ_NONE; + + if (mask & GQSPI_ISR_TXNOT_FULL_MASK) zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL); - ret = IRQ_HANDLED; - }
- if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) { + if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) zynqmp_process_dma_irq(xqspi); - ret = IRQ_HANDLED; - } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && - (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) { + else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && + (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL); - ret = IRQ_HANDLED; - }
if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 && ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) { zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK); complete(&xqspi->data_completion); - ret = IRQ_HANDLED; } - return ret; + return IRQ_HANDLED; }
/** diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 97787002080a..1a9432646b70 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -588,29 +588,6 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state return 0; }
-int -vchiq_platform_init_state(struct vchiq_state *state) -{ - struct vchiq_arm_state *platform_state; - - platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); - if (!platform_state) - return -ENOMEM; - - rwlock_init(&platform_state->susp_res_lock); - - init_completion(&platform_state->ka_evt); - atomic_set(&platform_state->ka_use_count, 0); - atomic_set(&platform_state->ka_use_ack_count, 0); - atomic_set(&platform_state->ka_release_count, 0); - - platform_state->state = state; - - state->platform_state = (struct opaque_platform_state *)platform_state; - - return 0; -} - static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state) { return (struct vchiq_arm_state *)state->platform_state; @@ -1358,6 +1335,39 @@ vchiq_keepalive_thread_func(void *v) return 0; }
+int +vchiq_platform_init_state(struct vchiq_state *state) +{ + struct vchiq_arm_state *platform_state; + char threadname[16]; + + platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); + if (!platform_state) + return -ENOMEM; + + snprintf(threadname, sizeof(threadname), "vchiq-keep/%d", + state->id); + platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func, + (void *)state, threadname); + if (IS_ERR(platform_state->ka_thread)) { + dev_err(state->dev, "couldn't create thread %s\n", threadname); + return PTR_ERR(platform_state->ka_thread); + } + + rwlock_init(&platform_state->susp_res_lock); + + init_completion(&platform_state->ka_evt); + atomic_set(&platform_state->ka_use_count, 0); + atomic_set(&platform_state->ka_use_ack_count, 0); + atomic_set(&platform_state->ka_release_count, 0); + + platform_state->state = state; + + state->platform_state = (struct opaque_platform_state *)platform_state; + + return 0; +} + int vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, enum USE_TYPE_E use_type) @@ -1678,7 +1688,6 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state, enum vchiq_connstate newstate) { struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); - char threadname[16];
dev_dbg(state->dev, "suspend: %d: %s->%s\n", state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate)); @@ -1693,17 +1702,7 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
arm_state->first_connect = 1; write_unlock_bh(&arm_state->susp_res_lock); - snprintf(threadname, sizeof(threadname), "vchiq-keep/%d", - state->id); - arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func, - (void *)state, - threadname); - if (IS_ERR(arm_state->ka_thread)) { - dev_err(state->dev, "suspend: Couldn't create thread %s\n", - threadname); - } else { - wake_up_process(arm_state->ka_thread); - } + wake_up_process(arm_state->ka_thread); }
static const struct of_device_id vchiq_of_match[] = { diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 6002283cbeba..68bbdf3ee101 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4317,8 +4317,8 @@ int iscsit_close_connection( spin_unlock(&iscsit_global->ts_bitmap_lock);
iscsit_stop_timers_for_cmds(conn); - iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); + iscsit_stop_nopin_response_timer(conn);
if (conn->conn_transport->iscsit_wait_conn) conn->conn_transport->iscsit_wait_conn(conn); diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 61c065702350..701dcbd7b63c 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -2151,8 +2151,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) if (descr->serv_action_valid) return TCM_INVALID_CDB_FIELD;
- if (!descr->enabled || descr->enabled(descr, cmd)) + if (!descr->enabled || descr->enabled(descr, cmd)) { *opcode = descr; + return TCM_NO_SENSE; + } break; case 0x2: /* @@ -2166,8 +2168,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) if (descr->serv_action_valid && descr->service_action == requested_sa) { if (!descr->enabled || descr->enabled(descr, - cmd)) + cmd)) { *opcode = descr; + return TCM_NO_SENSE; + } } else if (!descr->serv_action_valid) return TCM_INVALID_CDB_FIELD; break; @@ -2180,13 +2184,15 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) */ if (descr->service_action == requested_sa) if (!descr->enabled || descr->enabled(descr, - cmd)) + cmd)) { *opcode = descr; + return TCM_NO_SENSE; + } break; } }
- return 0; + return TCM_NO_SENSE; }
static sense_reason_t diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 65b33b56a9be..8c44f378b61e 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -329,6 +329,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) tj_max = intel_tcc_get_tjmax(cpu); if (tj_max < 0) return tj_max; + tj_max *= 1000;
zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL); if (!zonedev) diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c index 4b3225377e8f..3295b27ab70d 100644 --- a/drivers/thermal/mediatek/lvts_thermal.c +++ b/drivers/thermal/mediatek/lvts_thermal.c @@ -65,7 +65,6 @@ #define LVTS_HW_FILTER 0x0 #define LVTS_TSSEL_CONF 0x13121110 #define LVTS_CALSCALE_CONF 0x300 -#define LVTS_MONINT_CONF 0x0300318C
#define LVTS_MONINT_OFFSET_SENSOR0 0xC #define LVTS_MONINT_OFFSET_SENSOR1 0x180 @@ -929,7 +928,7 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl) * The LVTS_MONINT register layout is the same as the LVTS_MONINTSTS * register, except we set the bits to enable the interrupt. */ - writel(LVTS_MONINT_CONF, LVTS_MONINT(lvts_ctrl->base)); + writel(0, LVTS_MONINT(lvts_ctrl->base));
return 0; } diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 52e26be8c53d..aed2729f63d0 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -18,6 +18,7 @@ #define SITES_MAX 16 #define TMR_DISABLE 0x0 #define TMR_ME 0x80000000 +#define TMR_CMD BIT(29) #define TMR_ALPF 0x0c000000 #define TMR_ALPF_V2 0x03000000 #define TMTMIR_DEFAULT 0x0000000f @@ -356,6 +357,12 @@ static int qoriq_tmu_suspend(struct device *dev) if (ret) return ret;
+ if (data->ver > TMU_VER1) { + ret = regmap_set_bits(data->regmap, REGS_TMR, TMR_CMD); + if (ret) + return ret; + } + clk_disable_unprepare(data->clk);
return 0; @@ -370,6 +377,12 @@ static int qoriq_tmu_resume(struct device *dev) if (ret) return ret;
+ if (data->ver > TMU_VER1) { + ret = regmap_clear_bits(data->regmap, REGS_TMR, TMR_CMD); + if (ret) + return ret; + } + /* Enable monitoring */ return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME); } diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index eeb64433ebbc..3488be762067 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -93,9 +93,11 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt) if (ret) goto err_nvm;
- ret = tb_nvm_add_non_active(nvm, nvm_write); - if (ret) - goto err_nvm; + if (!rt->no_nvm_upgrade) { + ret = tb_nvm_add_non_active(nvm, nvm_write); + if (ret) + goto err_nvm; + }
rt->nvm = nvm; dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index c1376727642a..051967992965 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1657,7 +1657,7 @@ static void serial8250_disable_ms(struct uart_port *port) if (up->bugs & UART_BUG_NOMSR) return;
- mctrl_gpio_disable_ms(up->gpios); + mctrl_gpio_disable_ms_no_sync(up->gpios);
up->ier &= ~UART_IER_MSI; serial_port_out(port, UART_IER, up->ier); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 09b246c9e389..8bd39586a49f 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -700,7 +700,7 @@ static void atmel_disable_ms(struct uart_port *port)
atmel_port->ms_irq_enabled = false;
- mctrl_gpio_disable_ms(atmel_port->gpios); + mctrl_gpio_disable_ms_no_sync(atmel_port->gpios);
if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) idr |= ATMEL_US_CTSIC; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 90974d338f3c..8e3b15534bc7 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1596,7 +1596,7 @@ static void imx_uart_shutdown(struct uart_port *port) imx_uart_dma_exit(sport); }
- mctrl_gpio_disable_ms(sport->gpios); + mctrl_gpio_disable_ms_sync(sport->gpios);
uart_port_lock_irqsave(&sport->port, &flags); ucr2 = imx_uart_readl(sport, UCR2); diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c index 8855688a5b6c..ca55bcc0b611 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.c +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -322,11 +322,7 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios) } EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
-/** - * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines - * @gpios: gpios to disable - */ -void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) +static void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios, bool sync) { enum mctrl_gpio_idx i;
@@ -342,10 +338,34 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) if (!gpios->irq[i]) continue;
- disable_irq(gpios->irq[i]); + if (sync) + disable_irq(gpios->irq[i]); + else + disable_irq_nosync(gpios->irq[i]); } } -EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms); + +/** + * mctrl_gpio_disable_ms_sync - disable irqs and handling of changes to the ms + * lines, and wait for any pending IRQ to be processed + * @gpios: gpios to disable + */ +void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios) +{ + mctrl_gpio_disable_ms(gpios, true); +} +EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_sync); + +/** + * mctrl_gpio_disable_ms_no_sync - disable irqs and handling of changes to the + * ms lines, and return immediately + * @gpios: gpios to disable + */ +void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios) +{ + mctrl_gpio_disable_ms(gpios, false); +} +EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_no_sync);
void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios) { diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h index fc76910fb105..79e97838ebe5 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.h +++ b/drivers/tty/serial/serial_mctrl_gpio.h @@ -87,9 +87,16 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios); void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios);
/* - * Disable gpio interrupts to report status line changes. + * Disable gpio interrupts to report status line changes, and block until + * any corresponding IRQ is processed */ -void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios); +void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios); + +/* + * Disable gpio interrupts to report status line changes, and return + * immediately + */ +void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios);
/* * Enable gpio wakeup interrupts to enable wake up source. @@ -148,7 +155,11 @@ static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios) { }
-static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) +static inline void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios) +{ +} + +static inline void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios) { }
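The split above exists because disable_irq() synchronizes with (and may sleep waiting for) a running handler, which is not allowed while the caller holds the uart port spinlock. A minimal sketch of how a driver picks between the two variants; the foo_* helpers are hypothetical and only illustrate the calling context:

/* Sketch only: which variant to call depends on the caller's context. */
static void foo_disable_ms_under_port_lock(struct mctrl_gpios *gpios)
{
	/*
	 * Atomic context (uart port spinlock held, IRQs off):
	 * disable_irq() may sleep while waiting for a running handler,
	 * so only mark the IRQs disabled and return immediately.
	 */
	mctrl_gpio_disable_ms_no_sync(gpios);
}

static void foo_shutdown_sleepable(struct mctrl_gpios *gpios)
{
	/*
	 * Process context, no spinlocks held: it is safe (and desirable)
	 * to wait until any in-flight modem-status interrupt has finished
	 * before freeing the resources the handler relies on.
	 */
	mctrl_gpio_disable_ms_sync(gpios);
}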
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index f43059e1b5c2..76cf177b040e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -104,6 +104,20 @@ struct plat_sci_reg { u8 offset, size; };
+struct sci_suspend_regs { + u16 scdl; + u16 sccks; + u16 scsmr; + u16 scscr; + u16 scfcr; + u16 scsptr; + u16 hssrr; + u16 scpcr; + u16 scpdr; + u8 scbrr; + u8 semr; +}; + struct sci_port_params { const struct plat_sci_reg regs[SCIx_NR_REGS]; unsigned int fifosize; @@ -134,6 +148,8 @@ struct sci_port { struct dma_chan *chan_tx; struct dma_chan *chan_rx;
+ struct reset_control *rstc; + #ifdef CONFIG_SERIAL_SH_SCI_DMA struct dma_chan *chan_tx_saved; struct dma_chan *chan_rx_saved; @@ -153,6 +169,7 @@ struct sci_port { int rx_trigger; struct timer_list rx_fifo_timer; int rx_fifo_timeout; + struct sci_suspend_regs suspend_regs; u16 hscif_tot;
bool has_rtscts; @@ -2297,7 +2314,7 @@ static void sci_shutdown(struct uart_port *port) dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
s->autorts = false; - mctrl_gpio_disable_ms(to_sci_port(port)->gpios); + mctrl_gpio_disable_ms_sync(to_sci_port(port)->gpios);
uart_port_lock_irqsave(port, &flags); sci_stop_rx(port); @@ -3384,6 +3401,7 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev, }
sp = &sci_ports[id]; + sp->rstc = rstc; *dev_id = id;
p->type = SCI_OF_TYPE(data); @@ -3532,13 +3550,77 @@ static int sci_probe(struct platform_device *dev) return 0; }
+static void sci_console_save(struct sci_port *s) +{ + struct sci_suspend_regs *regs = &s->suspend_regs; + struct uart_port *port = &s->port; + + if (sci_getreg(port, SCDL)->size) + regs->scdl = sci_serial_in(port, SCDL); + if (sci_getreg(port, SCCKS)->size) + regs->sccks = sci_serial_in(port, SCCKS); + if (sci_getreg(port, SCSMR)->size) + regs->scsmr = sci_serial_in(port, SCSMR); + if (sci_getreg(port, SCSCR)->size) + regs->scscr = sci_serial_in(port, SCSCR); + if (sci_getreg(port, SCFCR)->size) + regs->scfcr = sci_serial_in(port, SCFCR); + if (sci_getreg(port, SCSPTR)->size) + regs->scsptr = sci_serial_in(port, SCSPTR); + if (sci_getreg(port, SCBRR)->size) + regs->scbrr = sci_serial_in(port, SCBRR); + if (sci_getreg(port, HSSRR)->size) + regs->hssrr = sci_serial_in(port, HSSRR); + if (sci_getreg(port, SCPCR)->size) + regs->scpcr = sci_serial_in(port, SCPCR); + if (sci_getreg(port, SCPDR)->size) + regs->scpdr = sci_serial_in(port, SCPDR); + if (sci_getreg(port, SEMR)->size) + regs->semr = sci_serial_in(port, SEMR); +} + +static void sci_console_restore(struct sci_port *s) +{ + struct sci_suspend_regs *regs = &s->suspend_regs; + struct uart_port *port = &s->port; + + if (sci_getreg(port, SCDL)->size) + sci_serial_out(port, SCDL, regs->scdl); + if (sci_getreg(port, SCCKS)->size) + sci_serial_out(port, SCCKS, regs->sccks); + if (sci_getreg(port, SCSMR)->size) + sci_serial_out(port, SCSMR, regs->scsmr); + if (sci_getreg(port, SCSCR)->size) + sci_serial_out(port, SCSCR, regs->scscr); + if (sci_getreg(port, SCFCR)->size) + sci_serial_out(port, SCFCR, regs->scfcr); + if (sci_getreg(port, SCSPTR)->size) + sci_serial_out(port, SCSPTR, regs->scsptr); + if (sci_getreg(port, SCBRR)->size) + sci_serial_out(port, SCBRR, regs->scbrr); + if (sci_getreg(port, HSSRR)->size) + sci_serial_out(port, HSSRR, regs->hssrr); + if (sci_getreg(port, SCPCR)->size) + sci_serial_out(port, SCPCR, regs->scpcr); + if (sci_getreg(port, SCPDR)->size) + sci_serial_out(port, SCPDR, regs->scpdr); + if (sci_getreg(port, SEMR)->size) + sci_serial_out(port, SEMR, regs->semr); +} + static __maybe_unused int sci_suspend(struct device *dev) { struct sci_port *sport = dev_get_drvdata(dev);
- if (sport) + if (sport) { uart_suspend_port(&sci_uart_driver, &sport->port);
+ if (!console_suspend_enabled && uart_console(&sport->port)) + sci_console_save(sport); + else + return reset_control_assert(sport->rstc); + } + return 0; }
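The __maybe_unused sci_suspend() above (and the matching sci_resume() in the next hunk) are the callbacks the platform driver hands to the PM core. The registration block is outside this patch, so the following is only a sketch of the usual wiring and may not match the file exactly:

/* Sketch, assuming the common SIMPLE_DEV_PM_OPS style of registration. */
static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume);

static struct platform_driver sci_driver_sketch = {
	.driver = {
		.name	= "sh-sci",
		.pm	= &sci_dev_pm_ops,	/* suspend/resume shown above */
	},
};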
@@ -3546,8 +3628,18 @@ static __maybe_unused int sci_resume(struct device *dev) { struct sci_port *sport = dev_get_drvdata(dev);
- if (sport) + if (sport) { + if (!console_suspend_enabled && uart_console(&sport->port)) { + sci_console_restore(sport); + } else { + int ret = reset_control_deassert(sport->rstc); + + if (ret) + return ret; + } + uart_resume_port(&sci_uart_driver, &sport->port); + }
return 0; } diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 9b9981352b1e..e685cace5c85 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -944,7 +944,7 @@ static void stm32_usart_enable_ms(struct uart_port *port)
static void stm32_usart_disable_ms(struct uart_port *port) { - mctrl_gpio_disable_ms(to_stm32_port(port)->gpios); + mctrl_gpio_disable_ms_sync(to_stm32_port(port)->gpios); }
/* Transmit stop */ diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index a9b032d2f4a8..247e425428c8 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = { .model = UFS_ANY_MODEL, .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | + UFS_DEVICE_QUIRK_PA_HIBER8TIME | UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS }, { .wmanufacturerid = UFS_VENDOR_SKHYNIX, .model = UFS_ANY_MODEL, @@ -8459,6 +8460,31 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) return ret; }
+/** + * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME. + * @hba: per-adapter instance + * + * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter + * to ensure proper hibernation timing. This function retrieves the current + * PA_HIBERN8TIME value and increments it by 100us. + */ +static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba) +{ + u32 pa_h8time; + int ret; + + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time); + if (ret) { + dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret); + return; + } + + /* Increment by 1 to increase hibernation time by 100 µs */ + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1); + if (ret) + dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret); +} + static void ufshcd_tune_unipro_params(struct ufs_hba *hba) { ufshcd_vops_apply_dev_quirks(hba); @@ -8469,6 +8495,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) ufshcd_quirk_tune_host_pa_tactivate(hba); + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME) + ufshcd_quirk_override_pa_h8time(hba); }
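The new UFS_DEVICE_QUIRK_PA_HIBER8TIME flag is defined in ufs_quirks.h (not shown in this hunk) and gets ORed into hba->dev_quirks when a device matches an entry of the fixup table, which is what the first hunk above does. A hedged sketch of the definition/match side; the bit position and the vendor chosen here are illustrative assumptions, not taken from the patch:

/* Assumed bit value; the real definition lives in ufs_quirks.h. */
#define UFS_DEVICE_QUIRK_PA_HIBER8TIME	(1 << 12)

/*
 * A matching fixup entry ORs .quirk into hba->dev_quirks during device
 * initialization; ufshcd_tune_unipro_params() then calls
 * ufshcd_quirk_override_pa_h8time() as shown above.
 */
static const struct ufs_dev_quirk example_hiber8time_fixup = {
	.wmanufacturerid = UFS_VENDOR_MICRON,	/* example vendor only */
	.model		 = UFS_ANY_MODEL,
	.quirk		 = UFS_DEVICE_QUIRK_PA_HIBER8TIME,
};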
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 8c26275696df..f9c51e0f2e37 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1959,7 +1959,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->interrupters = NULL;
xhci->page_size = 0; - xhci->page_shift = 0; xhci->usb2_rhub.bus_state.bus_suspended = 0; xhci->usb3_rhub.bus_state.bus_suspended = 0; } @@ -2378,6 +2377,22 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, } EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
+static void xhci_hcd_page_size(struct xhci_hcd *xhci) +{ + u32 page_size; + + page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK; + if (!is_power_of_2(page_size)) { + xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size); + /* Fallback to 4K page size, since that's common */ + page_size = 1; + } + + xhci->page_size = page_size << 12; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK", + xhci->page_size >> 10); +} + int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { struct xhci_interrupter *ir; @@ -2385,7 +2400,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) dma_addr_t dma; unsigned int val, val2; u64 val_64; - u32 page_size, temp; + u32 temp; int i;
INIT_LIST_HEAD(&xhci->cmd_list); @@ -2394,20 +2409,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); init_completion(&xhci->cmd_ring_stop_completion);
- page_size = readl(&xhci->op_regs->page_size); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size register = 0x%x", page_size); - val = ffs(page_size) - 1; - if (val < 16) - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size of %iK", (1 << (val + 12)) / 1024); - else - xhci_warn(xhci, "WARN: no supported page size\n"); - /* Use 4K pages, since that's common and the minimum the HC supports */ - xhci->page_shift = 12; - xhci->page_size = 1 << xhci->page_shift; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "HCD page size set to %iK", xhci->page_size / 1024); + xhci_hcd_page_size(xhci);
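The new helper interprets the PAGE SIZE operational register as the xHCI spec defines it: bit n set means the controller supports pages of 2^(n+12) bytes, so a sane (single power-of-two) value only needs a shift by 12 to become a byte count. The arithmetic in isolation, as a tiny standalone example:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x0001;		/* typical PAGE SIZE register value */
	unsigned int bytes = reg << 12;		/* bit 0 set => 4096 bytes */

	printf("xHC page size: %u bytes (%uK)\n", bytes, bytes >> 10);
	return 0;
}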
/* * Program the Number of Device Slots Enabled field in the CONFIG diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 3e70e4f6bf08..fbc8419a5473 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1156,7 +1156,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, */ switch (GET_EP_CTX_STATE(ep_ctx)) { case EP_STATE_HALTED: - xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n"); + xhci_dbg(xhci, "Stop ep completion raced with stall\n"); + /* + * If the halt happened before Stop Endpoint failed, its transfer event + * should have already been handled and Reset Endpoint should be pending. + */ + if (ep->ep_state & EP_HALTED) + goto reset_done; + if (ep->ep_state & EP_HAS_STREAMS) { reset_type = EP_SOFT_RESET; } else { @@ -1167,8 +1174,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, } /* reset ep, reset handler cleans up cancelled tds */ err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type); + xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err); if (err) break; +reset_done: + /* Reset EP handler will clean up cancelled TDs */ ep->ep_state &= ~EP_STOP_CMD_PENDING; return; case EP_STATE_STOPPED: diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 2a954efa53e8..c4d5b90ef90a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -211,6 +211,9 @@ struct xhci_op_regs { #define CONFIG_CIE (1 << 9) /* bits 10:31 - reserved and should be preserved */
+/* bits 15:0 - HCD page shift bit */ +#define XHCI_PAGE_SIZE_MASK 0xffff + /** * struct xhci_intr_reg - Interrupt Register Set * @irq_pending: IMAN - Interrupt Management Register. Used to enable @@ -1503,10 +1506,7 @@ struct xhci_hcd { u16 max_interrupters; /* imod_interval in ns (I * 250ns) */ u32 imod_interval; - /* 4KB min, 128MB max */ - int page_size; - /* Valid values are 12 to 20, inclusive */ - int page_shift; + u32 page_size; /* MSI-X/MSI vectors */ int nvecs; /* optional clocks */ diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 5f581e71e201..76aedac37a78 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -3884,6 +3884,9 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, ndev->mvdev.max_vqs = max_vqs; mvdev = &ndev->mvdev; mvdev->mdev = mdev; + /* cpu_to_mlx5vdpa16() below depends on this flag */ + mvdev->actual_features = + (device_features & BIT_ULL(VIRTIO_F_VERSION_1));
ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index ea2745c1ac5e..8ea38e7421df 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1813,7 +1813,8 @@ int vfio_config_init(struct vfio_pci_core_device *vdev) cpu_to_le16(PCI_COMMAND_MEMORY); }
- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx || + vdev->pdev->irq == IRQ_NOTCONNECTED) vconfig[PCI_INTERRUPT_PIN] = 0;
ret = vfio_cap_init(vdev); diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index c9eaba227636..087c273a547f 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -727,15 +727,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable); static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type) { if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { - u8 pin; - - if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || - vdev->nointx || vdev->pdev->is_virtfn) - return 0; - - pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); - - return pin ? 1 : 0; + return vdev->vconfig[PCI_INTERRUPT_PIN] ? 1 : 0; } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { u8 pos; u16 flags; diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 8382c5834335..565966351dfa 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -259,7 +259,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev, if (!is_irq_none(vdev)) return -EINVAL;
- if (!pdev->irq) + if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED) return -ENODEV;
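IRQ_NOTCONNECTED is the value the PCI core stores in pdev->irq when firmware describes an INTx pin that is not actually routed anywhere; request_irq() rejects it with -ENOTCONN. The hunks above simply stop advertising INTx for such devices. The same guard in a generic, hypothetical PCI driver would look like this:

/* Sketch: skip INTx setup when the line is present but not connected. */
static int foo_pci_setup_intx(struct pci_dev *pdev, irq_handler_t handler,
			      void *data)
{
	if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED)
		return -ENODEV;	/* fall back to MSI/MSI-X or polling */

	return request_irq(pdev->irq, handler, IRQF_SHARED,
			   "foo-pci-intx", data);
}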
name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev)); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 35a03306d134..38d243d914d0 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -571,6 +571,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) int ret;
llnode = llist_del_all(&svq->completion_list); + + mutex_lock(&svq->vq.mutex); + llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) { se_cmd = &cmd->tvc_se_cmd;
@@ -604,6 +607,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) vhost_scsi_release_cmd_res(se_cmd); }
+ mutex_unlock(&svq->vq.mutex); + if (signal) vhost_signal(&svq->vs->dev, &svq->vq); } @@ -757,7 +762,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, size_t len = iov_iter_count(iter); unsigned int nbytes = 0; struct page *page; - int i; + int i, ret;
if (cmd->tvc_data_direction == DMA_FROM_DEVICE) { cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter, @@ -770,6 +775,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, page = alloc_page(GFP_KERNEL); if (!page) { i--; + ret = -ENOMEM; goto err; }
@@ -777,8 +783,10 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, sg_set_page(&sg[i], page, nbytes, 0);
if (cmd->tvc_data_direction == DMA_TO_DEVICE && - copy_page_from_iter(page, 0, nbytes, iter) != nbytes) + copy_page_from_iter(page, 0, nbytes, iter) != nbytes) { + ret = -EFAULT; goto err; + }
len -= nbytes; } @@ -793,7 +801,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter, for (; i >= 0; i--) __free_page(sg_page(&sg[i])); kfree(cmd->saved_iter_addr); - return -ENOMEM; + return ret; }
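The change above switches the function from always returning -ENOMEM to propagating whichever error actually occurred (-ENOMEM for a failed allocation, -EFAULT for a failed copy from the iov_iter), using the usual single-unwind-label idiom. A generic sketch of that idiom, not tied to vhost-scsi (needs <linux/gfp.h> and <linux/uio.h>):

static int fill_buffers(struct page **pages, int n, struct iov_iter *iter,
			size_t nbytes)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			ret = -ENOMEM;		/* allocation failed */
			goto err;
		}
		if (copy_page_from_iter(pages[i], 0, nbytes, iter) != nbytes) {
			ret = -EFAULT;		/* copy failed */
			i++;			/* this page exists, free it too */
			goto err;
		}
	}
	return 0;

err:
	while (i--)
		__free_page(pages[i]);
	return ret;	/* caller sees the real reason, not a blanket -ENOMEM */
}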
static int @@ -1277,9 +1285,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
if (data_direction != DMA_NONE) { - if (unlikely(vhost_scsi_mapal(cmd, prot_bytes, - &prot_iter, exp_data_len, - &data_iter))) { + ret = vhost_scsi_mapal(cmd, prot_bytes, &prot_iter, + exp_data_len, &data_iter); + if (unlikely(ret)) { vq_err(vq, "Failed to map iov to sgl\n"); vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd); goto err; @@ -1346,8 +1354,11 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work) else resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ mutex_lock(&tmf->svq->vq.mutex); vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs, tmf->vq_desc, &tmf->resp_iov, resp_code); + mutex_unlock(&tmf->svq->vq.mutex); + vhost_scsi_release_tmf_res(tmf); }
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index 3ff1b2a8659e..f9475c14f733 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -59,12 +59,11 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy, }
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; struct fb_fillrect region;
- region.color = attr_bgcol_ec(bgshift, vc, info); + region.color = bg; region.dx = sx * vc->vc_font.width; region.dy = sy * vc->vc_font.height; region.width = width * vc->vc_font.width; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index e8b4e8c119b5..07d127110ca4 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1258,7 +1258,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; - + int fg, bg; struct fbcon_display *p = &fb_display[vc->vc_num]; u_int y_break;
@@ -1279,16 +1279,18 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, fbcon_clear_margins(vc, 0); }
+ fg = get_color(vc, info, vc->vc_video_erase_char, 1); + bg = get_color(vc, info, vc->vc_video_erase_char, 0); /* Split blits that cross physical y_wrap boundary */
y_break = p->vrows - p->yscroll; if (sy < y_break && sy + height - 1 >= y_break) { u_int b = y_break - sy; - ops->clear(vc, info, real_y(p, sy), sx, b, width); + ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg); ops->clear(vc, info, real_y(p, sy + b), sx, height - b, - width); + width, fg, bg); } else - ops->clear(vc, info, real_y(p, sy), sx, height, width); + ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg); }
static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx, diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index df70ea5ec5b3..4d97e6d8a16a 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -55,7 +55,7 @@ struct fbcon_ops { void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width); void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width); + int sx, int height, int width, int fb, int bg); void (*putcs)(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx, int fg, int bg); @@ -116,42 +116,6 @@ static inline int mono_col(const struct fb_info *info) return (~(0xfff << max_len)) & 0xff; }
-static inline int attr_col_ec(int shift, struct vc_data *vc, - struct fb_info *info, int is_fg) -{ - int is_mono01; - int col; - int fg; - int bg; - - if (!vc) - return 0; - - if (vc->vc_can_do_color) - return is_fg ? attr_fgcol(shift,vc->vc_video_erase_char) - : attr_bgcol(shift,vc->vc_video_erase_char); - - if (!info) - return 0; - - col = mono_col(info); - is_mono01 = info->fix.visual == FB_VISUAL_MONO01; - - if (attr_reverse(vc->vc_video_erase_char)) { - fg = is_mono01 ? col : 0; - bg = is_mono01 ? 0 : col; - } - else { - fg = is_mono01 ? 0 : col; - bg = is_mono01 ? col : 0; - } - - return is_fg ? fg : bg; -} - -#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) -#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) - /* * Scroll Method */ diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index f9b794ff7d39..89ef4ba7e867 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -78,14 +78,13 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, }
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; u32 vyres = GETVYRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info); + region.color = bg; region.dx = sy * vc->vc_font.height; region.dy = vyres - ((sx + width) * vc->vc_font.width); region.height = width * vc->vc_font.width; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 903f6fc174e1..b9dac7940fb7 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -63,14 +63,13 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, }
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; u32 vxres = GETVXRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info); + region.color = bg; region.dx = vxres - ((sy + height) * vc->vc_font.height); region.dy = sx * vc->vc_font.width; region.height = width * vc->vc_font.width; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 594331936fd3..0af7913a2abd 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -64,15 +64,14 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, }
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; u32 vyres = GETVYRES(ops->p, info); u32 vxres = GETVXRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info); + region.color = bg; region.dy = vyres - ((sy + height) * vc->vc_font.height); region.dx = vxres - ((sx + width) * vc->vc_font.width); region.width = width * vc->vc_font.width; diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index eff7ec4da167..d342b90c42b7 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -32,16 +32,14 @@ static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy, }
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { struct fb_tilerect rect; - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - int fgshift = (vc->vc_hi_font_mask) ? 9 : 8;
rect.index = vc->vc_video_erase_char & ((vc->vc_hi_font_mask) ? 0x1ff : 0xff); - rect.fg = attr_fgcol_ec(fgshift, vc, info); - rect.bg = attr_bgcol_ec(bgshift, vc, info); + rect.fg = fg; + rect.bg = bg; rect.sx = sx; rect.sy = sy; rect.width = width; @@ -76,7 +74,42 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info, static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, int color, int bottom_only) { - return; + unsigned int cw = vc->vc_font.width; + unsigned int ch = vc->vc_font.height; + unsigned int rw = info->var.xres - (vc->vc_cols*cw); + unsigned int bh = info->var.yres - (vc->vc_rows*ch); + unsigned int rs = info->var.xres - rw; + unsigned int bs = info->var.yres - bh; + unsigned int vwt = info->var.xres_virtual / cw; + unsigned int vht = info->var.yres_virtual / ch; + struct fb_tilerect rect; + + rect.index = vc->vc_video_erase_char & + ((vc->vc_hi_font_mask) ? 0x1ff : 0xff); + rect.fg = color; + rect.bg = color; + + if ((int) rw > 0 && !bottom_only) { + rect.sx = (info->var.xoffset + rs + cw - 1) / cw; + rect.sy = 0; + rect.width = (rw + cw - 1) / cw; + rect.height = vht; + if (rect.width + rect.sx > vwt) + rect.width = vwt - rect.sx; + if (rect.sx < vwt) + info->tileops->fb_tilefill(info, &rect); + } + + if ((int) bh > 0) { + rect.sx = info->var.xoffset / cw; + rect.sy = (info->var.yoffset + bs) / ch; + rect.width = rs / cw; + rect.height = (bh + ch - 1) / ch; + if (rect.height + rect.sy > vht) + rect.height = vht - rect.sy; + if (rect.sy < vht) + info->tileops->fb_tilefill(info, &rect); + } }
static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable, diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 5ac8201c3533..b71d15794ce8 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -1827,6 +1827,7 @@ static void fsl_diu_remove(struct platform_device *pdev) int i;
data = dev_get_drvdata(&pdev->dev); + device_remove_file(&pdev->dev, &data->dev_attr); disable_lcdc(&data->fsl_diu_info[0]);
free_irq(data->irq, data->diu_reg); diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 1f8a322eb00b..147926c8bae0 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2530,7 +2530,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->event_triggered) - vq->event_triggered = false; + data_race(vq->event_triggered = false);
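data_race() (from <linux/compiler.h>) marks a plain access as an intentional, known-benign race so KCSAN does not report it; it adds no ordering or atomicity. Here the flag is only advisory, so losing a concurrent update is harmless. A minimal sketch of annotating both sides of such a flag (the foo_* names are made up):

struct foo_queue {
	bool event_triggered;	/* written from IRQ, read from task context */
};

static void foo_irq_note_event(struct foo_queue *q)
{
	/* Racy by design; readers treat the flag only as a hint. */
	data_race(q->event_triggered = true);
}

static bool foo_saw_event(struct foo_queue *q)
{
	return data_race(q->event_triggered);
}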
return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : virtqueue_enable_cb_delayed_split(_vq); diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index b4773a6aaf8c..837e15701c0e 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -11,21 +11,30 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/kstrtox.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/watchdog.h>
static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +struct aspeed_wdt_scu { + const char *compatible; + u32 reset_status_reg; + u32 wdt_reset_mask; + u32 wdt_reset_mask_shift; +};
struct aspeed_wdt_config { u32 ext_pulse_width_mask; u32 irq_shift; u32 irq_mask; + struct aspeed_wdt_scu scu; };
struct aspeed_wdt { @@ -39,18 +48,36 @@ static const struct aspeed_wdt_config ast2400_config = { .ext_pulse_width_mask = 0xff, .irq_shift = 0, .irq_mask = 0, + .scu = { + .compatible = "aspeed,ast2400-scu", + .reset_status_reg = 0x3c, + .wdt_reset_mask = 0x1, + .wdt_reset_mask_shift = 1, + }, };
static const struct aspeed_wdt_config ast2500_config = { .ext_pulse_width_mask = 0xfffff, .irq_shift = 12, .irq_mask = GENMASK(31, 12), + .scu = { + .compatible = "aspeed,ast2500-scu", + .reset_status_reg = 0x3c, + .wdt_reset_mask = 0x1, + .wdt_reset_mask_shift = 2, + }, };
static const struct aspeed_wdt_config ast2600_config = { .ext_pulse_width_mask = 0xfffff, .irq_shift = 0, .irq_mask = GENMASK(31, 10), + .scu = { + .compatible = "aspeed,ast2600-scu", + .reset_status_reg = 0x74, + .wdt_reset_mask = 0xf, + .wdt_reset_mask_shift = 16, + }, };
static const struct of_device_id aspeed_wdt_of_table[] = { @@ -213,6 +240,56 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd, return 0; }
+static void aspeed_wdt_update_bootstatus(struct platform_device *pdev, + struct aspeed_wdt *wdt) +{ + const struct resource *res; + struct aspeed_wdt_scu scu = wdt->cfg->scu; + struct regmap *scu_base; + u32 reset_mask_width; + u32 reset_mask_shift; + u32 idx = 0; + u32 status; + int ret; + + if (!of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt")) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + idx = ((intptr_t)wdt->base & 0x00000fff) / (uintptr_t)resource_size(res); + } + + scu_base = syscon_regmap_lookup_by_compatible(scu.compatible); + if (IS_ERR(scu_base)) { + wdt->wdd.bootstatus = WDIOS_UNKNOWN; + return; + } + + ret = regmap_read(scu_base, scu.reset_status_reg, &status); + if (ret) { + wdt->wdd.bootstatus = WDIOS_UNKNOWN; + return; + } + + reset_mask_width = hweight32(scu.wdt_reset_mask); + reset_mask_shift = scu.wdt_reset_mask_shift + + reset_mask_width * idx; + + if (status & (scu.wdt_reset_mask << reset_mask_shift)) + wdt->wdd.bootstatus = WDIOF_CARDRESET; + + /* clear wdt reset event flag */ + if (of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt") || + of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2500-wdt")) { + ret = regmap_read(scu_base, scu.reset_status_reg, &status); + if (!ret) { + status &= ~(scu.wdt_reset_mask << reset_mask_shift); + regmap_write(scu_base, scu.reset_status_reg, status); + } + } else { + regmap_write(scu_base, scu.reset_status_reg, + scu.wdt_reset_mask << reset_mask_shift); + } +} + /* access_cs0 shows if cs0 is accessible, hence the reverted bit */ static ssize_t access_cs0_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -458,10 +535,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev) writel(duration - 1, wdt->base + WDT_RESET_WIDTH); }
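With the SCU reset status decoded into wdd.bootstatus, userspace can distinguish a watchdog-induced reset from a normal boot through the standard watchdog chardev API. A small userspace example; note that opening the device usually starts the watchdog, hence the magic 'V' close:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int flags = 0;
	int fd = open("/dev/watchdog0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, WDIOC_GETBOOTSTATUS, &flags) == 0)
		printf("last reset %s caused by the watchdog\n",
		       (flags & WDIOF_CARDRESET) ? "was" : "was not");
	/* Magic close so the core does not reboot us when we exit. */
	if (write(fd, "V", 1) != 1)
		perror("magic close");
	close(fd);
	return 0;
}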
+ aspeed_wdt_update_bootstatus(pdev, wdt); + status = readl(wdt->base + WDT_TIMEOUT_STATUS); if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) { - wdt->wdd.bootstatus = WDIOF_CARDRESET; - if (of_device_is_compatible(np, "aspeed,ast2400-wdt") || of_device_is_compatible(np, "aspeed,ast2500-wdt")) wdt->wdd.groups = bswitch_groups; diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c index 416f231809cb..bfe07adb3e3a 100644 --- a/drivers/xen/pci.c +++ b/drivers/xen/pci.c @@ -43,6 +43,18 @@ static int xen_add_device(struct device *dev) pci_mcfg_reserved = true; } #endif + + if (pci_domain_nr(pci_dev->bus) >> 16) { + /* + * The hypercall interface is limited to 16bit PCI segment + * values, do not attempt to register devices with Xen in + * segments greater or equal than 0x10000. + */ + dev_info(dev, + "not registering with Xen: invalid PCI segment\n"); + return 0; + } + if (pci_seg_supported) { DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
@@ -149,6 +161,16 @@ static int xen_remove_device(struct device *dev) int r; struct pci_dev *pci_dev = to_pci_dev(dev);
+ if (pci_domain_nr(pci_dev->bus) >> 16) { + /* + * The hypercall interface is limited to 16bit PCI segment + * values. + */ + dev_info(dev, + "not unregistering with Xen: invalid PCI segment\n"); + return 0; + } + if (pci_seg_supported) { struct physdev_pci_device device = { .seg = pci_domain_nr(pci_dev->bus), @@ -182,6 +204,16 @@ int xen_reset_device(const struct pci_dev *dev) .flags = PCI_DEVICE_RESET_FLR, };
+ if (pci_domain_nr(dev->bus) >> 16) { + /* + * The hypercall interface is limited to 16bit PCI segment + * values. + */ + dev_info(&dev->dev, + "unable to notify Xen of device reset: invalid PCI segment\n"); + return 0; + } + return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device); } EXPORT_SYMBOL_GPL(xen_reset_device); diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 544d3f9010b9..1db82da56db6 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -26,6 +26,8 @@
#define DRV_NAME "xen-platform-pci"
+#define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002 + static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; @@ -174,6 +176,8 @@ static int platform_pci_probe(struct pci_dev *pdev, static const struct pci_device_id platform_pci_tbl[] = { {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} };
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 6d32ffb01136..86fe6e779056 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -966,9 +966,15 @@ static int __init xenbus_init(void) if (xen_pv_domain()) xen_store_domain_type = XS_PV; if (xen_hvm_domain()) + { xen_store_domain_type = XS_HVM; - if (xen_hvm_domain() && xen_initial_domain()) - xen_store_domain_type = XS_LOCAL; + err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); + if (err) + goto out_error; + xen_store_evtchn = (int)v; + if (!v && xen_initial_domain()) + xen_store_domain_type = XS_LOCAL; + } if (xen_pv_domain() && !xen_start_info->store_evtchn) xen_store_domain_type = XS_LOCAL; if (xen_pv_domain() && xen_start_info->store_evtchn) @@ -987,10 +993,6 @@ static int __init xenbus_init(void) xen_store_interface = gfn_to_virt(xen_store_gfn); break; case XS_HVM: - err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); - if (err) - goto out_error; - xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 4423d8b716a5..aa8656c8b7e7 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1891,6 +1891,17 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) up_write(&space_info->groups_sem); goto next; } + + /* + * Cache the zone_unusable value before turning the block group + * to read only. As soon as the block group is read only it's + * zone_unusable value gets moved to the block group's read-only + * bytes and isn't available for calculations anymore. We also + * cache it before unlocking the block group, to prevent races + * (reports from KCSAN and such tools) with tasks updating it. + */ + zone_unusable = bg->zone_unusable; + spin_unlock(&bg->lock); spin_unlock(&space_info->lock);
@@ -1907,13 +1918,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) goto next; }
- /* - * Cache the zone_unusable value before turning the block group - * to read only. As soon as the blog group is read only it's - * zone_unusable value gets moved to the block group's read-only - * bytes and isn't available for calculations anymore. - */ - zone_unusable = bg->zone_unusable; ret = inc_block_group_ro(bg, 0); up_write(&space_info->groups_sem); if (ret < 0) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 40332ab62f10..65d883da86c6 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -606,7 +606,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio) free_extent_map(em);
cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE); - cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS); + cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS); if (!cb->compressed_folios) { ret = BLK_STS_RESOURCE; goto out_free_bio; diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index e9cdc1759dad..de23c4b3515e 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -168,13 +168,7 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl, block_group->discard_eligible_time = 0; queued = !list_empty(&block_group->discard_list); list_del_init(&block_group->discard_list); - /* - * If the block group is currently running in the discard workfn, we - * don't want to deref it, since it's still being used by the workfn. - * The workfn will notice this case and deref the block group when it is - * finished. - */ - if (queued && !running) + if (queued) btrfs_put_block_group(block_group);
spin_unlock(&discard_ctl->lock); @@ -273,9 +267,10 @@ static struct btrfs_block_group *peek_discard_list( block_group->discard_cursor = block_group->start; block_group->discard_state = BTRFS_DISCARD_EXTENTS; } - discard_ctl->block_group = block_group; } if (block_group) { + btrfs_get_block_group(block_group); + discard_ctl->block_group = block_group; *discard_state = block_group->discard_state; *discard_index = block_group->discard_index; } @@ -506,9 +501,20 @@ static void btrfs_discard_workfn(struct work_struct *work)
block_group = peek_discard_list(discard_ctl, &discard_state, &discard_index, now); - if (!block_group || !btrfs_run_discard_work(discard_ctl)) + if (!block_group) return; + if (!btrfs_run_discard_work(discard_ctl)) { + spin_lock(&discard_ctl->lock); + btrfs_put_block_group(block_group); + discard_ctl->block_group = NULL; + spin_unlock(&discard_ctl->lock); + return; + } if (now < block_group->discard_eligible_time) { + spin_lock(&discard_ctl->lock); + btrfs_put_block_group(block_group); + discard_ctl->block_group = NULL; + spin_unlock(&discard_ctl->lock); btrfs_discard_schedule_work(discard_ctl, false); return; } @@ -560,15 +566,7 @@ static void btrfs_discard_workfn(struct work_struct *work) spin_lock(&discard_ctl->lock); discard_ctl->prev_discard = trimmed; discard_ctl->prev_discard_time = now; - /* - * If the block group was removed from the discard list while it was - * running in this workfn, then we didn't deref it, since this function - * still owned that reference. But we set the discard_ctl->block_group - * back to NULL, so we can use that condition to know that now we need - * to deref the block_group. - */ - if (discard_ctl->block_group == NULL) - btrfs_put_block_group(block_group); + btrfs_put_block_group(block_group); discard_ctl->block_group = NULL; __btrfs_discard_schedule_work(discard_ctl, now, false); spin_unlock(&discard_ctl->lock); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 19e5f8eaae77..147c50ef912a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4254,6 +4254,14 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) /* clear out the rbtree of defraggable inodes */ btrfs_cleanup_defrag_inodes(fs_info);
+ /* + * Handle the error fs first, as it will flush and wait for all ordered + * extents. This will generate delayed iputs, thus we want to handle + * it first. + */ + if (unlikely(BTRFS_FS_ERROR(fs_info))) + btrfs_error_commit_super(fs_info); + /* * Wait for any fixup workers to complete. * If we don't wait for them here and they are still running by the time @@ -4274,6 +4282,19 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) */ btrfs_flush_workqueue(fs_info->delalloc_workers);
+ /* + * We can have ordered extents getting their last reference dropped from + * the fs_info->workers queue because for async writes for data bios we + * queue a work for that queue, at btrfs_wq_submit_bio(), that runs + * run_one_async_done() which calls btrfs_bio_end_io() in case the bio + * has an error, and that later function can do the final + * btrfs_put_ordered_extent() on the ordered extent attached to the bio, + * which adds a delayed iput for the inode. So we must flush the queue + * so that we don't have delayed iputs after committing the current + * transaction below and stopping the cleaner and transaction kthreads. + */ + btrfs_flush_workqueue(fs_info->workers); + /* * When finishing a compressed write bio we schedule a work queue item * to finish an ordered extent - btrfs_finish_compressed_write_work() @@ -4343,9 +4364,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) btrfs_err(fs_info, "commit super ret %d", ret); }
- if (BTRFS_FS_ERROR(fs_info)) - btrfs_error_commit_super(fs_info); - kthread_stop(fs_info->transaction_kthread); kthread_stop(fs_info->cleaner_kthread);
@@ -4468,10 +4486,6 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) /* cleanup FS via transaction */ btrfs_cleanup_transaction(fs_info);
- mutex_lock(&fs_info->cleaner_mutex); - btrfs_run_delayed_iputs(fs_info); - mutex_unlock(&fs_info->cleaner_mutex); - down_write(&fs_info->cleanup_work_sem); up_write(&fs_info->cleanup_work_sem); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e263d4b0546f..d322cf82783f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2826,10 +2826,10 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, return eb; }
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, u64 start) { +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct extent_buffer *eb, *exists = NULL; int ret;
@@ -2865,8 +2865,11 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, free_eb: btrfs_release_extent_buffer(eb); return exists; -} +#else + /* Stub to avoid linker error when compiled with optimizations turned off. */ + return NULL; #endif +}
static struct extent_buffer *grab_extent_buffer( struct btrfs_fs_info *fs_info, struct page *page) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 8a36117ed453..fcb60837d7dc 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -293,6 +293,8 @@ static inline int num_extent_pages(const struct extent_buffer *eb) */ static inline int num_extent_folios(const struct extent_buffer *eb) { + if (!eb->folios[0]) + return 0; if (folio_order(eb->folios[0])) return 1; return num_extent_pages(eb); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index c73a41b1ad56..d8fcc3eb85c8 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1541,8 +1541,8 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg, u64 extent_gen; int ret;
- if (unlikely(!extent_root)) { - btrfs_err(fs_info, "no valid extent root for scrub"); + if (unlikely(!extent_root || !csum_root)) { + btrfs_err(fs_info, "no valid extent or csum root for scrub"); return -EUCLEAN; } memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index b1015f383f75..c843b4aefb8a 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -487,10 +487,8 @@ static int fs_path_ensure_buf(struct fs_path *p, int len) if (p->buf_len >= len) return 0;
- if (len > PATH_MAX) { - WARN_ON(1); - return -ENOMEM; - } + if (WARN_ON(len > PATH_MAX)) + return -ENAMETOOLONG;
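WARN_ON() evaluates to its condition, so it can sit directly in the if while still printing a backtrace when the check trips, and the error code now names the real problem instead of -ENOMEM. The same idiom in isolation, only as a sketch (PATH_MAX comes from <linux/limits.h>):

static int check_name_len(size_t len)
{
	/* WARN_ON() prints a stack trace and evaluates to true when it fires. */
	if (WARN_ON(len > PATH_MAX))
		return -ENAMETOOLONG;
	return 0;
}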
path_len = p->end - p->start; old_buf_len = p->buf_len; diff --git a/fs/buffer.c b/fs/buffer.c index 32bd0f4c4223..e9e84512a027 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -176,18 +176,8 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate) } EXPORT_SYMBOL(end_buffer_write_sync);
-/* - * Various filesystems appear to want __find_get_block to be non-blocking. - * But it's the page lock which protects the buffers. To get around this, - * we get exclusion from try_to_free_buffers with the blockdev mapping's - * i_private_lock. - * - * Hack idea: for the blockdev mapping, i_private_lock contention - * may be quite high. This code could TryLock the page, and if that - * succeeds, there is no need to take i_private_lock. - */ static struct buffer_head * -__find_get_block_slow(struct block_device *bdev, sector_t block) +__find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic) { struct address_space *bd_mapping = bdev->bd_mapping; const int blkbits = bd_mapping->host->i_blkbits; @@ -204,7 +194,16 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) if (IS_ERR(folio)) goto out;
- spin_lock(&bd_mapping->i_private_lock); + /* + * Folio lock protects the buffers. Callers that cannot block + * will fallback to serializing vs try_to_free_buffers() via + * the i_private_lock. + */ + if (atomic) + spin_lock(&bd_mapping->i_private_lock); + else + folio_lock(folio); + head = folio_buffers(folio); if (!head) goto out_unlock; @@ -236,7 +235,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) 1 << blkbits); } out_unlock: - spin_unlock(&bd_mapping->i_private_lock); + if (atomic) + spin_unlock(&bd_mapping->i_private_lock); + else + folio_unlock(folio); folio_put(folio); out: return ret; @@ -656,7 +658,9 @@ EXPORT_SYMBOL(generic_buffers_fsync); void write_boundary_block(struct block_device *bdev, sector_t bblock, unsigned blocksize) { - struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); + struct buffer_head *bh; + + bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize); if (bh) { if (buffer_dirty(bh)) write_dirty_buffer(bh, 0); @@ -1394,14 +1398,15 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) * it in the LRU and mark it as accessed. If it is not present then return * NULL */ -struct buffer_head * -__find_get_block(struct block_device *bdev, sector_t block, unsigned size) +static struct buffer_head * +find_get_block_common(struct block_device *bdev, sector_t block, + unsigned size, bool atomic) { struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
if (bh == NULL) { /* __find_get_block_slow will mark the page accessed */ - bh = __find_get_block_slow(bdev, block); + bh = __find_get_block_slow(bdev, block, atomic); if (bh) bh_lru_install(bh); } else @@ -1409,8 +1414,23 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
return bh; } + +struct buffer_head * +__find_get_block(struct block_device *bdev, sector_t block, unsigned size) +{ + return find_get_block_common(bdev, block, size, true); +} EXPORT_SYMBOL(__find_get_block);
+/* same as __find_get_block() but allows sleeping contexts */ +struct buffer_head * +__find_get_block_nonatomic(struct block_device *bdev, sector_t block, + unsigned size) +{ + return find_get_block_common(bdev, block, size, false); +} +EXPORT_SYMBOL(__find_get_block_nonatomic); + /** * bdev_getblk - Get a buffer_head in a block device's buffer cache. * @bdev: The block device. @@ -1428,7 +1448,12 @@ EXPORT_SYMBOL(__find_get_block); struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block, unsigned size, gfp_t gfp) { - struct buffer_head *bh = __find_get_block(bdev, block, size); + struct buffer_head *bh; + + if (gfpflags_allow_blocking(gfp)) + bh = __find_get_block_nonatomic(bdev, block, size); + else + bh = __find_get_block(bdev, block, size);
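bdev_getblk() now derives the choice from the gfp mask: gfpflags_allow_blocking() is true for GFP_KERNEL-style masks (they carry __GFP_DIRECT_RECLAIM) and false for GFP_ATOMIC/GFP_NOWAIT, so sleepable callers take the folio-lock path while atomic ones keep the i_private_lock path. A hedged sketch of a sleepable call site converted to the new helper, modeled on the write_boundary_block() change above:

/*
 * Sketch: process context, no spinlocks held, so the non-atomic lookup
 * can use the folio lock instead of contending on i_private_lock.
 */
static void example_flush_block(struct block_device *bdev, sector_t blocknr,
				unsigned int blocksize)
{
	struct buffer_head *bh;

	bh = __find_get_block_nonatomic(bdev, blocknr, blocksize);
	if (!bh)
		return;

	if (buffer_dirty(bh))
		write_dirty_buffer(bh, 0);
	put_bh(bh);
}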
might_alloc(gfp); if (bh) diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index f2d88a358169..10461451185e 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1826,8 +1826,8 @@ static int dlm_tcp_listen_validate(void) { /* We don't support multi-homed hosts */ if (dlm_local_count > 1) { - log_print("TCP protocol can't handle multi-homed hosts, try SCTP"); - return -EINVAL; + log_print("Detect multi-homed hosts but use only the first IP address."); + log_print("Try SCTP, if you want to enable multi-link."); }
return 0; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index edbabb3256c9..2c11e8f3048e 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -453,6 +453,7 @@ int __init erofs_init_shrinker(void); void erofs_exit_shrinker(void); int __init z_erofs_init_subsystem(void); void z_erofs_exit_subsystem(void); +int z_erofs_init_super(struct super_block *sb); unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr_shrink); int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, @@ -462,7 +463,6 @@ void z_erofs_put_gbuf(void *ptr); int z_erofs_gbuf_growsize(unsigned int nrpages); int __init z_erofs_gbuf_init(void); void z_erofs_gbuf_exit(void); -int erofs_init_managed_cache(struct super_block *sb); int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb); #else static inline void erofs_shrinker_register(struct super_block *sb) {} @@ -471,7 +471,7 @@ static inline int erofs_init_shrinker(void) { return 0; } static inline void erofs_exit_shrinker(void) {} static inline int z_erofs_init_subsystem(void) { return 0; } static inline void z_erofs_exit_subsystem(void) {} -static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } +static inline int z_erofs_init_super(struct super_block *sb) { return 0; } #endif /* !CONFIG_EROFS_FS_ZIP */
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 5b279977c9d5..3421448fef0e 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -664,9 +664,16 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) else sb->s_flags &= ~SB_POSIXACL;
-#ifdef CONFIG_EROFS_FS_ZIP - xa_init(&sbi->managed_pslots); -#endif + err = z_erofs_init_super(sb); + if (err) + return err; + + if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) { + inode = erofs_iget(sb, sbi->packed_nid); + if (IS_ERR(inode)) + return PTR_ERR(inode); + sbi->packed_inode = inode; + }
inode = erofs_iget(sb, sbi->root_nid); if (IS_ERR(inode)) @@ -678,24 +685,11 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) iput(inode); return -EINVAL; } - sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM;
erofs_shrinker_register(sb); - if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) { - sbi->packed_inode = erofs_iget(sb, sbi->packed_nid); - if (IS_ERR(sbi->packed_inode)) { - err = PTR_ERR(sbi->packed_inode); - sbi->packed_inode = NULL; - return err; - } - } - err = erofs_init_managed_cache(sb); - if (err) - return err; - err = erofs_xattr_prefixes_init(sb); if (err) return err; @@ -831,6 +825,16 @@ static int erofs_init_fs_context(struct fs_context *fc) return 0; }
+static void erofs_drop_internal_inodes(struct erofs_sb_info *sbi) +{ + iput(sbi->packed_inode); + sbi->packed_inode = NULL; +#ifdef CONFIG_EROFS_FS_ZIP + iput(sbi->managed_cache); + sbi->managed_cache = NULL; +#endif +} + static void erofs_kill_sb(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); @@ -840,6 +844,7 @@ static void erofs_kill_sb(struct super_block *sb) kill_anon_super(sb); else kill_block_super(sb); + erofs_drop_internal_inodes(sbi); fs_put_dax(sbi->dif0.dax_dev, NULL); erofs_fscache_unregister_fs(sb); erofs_sb_free(sbi); @@ -850,17 +855,10 @@ static void erofs_put_super(struct super_block *sb) { struct erofs_sb_info *const sbi = EROFS_SB(sb);
- DBG_BUGON(!sbi); - erofs_unregister_sysfs(sb); erofs_shrinker_unregister(sb); erofs_xattr_prefixes_cleanup(sb); -#ifdef CONFIG_EROFS_FS_ZIP - iput(sbi->managed_cache); - sbi->managed_cache = NULL; -#endif - iput(sbi->packed_inode); - sbi->packed_inode = NULL; + erofs_drop_internal_inodes(sbi); erofs_free_dev_context(sbi->devs); sbi->devs = NULL; erofs_fscache_unregister_fs(sb); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index e5e94afc5af8..74521d7dbee1 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -663,18 +663,18 @@ static const struct address_space_operations z_erofs_cache_aops = { .invalidate_folio = z_erofs_cache_invalidate_folio, };
-int erofs_init_managed_cache(struct super_block *sb) +int z_erofs_init_super(struct super_block *sb) { struct inode *const inode = new_inode(sb);
if (!inode) return -ENOMEM; - set_nlink(inode, 1); inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &z_erofs_cache_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); EROFS_SB(sb)->managed_cache = inode; + xa_init(&EROFS_SB(sb)->managed_pslots); return 0; }
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 3801516ac507..cc4dcb7a32b5 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -274,9 +274,11 @@ static int exfat_get_block(struct inode *inode, sector_t iblock, sector_t last_block; sector_t phys = 0; sector_t valid_blks; + loff_t i_size;
mutex_lock(&sbi->s_lock); - last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb); + i_size = i_size_read(inode); + last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size, sb); if (iblock >= last_block && !create) goto done;
@@ -305,102 +307,95 @@ static int exfat_get_block(struct inode *inode, sector_t iblock, if (buffer_delay(bh_result)) clear_buffer_delay(bh_result);
- if (create) { + /* + * In most cases, we just need to set bh_result to mapped, unmapped + * or new status as follows: + * 1. i_size == valid_size + * 2. write case (create == 1) + * 3. direct_read (!bh_result->b_folio) + * -> the unwritten part will be zeroed in exfat_direct_IO() + * + * Otherwise, in the case of buffered read, it is necessary to take + * care of the last nested block if valid_size is not equal to i_size. + */ + if (i_size == ei->valid_size || create || !bh_result->b_folio) valid_blks = EXFAT_B_TO_BLK_ROUND_UP(ei->valid_size, sb); + else + valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
- if (iblock + max_blocks < valid_blks) { - /* The range has been written, map it */ - goto done; - } else if (iblock < valid_blks) { - /* - * The range has been partially written, - * map the written part. - */ - max_blocks = valid_blks - iblock; - goto done; - } + /* The range has been fully written, map it */ + if (iblock + max_blocks < valid_blks) + goto done;
- /* The area has not been written, map and mark as new. */ - set_buffer_new(bh_result); + /* The range has been partially written, map the written part */ + if (iblock < valid_blks) { + max_blocks = valid_blks - iblock; + goto done; + }
+ /* The area has not been written, map and mark as new for create case */ + if (create) { + set_buffer_new(bh_result); ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb); mark_inode_dirty(inode); - } else { - valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb); + goto done; + }
- if (iblock + max_blocks < valid_blks) { - /* The range has been written, map it */ - goto done; - } else if (iblock < valid_blks) { - /* - * The area has been partially written, - * map the written part. - */ - max_blocks = valid_blks - iblock; + /* + * The area has just one block partially written. + * In that case, we should read and fill the unwritten part of + * a block with zero. + */ + if (bh_result->b_folio && iblock == valid_blks && + (ei->valid_size & (sb->s_blocksize - 1))) { + loff_t size, pos; + void *addr; + + max_blocks = 1; + + /* + * No buffer_head is allocated. + * (1) bmap: It's enough to set blocknr without I/O. + * (2) read: The unwritten part should be filled with zero. + * If a folio does not have any buffers, + * let's returns -EAGAIN to fallback to + * block_read_full_folio() for per-bh IO. + */ + if (!folio_buffers(bh_result->b_folio)) { + err = -EAGAIN; goto done; - } else if (iblock == valid_blks && - (ei->valid_size & (sb->s_blocksize - 1))) { - /* - * The block has been partially written, - * zero the unwritten part and map the block. - */ - loff_t size, pos; - void *addr; - - max_blocks = 1; - - /* - * For direct read, the unwritten part will be zeroed in - * exfat_direct_IO() - */ - if (!bh_result->b_folio) - goto done; - - /* - * No buffer_head is allocated. - * (1) bmap: It's enough to fill bh_result without I/O. - * (2) read: The unwritten part should be filled with 0 - * If a folio does not have any buffers, - * let's returns -EAGAIN to fallback to - * per-bh IO like block_read_full_folio(). - */ - if (!folio_buffers(bh_result->b_folio)) { - err = -EAGAIN; - goto done; - } + }
- pos = EXFAT_BLK_TO_B(iblock, sb); - size = ei->valid_size - pos; - addr = folio_address(bh_result->b_folio) + - offset_in_folio(bh_result->b_folio, pos); + pos = EXFAT_BLK_TO_B(iblock, sb); + size = ei->valid_size - pos; + addr = folio_address(bh_result->b_folio) + + offset_in_folio(bh_result->b_folio, pos);
- /* Check if bh->b_data points to proper addr in folio */ - if (bh_result->b_data != addr) { - exfat_fs_error_ratelimit(sb, + /* Check if bh->b_data points to proper addr in folio */ + if (bh_result->b_data != addr) { + exfat_fs_error_ratelimit(sb, "b_data(%p) != folio_addr(%p)", bh_result->b_data, addr); - err = -EINVAL; - goto done; - } - - /* Read a block */ - err = bh_read(bh_result, 0); - if (err < 0) - goto done; + err = -EINVAL; + goto done; + }
- /* Zero unwritten part of a block */ - memset(bh_result->b_data + size, 0, - bh_result->b_size - size); + /* Read a block */ + err = bh_read(bh_result, 0); + if (err < 0) + goto done;
- err = 0; - } else { - /* - * The range has not been written, clear the mapped flag - * to only zero the cache and do not read from disk. - */ - clear_buffer_mapped(bh_result); - } + /* Zero unwritten part of a block */ + memset(bh_result->b_data + size, 0, bh_result->b_size - size); + err = 0; + goto done; } + + /* + * The area has not been written, clear mapped for read/bmap cases. + * If so, it will be filled with zero without reading from disk. + */ + clear_buffer_mapped(bh_result); done: bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb); if (err < 0) diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 8042ad873808..c48fd36b2d74 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -649,8 +649,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, /* Hm, nope. Are (enough) root reserved clusters available? */ if (uid_eq(sbi->s_resuid, current_fsuid()) || (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) || - capable(CAP_SYS_RESOURCE) || - (flags & EXT4_MB_USE_ROOT_BLOCKS)) { + (flags & EXT4_MB_USE_ROOT_BLOCKS) || + capable(CAP_SYS_RESOURCE)) {
if (free_clusters >= (nclusters + dirty_clusters + resv_clusters)) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index bbffb76d9a90..c2e6989a568c 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -278,7 +278,8 @@ struct ext4_system_blocks { /* * Flags for ext4_io_end->flags */ -#define EXT4_IO_END_UNWRITTEN 0x0001 +#define EXT4_IO_END_UNWRITTEN 0x0001 +#define EXT4_IO_END_FAILED 0x0002
struct ext4_io_end_vec { struct list_head list; /* list of io_end_vec */ @@ -3012,6 +3013,8 @@ extern int ext4_inode_attach_jinode(struct inode *inode); extern int ext4_can_truncate(struct inode *inode); extern int ext4_truncate(struct inode *); extern int ext4_break_layouts(struct inode *); +extern int ext4_truncate_page_cache_block_range(struct inode *inode, + loff_t start, loff_t end); extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length); extern void ext4_set_inode_flags(struct inode *, bool init); extern int ext4_alloc_da_blocks(struct inode *inode); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 60909af2d4a5..ba3419958a83 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4667,22 +4667,13 @@ static long ext4_zero_range(struct file *file, loff_t offset, goto out_mutex; }
- /* - * For journalled data we need to write (and checkpoint) pages - * before discarding page cache to avoid inconsitent data on - * disk in case of crash before zeroing trans is committed. - */ - if (ext4_should_journal_data(inode)) { - ret = filemap_write_and_wait_range(mapping, start, - end - 1); - if (ret) { - filemap_invalidate_unlock(mapping); - goto out_mutex; - } + /* Now release the pages and zero block aligned part of pages */ + ret = ext4_truncate_page_cache_block_range(inode, start, end); + if (ret) { + filemap_invalidate_unlock(mapping); + goto out_mutex; }
- /* Now release the pages and zero block aligned part of pages */ - truncate_pagecache_range(inode, start, end - 1); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 487d9aec56c9..38fe9a213d09 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -31,6 +31,7 @@ #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/mpage.h> +#include <linux/rmap.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/bio.h> @@ -3879,6 +3880,68 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, return ret; }
+static inline void ext4_truncate_folio(struct inode *inode, + loff_t start, loff_t end) +{ + unsigned long blocksize = i_blocksize(inode); + struct folio *folio; + + /* Nothing to be done if no complete block needs to be truncated. */ + if (round_up(start, blocksize) >= round_down(end, blocksize)) + return; + + folio = filemap_lock_folio(inode->i_mapping, start >> PAGE_SHIFT); + if (IS_ERR(folio)) + return; + + if (folio_mkclean(folio)) + folio_mark_dirty(folio); + folio_unlock(folio); + folio_put(folio); +} + +int ext4_truncate_page_cache_block_range(struct inode *inode, + loff_t start, loff_t end) +{ + unsigned long blocksize = i_blocksize(inode); + int ret; + + /* + * For journalled data we need to write (and checkpoint) pages + * before discarding page cache to avoid inconsitent data on disk + * in case of crash before freeing or unwritten converting trans + * is committed. + */ + if (ext4_should_journal_data(inode)) { + ret = filemap_write_and_wait_range(inode->i_mapping, start, + end - 1); + if (ret) + return ret; + goto truncate_pagecache; + } + + /* + * If the block size is less than the page size, the file's mapped + * blocks within one page could be freed or converted to unwritten. + * So it's necessary to remove writable userspace mappings, and then + * ext4_page_mkwrite() can be called during subsequent write access + * to these partial folios. + */ + if (!IS_ALIGNED(start | end, PAGE_SIZE) && + blocksize < PAGE_SIZE && start < inode->i_size) { + loff_t page_boundary = round_up(start, PAGE_SIZE); + + ext4_truncate_folio(inode, start, min(page_boundary, end)); + if (end > page_boundary) + ext4_truncate_folio(inode, + round_down(end, PAGE_SIZE), end); + } + +truncate_pagecache: + truncate_pagecache_range(inode, start, end - 1); + return 0; +} + static void ext4_wait_dax_page(struct inode *inode) { filemap_invalidate_unlock(inode->i_mapping); @@ -3933,17 +3996,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
trace_ext4_punch_hole(inode, offset, length, 0);
- /* - * Write out all dirty pages to avoid race conditions - * Then release them. - */ - if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { - ret = filemap_write_and_wait_range(mapping, offset, - offset + length - 1); - if (ret) - return ret; - } - inode_lock(inode);
/* No need to punch hole beyond i_size */ @@ -4005,8 +4057,11 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) ret = ext4_update_disksize_before_punch(inode, offset, length); if (ret) goto out_dio; - truncate_pagecache_range(inode, first_block_offset, - last_block_offset); + + ret = ext4_truncate_page_cache_block_range(inode, + first_block_offset, last_block_offset + 1); + if (ret) + goto out_dio; }
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 92f49d7eb3c0..109cf88e7caa 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -6644,7 +6644,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, for (i = 0; i < count; i++) { cond_resched(); if (is_metadata) - bh = sb_find_get_block(inode->i_sb, block + i); + bh = sb_find_get_block_nonatomic(inode->i_sb, + block + i); ext4_forget(handle, is_metadata, inode, bh, block + i); } } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index b7b9261fec3b..cb023922c93c 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -181,14 +181,25 @@ static int ext4_end_io_end(ext4_io_end_t *io_end) "list->prev 0x%p\n", io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
- io_end->handle = NULL; /* Following call will use up the handle */ - ret = ext4_convert_unwritten_io_end_vec(handle, io_end); + /* + * Do not convert the unwritten extents if data writeback fails, + * or stale data may be exposed. + */ + io_end->handle = NULL; /* Following call will use up the handle */ + if (unlikely(io_end->flag & EXT4_IO_END_FAILED)) { + ret = -EIO; + if (handle) + jbd2_journal_free_reserved(handle); + } else { + ret = ext4_convert_unwritten_io_end_vec(handle, io_end); + } if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) { ext4_msg(inode->i_sb, KERN_EMERG, "failed to convert unwritten extents to written " "extents -- potential data loss! " "(inode %lu, error %d)", inode->i_ino, ret); } + ext4_clear_io_unwritten_flag(io_end); ext4_release_io_end(io_end); return ret; @@ -344,6 +355,7 @@ static void ext4_end_bio(struct bio *bio) bio->bi_status, inode->i_ino, (unsigned long long) bi_sector >> (inode->i_blkbits - 9)); + io_end->flag |= EXT4_IO_END_FAILED; mapping_set_error(inode->i_mapping, blk_status_to_errno(bio->bi_status)); } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4291ab3c20be..99117d1e1bdd 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2787,6 +2787,13 @@ static int ext4_check_opt_consistency(struct fs_context *fc, }
if (is_remount) { + if (!sbi->s_journal && + ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) { + ext4_msg(NULL, KERN_WARNING, + "Remounting fs w/o journal so ignoring data_err option"); + ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT); + } + if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { ext4_msg(NULL, KERN_ERR, "can't mount with " @@ -5396,6 +5403,11 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) "data=, fs mounted w/o journal"); goto failed_mount3a; } + if (test_opt(sb, DATA_ERR_ABORT)) { + ext4_msg(sb, KERN_ERR, + "can't mount with data_err=abort, fs mounted w/o journal"); + goto failed_mount3a; + } sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; clear_opt(sb, JOURNAL_CHECKSUM); clear_opt(sb, DATA_FLAGS); @@ -6744,6 +6756,7 @@ static int ext4_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; int ret; + bool old_ro = sb_rdonly(sb);
fc->s_fs_info = EXT4_SB(sb);
@@ -6755,9 +6768,9 @@ static int ext4_reconfigure(struct fs_context *fc) if (ret < 0) return ret;
- ext4_msg(sb, KERN_INFO, "re-mounted %pU %s. Quota mode: %s.", - &sb->s_uuid, sb_rdonly(sb) ? "ro" : "r/w", - ext4_quota_mode(sb)); + ext4_msg(sb, KERN_INFO, "re-mounted %pU%s.", + &sb->s_uuid, + (old_ro != sb_rdonly(sb)) ? (sb_rdonly(sb) ? " ro" : " r/w") : "");
return 0; } diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index d9a44f03e558..7df638f901a1 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -61,6 +61,12 @@ struct f2fs_attr { int id; };
+struct f2fs_base_attr { + struct attribute attr; + ssize_t (*show)(struct f2fs_base_attr *a, char *buf); + ssize_t (*store)(struct f2fs_base_attr *a, const char *buf, size_t len); +}; + static ssize_t f2fs_sbi_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf);
@@ -864,6 +870,25 @@ static void f2fs_sb_release(struct kobject *kobj) complete(&sbi->s_kobj_unregister); }
+static ssize_t f2fs_base_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct f2fs_base_attr *a = container_of(attr, + struct f2fs_base_attr, attr); + + return a->show ? a->show(a, buf) : 0; +} + +static ssize_t f2fs_base_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len) +{ + struct f2fs_base_attr *a = container_of(attr, + struct f2fs_base_attr, attr); + + return a->store ? a->store(a, buf, len) : 0; +} + /* * Note that there are three feature list entries: * 1) /sys/fs/f2fs/features @@ -882,14 +907,13 @@ static void f2fs_sb_release(struct kobject *kobj) * please add new on-disk feature in this list only. * - ref. F2FS_SB_FEATURE_RO_ATTR() */ -static ssize_t f2fs_feature_show(struct f2fs_attr *a, - struct f2fs_sb_info *sbi, char *buf) +static ssize_t f2fs_feature_show(struct f2fs_base_attr *a, char *buf) { return sysfs_emit(buf, "supported\n"); }
#define F2FS_FEATURE_RO_ATTR(_name) \ -static struct f2fs_attr f2fs_attr_##_name = { \ +static struct f2fs_base_attr f2fs_base_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = 0444 }, \ .show = f2fs_feature_show, \ } @@ -1258,37 +1282,38 @@ static struct attribute *f2fs_attrs[] = { }; ATTRIBUTE_GROUPS(f2fs);
+#define BASE_ATTR_LIST(name) (&f2fs_base_attr_##name.attr) static struct attribute *f2fs_feat_attrs[] = { #ifdef CONFIG_FS_ENCRYPTION - ATTR_LIST(encryption), - ATTR_LIST(test_dummy_encryption_v2), + BASE_ATTR_LIST(encryption), + BASE_ATTR_LIST(test_dummy_encryption_v2), #if IS_ENABLED(CONFIG_UNICODE) - ATTR_LIST(encrypted_casefold), + BASE_ATTR_LIST(encrypted_casefold), #endif #endif /* CONFIG_FS_ENCRYPTION */ #ifdef CONFIG_BLK_DEV_ZONED - ATTR_LIST(block_zoned), + BASE_ATTR_LIST(block_zoned), #endif - ATTR_LIST(atomic_write), - ATTR_LIST(extra_attr), - ATTR_LIST(project_quota), - ATTR_LIST(inode_checksum), - ATTR_LIST(flexible_inline_xattr), - ATTR_LIST(quota_ino), - ATTR_LIST(inode_crtime), - ATTR_LIST(lost_found), + BASE_ATTR_LIST(atomic_write), + BASE_ATTR_LIST(extra_attr), + BASE_ATTR_LIST(project_quota), + BASE_ATTR_LIST(inode_checksum), + BASE_ATTR_LIST(flexible_inline_xattr), + BASE_ATTR_LIST(quota_ino), + BASE_ATTR_LIST(inode_crtime), + BASE_ATTR_LIST(lost_found), #ifdef CONFIG_FS_VERITY - ATTR_LIST(verity), + BASE_ATTR_LIST(verity), #endif - ATTR_LIST(sb_checksum), + BASE_ATTR_LIST(sb_checksum), #if IS_ENABLED(CONFIG_UNICODE) - ATTR_LIST(casefold), + BASE_ATTR_LIST(casefold), #endif - ATTR_LIST(readonly), + BASE_ATTR_LIST(readonly), #ifdef CONFIG_F2FS_FS_COMPRESSION - ATTR_LIST(compression), + BASE_ATTR_LIST(compression), #endif - ATTR_LIST(pin_file), + BASE_ATTR_LIST(pin_file), NULL, }; ATTRIBUTE_GROUPS(f2fs_feat); @@ -1362,9 +1387,14 @@ static struct kset f2fs_kset = { .kobj = {.ktype = &f2fs_ktype}, };
+static const struct sysfs_ops f2fs_feat_attr_ops = { + .show = f2fs_base_attr_show, + .store = f2fs_base_attr_store, +}; + static const struct kobj_type f2fs_feat_ktype = { .default_groups = f2fs_feat_groups, - .sysfs_ops = &f2fs_attr_ops, + .sysfs_ops = &f2fs_feat_attr_ops, };
static struct kobject f2fs_feat = { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index a1e86ec07c38..ff543dc09130 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1133,6 +1133,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, else if (err == -EINTR) fuse_invalidate_attr(inode);
+ if (err == -ENOSYS) + err = -EPERM; return err; }
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index a51fe42732c4..4f1eca99786b 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -843,12 +843,13 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock) __releases(&gl->gl_lockref.lock) __acquires(&gl->gl_lockref.lock) { - struct gfs2_holder *gh = NULL; + struct gfs2_holder *gh;
if (test_bit(GLF_LOCK, &gl->gl_flags)) return; set_bit(GLF_LOCK, &gl->gl_flags);
+ /* While a demote is in progress, the GLF_LOCK flag must be set. */ GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
if (test_bit(GLF_DEMOTE, &gl->gl_flags) && @@ -860,18 +861,22 @@ __acquires(&gl->gl_lockref.lock) set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); gl->gl_target = gl->gl_demote_state; + do_xmote(gl, NULL, gl->gl_target); + return; } else { if (test_bit(GLF_DEMOTE, &gl->gl_flags)) gfs2_demote_wake(gl); if (do_promote(gl)) goto out_unlock; gh = find_first_waiter(gl); + if (!gh) + goto out_unlock; gl->gl_target = gh->gh_state; if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) do_error(gl, 0); /* Fail queued try locks */ + do_xmote(gl, gh, gl->gl_target); + return; } - do_xmote(gl, gh, gl->gl_target); - return;
out_sched: clear_bit(GLF_LOCK, &gl->gl_flags); diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 667f67342c52..f85f401526c5 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -287,19 +287,20 @@ static int fc_do_one_pass(journal_t *journal, int jbd2_journal_recover(journal_t *journal) { int err, err2; - journal_superblock_t * sb; - struct recovery_info info;
memset(&info, 0, sizeof(info)); - sb = journal->j_superblock;
/* * The journal superblock's s_start field (the current log head) * is always zero if, and only if, the journal was cleanly - * unmounted. + * unmounted. We use its in-memory version j_tail here because + * jbd2_journal_wipe() could have updated it without updating journal + * superblock. */ - if (!sb->s_start) { + if (!journal->j_tail) { + journal_superblock_t *sb = journal->j_superblock; + jbd2_debug(1, "No recovery required, last transaction %d, head block %u\n", be32_to_cpu(sb->s_sequence), be32_to_cpu(sb->s_head)); journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1; diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index ce63d5fde9c3..f68fc8c255f0 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -345,7 +345,8 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr, bh = bh_in;
if (!bh) { - bh = __find_get_block(bdev, blocknr, journal->j_blocksize); + bh = __find_get_block_nonatomic(bdev, blocknr, + journal->j_blocksize); if (bh) BUFFER_TRACE(bh, "found on hash"); } @@ -355,7 +356,8 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
/* If there is a different buffer_head lying around in * memory anywhere... */ - bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize); + bh2 = __find_get_block_nonatomic(bdev, blocknr, + journal->j_blocksize); if (bh2) { /* ... and it has RevokeValid status... */ if (bh2 != bh && buffer_revokevalid(bh2)) @@ -466,7 +468,8 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh) * state machine will get very upset later on. */ if (need_cancel) { struct buffer_head *bh2; - bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size); + bh2 = __find_get_block_nonatomic(bh->b_bdev, bh->b_blocknr, + bh->b_size); if (bh2) { if (bh2 != bh) clear_buffer_revoked(bh2); @@ -495,9 +498,9 @@ void jbd2_clear_buffer_revoked_flags(journal_t *journal) struct jbd2_revoke_record_s *record; struct buffer_head *bh; record = (struct jbd2_revoke_record_s *)list_entry; - bh = __find_get_block(journal->j_fs_dev, - record->blocknr, - journal->j_blocksize); + bh = __find_get_block_nonatomic(journal->j_fs_dev, + record->blocknr, + journal->j_blocksize); if (bh) { clear_buffer_revoked(bh); __brelse(bh); diff --git a/fs/namespace.c b/fs/namespace.c index c3c1e8c644f2..c1ac585e41e3 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -750,12 +750,8 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) smp_mb(); // see mntput_no_expire() and do_umount() if (likely(!read_seqretry(&mount_lock, seq))) return 0; - if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { - mnt_add_count(mnt, -1); - return 1; - } lock_mount_hash(); - if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { + if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) { mnt_add_count(mnt, -1); unlock_mount_hash(); return 1; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 325ba0663a6d..8bdbc4dca89c 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -307,7 +307,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi) if (delegation == NULL) goto out; spin_lock(&delegation->lock); - if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { + if (delegation->inode && + !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); /* Refcount matched in nfs_end_delegation_return() */ ret = nfs_get_delegation(delegation); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index a1cfe4cc60c4..8f7ea4076653 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1263,6 +1263,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, case -ECONNRESET: case -EHOSTDOWN: case -EHOSTUNREACH: + case -ENETDOWN: case -ENETUNREACH: case -EADDRINUSE: case -ENOBUFS: diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 596f35170137..330273cf9453 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -74,6 +74,8 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) { + if (unlikely(nfs_current_task_exiting())) + return -EINTR; schedule(); if (signal_pending_state(mode, current)) return -ERESTARTSYS; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 8b568a514fd1..1be4be3d4a2b 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -901,6 +901,11 @@ static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid) NFS4_STATEID_OTHER_SIZE); }
+static inline bool nfs_current_task_exiting(void) +{ + return (current->flags & PF_EXITING) != 0; +} + static inline bool nfs_error_is_fatal(int err) { switch (err) { diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 1566163c6d85..88b0fb343ae0 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -39,7 +39,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); schedule_timeout(NFS_JUKEBOX_RETRY_TIME); res = -ERESTARTSYS; - } while (!fatal_signal_pending(current)); + } while (!fatal_signal_pending(current) && !nfs_current_task_exiting()); return res; }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ca01f79c82e4..11f2b5cb3b06 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -434,6 +434,8 @@ static int nfs4_delay_killable(long *timeout) { might_sleep();
+ if (unlikely(nfs_current_task_exiting())) + return -EINTR; __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); schedule_timeout(nfs4_update_delay(timeout)); if (!__fatal_signal_pending(current)) @@ -445,6 +447,8 @@ static int nfs4_delay_interruptible(long *timeout) { might_sleep();
+ if (unlikely(nfs_current_task_exiting())) + return -EINTR; __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); schedule_timeout(nfs4_update_delay(timeout)); if (!signal_pending(current)) @@ -1765,7 +1769,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, rcu_read_unlock(); trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
- if (!fatal_signal_pending(current)) { + if (!fatal_signal_pending(current) && + !nfs_current_task_exiting()) { if (schedule_timeout(5*HZ) == 0) status = -EAGAIN; else @@ -3569,7 +3574,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, write_sequnlock(&state->seqlock); trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
- if (fatal_signal_pending(current)) + if (fatal_signal_pending(current) || nfs_current_task_exiting()) status = -EINTR; else if (schedule_timeout(5*HZ) != 0) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index dafd61186557..397a86011878 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2740,7 +2740,15 @@ static void nfs4_state_manager(struct nfs_client *clp) pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s" " with error %d\n", section_sep, section, clp->cl_hostname, -status); - ssleep(1); + switch (status) { + case -ENETDOWN: + case -ENETUNREACH: + nfs_mark_client_ready(clp, -EIO); + break; + default: + ssleep(1); + break; + } out_drain: memalloc_nofs_restore(memflags); nfs4_end_drain_session(clp); diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index ac03fd3c330c..ecd71c190885 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -693,8 +693,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb) int blocksize; int err;
- down_write(&nilfs->ns_sem); - blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE); if (!blocksize) { nilfs_err(sb, "unable to set blocksize"); @@ -767,7 +765,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb) set_nilfs_init(nilfs); err = 0; out: - up_write(&nilfs->ns_sem); return err;
failed_sbh: diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 2ebee1dced1b..c2a73bfb16aa 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1271,7 +1271,7 @@ static int ocfs2_force_read_journal(struct inode *inode) }
for (i = 0; i < p_blocks; i++, p_blkno++) { - bh = __find_get_block(osb->sb->s_bdev, p_blkno, + bh = __find_get_block_nonatomic(osb->sb->s_bdev, p_blkno, osb->sb->s_blocksize); /* block not cached. */ if (!bh) diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index aae6d2b8767d..63d7c1ca0dfd 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -23,9 +23,9 @@ static int orangefs_writepage_locked(struct page *page, struct orangefs_write_range *wr = NULL; struct iov_iter iter; struct bio_vec bv; - size_t len, wlen; + size_t wlen; ssize_t ret; - loff_t off; + loff_t len, off;
set_page_writeback(page);
@@ -91,8 +91,7 @@ static int orangefs_writepages_work(struct orangefs_writepages *ow, struct orangefs_write_range *wrp, wr; struct iov_iter iter; ssize_t ret; - size_t len; - loff_t off; + loff_t len, off; int i;
len = i_size_read(inode); diff --git a/fs/pidfs.c b/fs/pidfs.c index 80675b6bf884..52b7e4f76732 100644 --- a/fs/pidfs.c +++ b/fs/pidfs.c @@ -95,20 +95,21 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) { struct pid *pid = pidfd_pid(file); - bool thread = file->f_flags & PIDFD_THREAD; struct task_struct *task; __poll_t poll_flags = 0;
poll_wait(file, &pid->wait_pidfd, pts); /* - * Depending on PIDFD_THREAD, inform pollers when the thread - * or the whole thread-group exits. + * Don't wake waiters if the thread-group leader exited + * prematurely. They either get notified when the last subthread + * exits or not at all if one of the remaining subthreads execs + * and assumes the struct pid of the old thread-group leader. */ guard(rcu)(); task = pid_task(pid, PIDTYPE_PID); if (!task) poll_flags = EPOLLIN | EPOLLRDNORM | EPOLLHUP; - else if (task->exit_state && (thread || thread_group_empty(task))) + else if (task->exit_state && !delay_group_leader(task)) poll_flags = EPOLLIN | EPOLLRDNORM;
return poll_flags; diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index 56815799ce79..9de6b280c4f4 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -265,7 +265,7 @@ static void parse_options(char *options) static int pstore_show_options(struct seq_file *m, struct dentry *root) { if (kmsg_bytes != CONFIG_PSTORE_DEFAULT_KMSG_BYTES) - seq_printf(m, ",kmsg_bytes=%lu", kmsg_bytes); + seq_printf(m, ",kmsg_bytes=%u", kmsg_bytes); return 0; }
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h index 801d6c0b170c..a0fc51196910 100644 --- a/fs/pstore/internal.h +++ b/fs/pstore/internal.h @@ -6,7 +6,7 @@ #include <linux/time.h> #include <linux/pstore.h>
-extern unsigned long kmsg_bytes; +extern unsigned int kmsg_bytes;
#ifdef CONFIG_PSTORE_FTRACE extern void pstore_register_ftrace(void); @@ -35,7 +35,7 @@ static inline void pstore_unregister_pmsg(void) {}
extern struct pstore_info *psinfo;
-extern void pstore_set_kmsg_bytes(int); +extern void pstore_set_kmsg_bytes(unsigned int bytes); extern void pstore_get_records(int); extern void pstore_get_backend_records(struct pstore_info *psi, struct dentry *root, int quiet); diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index f56b066ab80c..557cf9d40177 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -92,8 +92,8 @@ module_param(compress, charp, 0444); MODULE_PARM_DESC(compress, "compression to use");
/* How much of the kernel log to snapshot */ -unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES; -module_param(kmsg_bytes, ulong, 0444); +unsigned int kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES; +module_param(kmsg_bytes, uint, 0444); MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
static void *compress_workspace; @@ -107,9 +107,9 @@ static void *compress_workspace; static char *big_oops_buf; static size_t max_compressed_size;
-void pstore_set_kmsg_bytes(int bytes) +void pstore_set_kmsg_bytes(unsigned int bytes) { - kmsg_bytes = bytes; + WRITE_ONCE(kmsg_bytes, bytes); }
/* Tag each group of saved records with a sequence number */ @@ -278,6 +278,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail) { struct kmsg_dump_iter iter; + unsigned int remaining = READ_ONCE(kmsg_bytes); unsigned long total = 0; const char *why; unsigned int part = 1; @@ -300,7 +301,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, kmsg_dump_rewind(&iter);
oopscount++; - while (total < kmsg_bytes) { + while (total < remaining) { char *dst; size_t dst_size; int header_size; diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c index e36f0e2d7d21..9a73478e0068 100644 --- a/fs/smb/client/cifsacl.c +++ b/fs/smb/client/cifsacl.c @@ -811,7 +811,23 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, return;
for (i = 0; i < num_aces; ++i) { + if (end_of_acl - acl_base < acl_size) + break; + ppace[i] = (struct smb_ace *) (acl_base + acl_size); + acl_base = (char *)ppace[i]; + acl_size = offsetof(struct smb_ace, sid) + + offsetof(struct smb_sid, sub_auth); + + if (end_of_acl - acl_base < acl_size || + ppace[i]->sid.num_subauth == 0 || + ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES || + (end_of_acl - acl_base < + acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) || + (le16_to_cpu(ppace[i]->size) < + acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth)) + break; + #ifdef CONFIG_CIFS_DEBUG2 dump_ace(ppace[i], end_of_acl); #endif @@ -855,7 +871,6 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, (void *)ppace[i], sizeof(struct smb_ace)); */
- acl_base = (char *)ppace[i]; acl_size = le16_to_cpu(ppace[i]->size); }
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h index ee78bb6741d6..28f8ca470770 100644 --- a/fs/smb/client/cifspdu.h +++ b/fs/smb/client/cifspdu.h @@ -1226,10 +1226,9 @@ typedef struct smb_com_query_information_rsp { typedef struct smb_com_setattr_req { struct smb_hdr hdr; /* wct = 8 */ __le16 attr; - __le16 time_low; - __le16 time_high; + __le32 last_write_time; __le16 reserved[5]; /* must be zero */ - __u16 ByteCount; + __le16 ByteCount; __u8 BufferFormat; /* 4 = ASCII */ unsigned char fileName[]; } __attribute__((packed)) SETATTR_REQ; diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index 90b7b30abfbd..6e938b17875f 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -31,6 +31,9 @@ extern void cifs_small_buf_release(void *); extern void free_rsp_buf(int, void *); extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *, unsigned int /* length */); +extern int smb_send_kvec(struct TCP_Server_Info *server, + struct msghdr *msg, + size_t *sent); extern unsigned int _get_xid(void); extern void _free_xid(unsigned int); #define get_xid() \ @@ -393,6 +396,10 @@ extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon); extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *FSData);
+extern int SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon, + const char *fileName, __le32 attributes, __le64 write_time, + const struct nls_table *nls_codepage, + struct cifs_sb_info *cifs_sb); extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, const struct nls_table *nls_codepage, diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index e2a14e25da87..8667f403a0ab 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -5199,6 +5199,63 @@ CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon, return rc; }
+int +SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon, + const char *fileName, __le32 attributes, __le64 write_time, + const struct nls_table *nls_codepage, + struct cifs_sb_info *cifs_sb) +{ + SETATTR_REQ *pSMB; + SETATTR_RSP *pSMBr; + struct timespec64 ts; + int bytes_returned; + int name_len; + int rc; + + cifs_dbg(FYI, "In %s path %s\n", __func__, fileName); + +retry: + rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, + (void **) &pSMBr); + if (rc) + return rc; + + if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { + name_len = + cifsConvertToUTF16((__le16 *) pSMB->fileName, + fileName, PATH_MAX, nls_codepage, + cifs_remap(cifs_sb)); + name_len++; /* trailing null */ + name_len *= 2; + } else { + name_len = copy_path_name(pSMB->fileName, fileName); + } + /* Only few attributes can be set by this command, others are not accepted by Win9x. */ + pSMB->attr = cpu_to_le16(le32_to_cpu(attributes) & + (ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE)); + /* Zero write time value (in both NT and SETATTR formats) means to not change it. */ + if (le64_to_cpu(write_time) != 0) { + ts = cifs_NTtimeToUnix(write_time); + pSMB->last_write_time = cpu_to_le32(ts.tv_sec); + } + pSMB->BufferFormat = 0x04; + name_len++; /* account for buffer type byte */ + inc_rfc1001_len(pSMB, (__u16)name_len); + pSMB->ByteCount = cpu_to_le16(name_len); + + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned, 0); + if (rc) + cifs_dbg(FYI, "Send error in %s = %d\n", __func__, rc); + + cifs_buf_release(pSMB); + + if (rc == -EAGAIN) + goto retry; + + return rc; +} + /* Some legacy servers such as NT4 require that the file times be set on an open handle, rather than by pathname - this is awkward due to potential access conflicts on the open, but it is unavoidable for these diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 112057c7ca11..8260d0e07a62 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -3063,8 +3063,10 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) * sessinit is sent but no second negprot */ struct rfc1002_session_packet req = {}; - struct smb_hdr *smb_buf = (struct smb_hdr *)&req; + struct msghdr msg = {}; + struct kvec iov = {}; unsigned int len; + size_t sent;
req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
@@ -3093,10 +3095,18 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) * As per rfc1002, @len must be the number of bytes that follows the * length field of a rfc1002 session request payload. */ - len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req); + len = sizeof(req.trailer.session_req); + req.type = RFC1002_SESSION_REQUEST; + req.flags = 0; + req.length = cpu_to_be16(len); + len += offsetof(typeof(req), trailer.session_req); + iov.iov_base = &req; + iov.iov_len = len; + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len); + rc = smb_send_kvec(server, &msg, &sent); + if (rc < 0 || len != sent) + return (rc == -EINTR || rc == -EAGAIN) ? rc : -ECONNABORTED;
- smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len); - rc = smb_send(server, smb_buf, len); /* * RFC1001 layer in at least one server requires very short break before * negprot presumably because not expecting negprot to follow so fast. @@ -3105,7 +3115,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) */ usleep_range(1000, 2000);
- return rc; + return 0; }
static int @@ -3957,11 +3967,13 @@ int cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server) { + bool in_retry = false; int rc = 0;
if (!server->ops->need_neg || !server->ops->negotiate) return -ENOSYS;
+retry: /* only send once per connect */ spin_lock(&server->srv_lock); if (server->tcpStatus != CifsGood && @@ -3981,6 +3993,14 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, spin_unlock(&server->srv_lock);
rc = server->ops->negotiate(xid, ses, server); + if (rc == -EAGAIN) { + /* Allow one retry attempt */ + if (!in_retry) { + in_retry = true; + goto retry; + } + rc = -EHOSTDOWN; + } if (rc == 0) { spin_lock(&server->srv_lock); if (server->tcpStatus == CifsInNegotiate) diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 69cca4f17dba..8b70d92f4845 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -1058,6 +1058,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, int i, opt; bool is_smb3 = !strcmp(fc->fs_type->name, "smb3"); bool skip_parsing = false; + char *hostname;
cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key);
@@ -1267,6 +1268,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, case Opt_rsize: ctx->rsize = result.uint_32; ctx->got_rsize = true; + ctx->vol_rsize = ctx->rsize; break; case Opt_wsize: ctx->wsize = result.uint_32; @@ -1282,6 +1284,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, ctx->wsize, PAGE_SIZE); } } + ctx->vol_wsize = ctx->wsize; break; case Opt_acregmax: if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { @@ -1388,6 +1391,16 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, cifs_errorf(fc, "OOM when copying UNC string\n"); goto cifs_parse_mount_err; } + hostname = extract_hostname(ctx->UNC); + if (IS_ERR(hostname)) { + cifs_errorf(fc, "Cannot extract hostname from UNC string\n"); + goto cifs_parse_mount_err; + } + /* last byte, type, is 0x20 for servr type */ + memset(ctx->target_rfc1001_name, 0x20, RFC1001_NAME_LEN_WITH_NULL); + for (i = 0; i < RFC1001_NAME_LEN && hostname[i] != 0; i++) + ctx->target_rfc1001_name[i] = toupper(hostname[i]); + kfree(hostname); break; case Opt_user: kfree(ctx->username); diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h index ac6baa774ad3..c7e00025518f 100644 --- a/fs/smb/client/fs_context.h +++ b/fs/smb/client/fs_context.h @@ -263,6 +263,9 @@ struct smb3_fs_context { bool use_client_guid:1; /* reuse existing guid for multichannel */ u8 client_guid[SMB2_CLIENT_GUID_SIZE]; + /* User-specified original r/wsize value */ + unsigned int vol_rsize; + unsigned int vol_wsize; unsigned int bsize; unsigned int rasize; unsigned int rsize; diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c index 47ddeb7fa111..aa45ef6ae99a 100644 --- a/fs/smb/client/link.c +++ b/fs/smb/client/link.c @@ -257,7 +257,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_open_parms oparms; struct cifs_io_parms io_parms = {0}; int buf_type = CIFS_NO_BUFFER; - FILE_ALL_INFO file_info; + struct cifs_open_info_data query_data;
oparms = (struct cifs_open_parms) { .tcon = tcon, @@ -269,11 +269,11 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, .fid = &fid, };
- rc = CIFS_open(xid, &oparms, &oplock, &file_info); + rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &query_data); if (rc) return rc;
- if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { + if (query_data.fi.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { rc = -ENOENT; /* it's not a symlink */ goto out; @@ -312,7 +312,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, .fid = &fid, };
- rc = CIFS_open(xid, &oparms, &oplock, NULL); + rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, NULL); if (rc) return rc;
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c index 50f96259d9ad..787d6bcb5d1d 100644 --- a/fs/smb/client/readdir.c +++ b/fs/smb/client/readdir.c @@ -733,7 +733,10 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, else cifs_buf_release(cfile->srch_inf. ntwrk_buf_start); + /* Reset all pointers to the network buffer to prevent stale references */ cfile->srch_inf.ntwrk_buf_start = NULL; + cfile->srch_inf.srch_entries_start = NULL; + cfile->srch_inf.last_entry = NULL; } rc = initiate_cifs_search(xid, file, full_path); if (rc) { @@ -756,11 +759,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, rc = server->ops->query_dir_next(xid, tcon, &cfile->fid, search_flags, &cfile->srch_inf); + if (rc) + return -ENOENT; /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cfile->srch_inf.last_entry) cifs_save_resume_key(cfile->srch_inf.last_entry, cfile); - if (rc) - return -ENOENT; } if (index_to_find < cfile->srch_inf.index_of_last_entry) { /* we found the buffer that contains the entry */ diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c index 55cceb822932..0385a514f59e 100644 --- a/fs/smb/client/smb1ops.c +++ b/fs/smb/client/smb1ops.c @@ -426,13 +426,6 @@ cifs_negotiate(const unsigned int xid, { int rc; rc = CIFSSMBNegotiate(xid, ses, server); - if (rc == -EAGAIN) { - /* retry only once on 1st time connection */ - set_credits(server, 1); - rc = CIFSSMBNegotiate(xid, ses, server); - if (rc == -EAGAIN) - rc = -EHOSTDOWN; - } return rc; }
@@ -444,8 +437,8 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) unsigned int wsize;
/* start with specified wsize, or default */ - if (ctx->wsize) - wsize = ctx->wsize; + if (ctx->got_wsize) + wsize = ctx->vol_wsize; else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) wsize = CIFS_DEFAULT_IOSIZE; else @@ -497,7 +490,7 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) else defsize = server->maxBuf - sizeof(READ_RSP);
- rsize = ctx->rsize ? ctx->rsize : defsize; + rsize = ctx->got_rsize ? ctx->vol_rsize : defsize;
/* * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to @@ -548,24 +541,104 @@ static int cifs_query_path_info(const unsigned int xid, const char *full_path, struct cifs_open_info_data *data) { - int rc; + int rc = -EOPNOTSUPP; FILE_ALL_INFO fi = {}; + struct cifs_search_info search_info = {}; + bool non_unicode_wildcard = false;
data->reparse_point = false; data->adjust_tz = false;
- /* could do find first instead but this returns more info */ - rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls, - cifs_remap(cifs_sb)); /* - * BB optimize code so we do not make the above call when server claims - * no NT SMB support and the above call failed at least once - set flag - * in tcon or mount. + * First try CIFSSMBQPathInfo() function which returns more info + * (NumberOfLinks) than CIFSFindFirst() fallback function. + * Some servers like Win9x do not support SMB_QUERY_FILE_ALL_INFO over + * TRANS2_QUERY_PATH_INFORMATION, but support it with a filehandle over + * TRANS2_QUERY_FILE_INFORMATION (function CIFSSMBQFileInfo()). But SMB + * Open command on non-NT servers works only for files, does not work + * for directories. And moreover Win9x SMB server returns bogus data in + * SMB_QUERY_FILE_ALL_INFO Attributes field. So for non-NT servers, + * do not even use CIFSSMBQPathInfo() or CIFSSMBQFileInfo() function. + */ + if (tcon->ses->capabilities & CAP_NT_SMBS) + rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, + cifs_sb->local_nls, cifs_remap(cifs_sb)); + + /* + * Non-UNICODE variant of fallback functions below expands wildcards, + * so they cannot be used for querying paths with wildcard characters. */ - if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) { + if (rc && !(tcon->ses->capabilities & CAP_UNICODE) && strpbrk(full_path, "*?"><")) + non_unicode_wildcard = true; + + /* + * Then fallback to CIFSFindFirst() which works also with non-NT servers + * but does not provide NumberOfLinks. + */ + if ((rc == -EOPNOTSUPP || rc == -EINVAL) && + !non_unicode_wildcard) { + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) + search_info.info_level = SMB_FIND_FILE_INFO_STANDARD; + else + search_info.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO; + rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL, + CIFS_SEARCH_CLOSE_ALWAYS | CIFS_SEARCH_CLOSE_AT_END, + &search_info, false); + if (rc == 0) { + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) { + FIND_FILE_STANDARD_INFO *di; + int offset = tcon->ses->server->timeAdj; + + di = (FIND_FILE_STANDARD_INFO *)search_info.srch_entries_start; + fi.CreationTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( + di->CreationDate, di->CreationTime, offset))); + fi.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( + di->LastAccessDate, di->LastAccessTime, offset))); + fi.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( + di->LastWriteDate, di->LastWriteTime, offset))); + fi.ChangeTime = fi.LastWriteTime; + fi.Attributes = cpu_to_le32(le16_to_cpu(di->Attributes)); + fi.AllocationSize = cpu_to_le64(le32_to_cpu(di->AllocationSize)); + fi.EndOfFile = cpu_to_le64(le32_to_cpu(di->DataSize)); + } else { + FILE_FULL_DIRECTORY_INFO *di; + + di = (FILE_FULL_DIRECTORY_INFO *)search_info.srch_entries_start; + fi.CreationTime = di->CreationTime; + fi.LastAccessTime = di->LastAccessTime; + fi.LastWriteTime = di->LastWriteTime; + fi.ChangeTime = di->ChangeTime; + fi.Attributes = di->ExtFileAttributes; + fi.AllocationSize = di->AllocationSize; + fi.EndOfFile = di->EndOfFile; + fi.EASize = di->EaSize; + } + fi.NumberOfLinks = cpu_to_le32(1); + fi.DeletePending = 0; + fi.Directory = !!(le32_to_cpu(fi.Attributes) & ATTR_DIRECTORY); + cifs_buf_release(search_info.ntwrk_buf_start); + } else if (!full_path[0]) { + /* + * CIFSFindFirst() does not work on root path if the + * root path was exported on the server from the top + * level path
(drive letter). + */ + rc = -EOPNOTSUPP; + } + } + + /* + * If everything failed, then fall back to the legacy SMB command + * SMB_COM_QUERY_INFORMATION, which works with all servers but + * provides only limited information. + */ + if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !non_unicode_wildcard) { rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls, cifs_remap(cifs_sb)); data->adjust_tz = true; + } else if ((rc == -EOPNOTSUPP || rc == -EINVAL) && non_unicode_wildcard) { + /* Path with non-UNICODE wildcard character cannot exist. */ + rc = -ENOENT; }
if (!rc) { @@ -662,6 +735,13 @@ static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, int rc; FILE_ALL_INFO fi = {};
+ /* + * CIFSSMBQFileInfo() for non-NT servers returns bogus data in + * Attributes fields. So do not use this command for non-NT servers. + */ + if (!(tcon->ses->capabilities & CAP_NT_SMBS)) + return -EOPNOTSUPP; + if (cfile->symlink_target) { data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!data->symlink_target) @@ -832,6 +912,9 @@ smb_set_file_info(struct inode *inode, const char *full_path, struct cifs_fid fid; struct cifs_open_parms oparms; struct cifsFileInfo *open_file; + FILE_BASIC_INFO new_buf; + struct cifs_open_info_data query_data; + __le64 write_time = buf->LastWriteTime; struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = NULL; @@ -839,20 +922,58 @@ smb_set_file_info(struct inode *inode, const char *full_path,
/* if the file is already open for write, just use that fileid */ open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY); + if (open_file) { fid.netfid = open_file->fid.netfid; netpid = open_file->pid; tcon = tlink_tcon(open_file->tlink); - goto set_via_filehandle; + } else { + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) { + rc = PTR_ERR(tlink); + tlink = NULL; + goto out; + } + tcon = tlink_tcon(tlink); }
- tlink = cifs_sb_tlink(cifs_sb); - if (IS_ERR(tlink)) { - rc = PTR_ERR(tlink); - tlink = NULL; - goto out; + /* + * Non-NT servers interprets zero time value in SMB_SET_FILE_BASIC_INFO + * over TRANS2_SET_FILE_INFORMATION as a valid time value. NT servers + * interprets zero time value as do not change existing value on server. + * API of ->set_file_info() callback expects that zero time value has + * the NT meaning - do not change. Therefore if server is non-NT and + * some time values in "buf" are zero, then fetch missing time values. + */ + if (!(tcon->ses->capabilities & CAP_NT_SMBS) && + (!buf->CreationTime || !buf->LastAccessTime || + !buf->LastWriteTime || !buf->ChangeTime)) { + rc = cifs_query_path_info(xid, tcon, cifs_sb, full_path, &query_data); + if (rc) { + if (open_file) { + cifsFileInfo_put(open_file); + open_file = NULL; + } + goto out; + } + /* + * Original write_time from buf->LastWriteTime is preserved + * as SMBSetInformation() interprets zero as do not change. + */ + new_buf = *buf; + buf = &new_buf; + if (!buf->CreationTime) + buf->CreationTime = query_data.fi.CreationTime; + if (!buf->LastAccessTime) + buf->LastAccessTime = query_data.fi.LastAccessTime; + if (!buf->LastWriteTime) + buf->LastWriteTime = query_data.fi.LastWriteTime; + if (!buf->ChangeTime) + buf->ChangeTime = query_data.fi.ChangeTime; } - tcon = tlink_tcon(tlink); + + if (open_file) + goto set_via_filehandle;
rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls, cifs_sb); @@ -873,8 +994,45 @@ smb_set_file_info(struct inode *inode, const char *full_path, .fid = &fid, };
- cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n"); - rc = CIFS_open(xid, &oparms, &oplock, NULL); + if (S_ISDIR(inode->i_mode) && !(tcon->ses->capabilities & CAP_NT_SMBS)) { + /* Opening directory path is not possible on non-NT servers. */ + rc = -EOPNOTSUPP; + } else { + /* + * Use cifs_open_file() instead of CIFS_open() as the + * cifs_open_file() selects the correct function which + * works also on non-NT servers. + */ + rc = cifs_open_file(xid, &oparms, &oplock, NULL); + /* + * Opening path for writing on non-NT servers is not + * possible when the read-only attribute is already set. + * Non-NT server in this case returns -EACCES. For those + * servers the only possible way how to clear the read-only + * bit is via SMB_COM_SETATTR command. + */ + if (rc == -EACCES && + (cinode->cifsAttrs & ATTR_READONLY) && + le32_to_cpu(buf->Attributes) != 0 && /* 0 = do not change attrs */ + !(le32_to_cpu(buf->Attributes) & ATTR_READONLY) && + !(tcon->ses->capabilities & CAP_NT_SMBS)) + rc = -EOPNOTSUPP; + } + + /* Fallback to SMB_COM_SETATTR command when absolutelty needed. */ + if (rc == -EOPNOTSUPP) { + cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n"); + rc = SMBSetInformation(xid, tcon, full_path, + buf->Attributes != 0 ? buf->Attributes : cpu_to_le32(cinode->cifsAttrs), + write_time, + cifs_sb->local_nls, cifs_sb); + if (rc == 0) + cinode->cifsAttrs = le32_to_cpu(buf->Attributes); + else + rc = -EACCES; + goto out; + } + if (rc != 0) { if (rc == -EIO) rc = -EINVAL; @@ -882,6 +1040,7 @@ smb_set_file_info(struct inode *inode, const char *full_path, }
netpid = current->tgid; + cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for attrs/times not supported by this server\n");
set_via_filehandle: rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid); @@ -892,6 +1051,21 @@ smb_set_file_info(struct inode *inode, const char *full_path, CIFSSMBClose(xid, tcon, fid.netfid); else cifsFileInfo_put(open_file); + + /* + * Setting the read-only bit is not honored on non-NT servers when done + * via open-semantics. So for setting it, use the SMB_COM_SETATTR command. + * This command works only after the file is closed, so use it only when + * the operation was called without a filehandle. + */ + if (open_file == NULL && + !(tcon->ses->capabilities & CAP_NT_SMBS) && + le32_to_cpu(buf->Attributes) & ATTR_READONLY) { + SMBSetInformation(xid, tcon, full_path, + buf->Attributes, + 0 /* do not change write time */, + cifs_sb->local_nls, cifs_sb); + } out: if (tlink != NULL) cifs_put_tlink(tlink); diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c index e836bc2193dd..b313c128ffba 100644 --- a/fs/smb/client/smb2file.c +++ b/fs/smb/client/smb2file.c @@ -107,16 +107,25 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 int err_buftype = CIFS_NO_BUFFER; struct cifs_fid *fid = oparms->fid; struct network_resiliency_req nr_ioctl_req; + bool retry_without_read_attributes = false;
smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb); if (smb2_path == NULL) return -ENOMEM;
- oparms->desired_access |= FILE_READ_ATTRIBUTES; + if (!(oparms->desired_access & FILE_READ_ATTRIBUTES)) { + oparms->desired_access |= FILE_READ_ATTRIBUTES; + retry_without_read_attributes = true; + } smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, &err_buftype); + if (rc == -EACCES && retry_without_read_attributes) { + oparms->desired_access &= ~FILE_READ_ATTRIBUTES; + rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, + &err_buftype); + } if (rc && data) { struct smb2_hdr *hdr = err_iov.iov_base;
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 590b70d71694..74bcc51ccd32 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -464,12 +464,20 @@ smb2_negotiate(const unsigned int xid, server->CurrentMid = 0; spin_unlock(&server->mid_lock); rc = SMB2_negotiate(xid, ses, server); - /* BB we probably don't need to retry with modern servers */ - if (rc == -EAGAIN) - rc = -EHOSTDOWN; return rc; }
+static inline unsigned int +prevent_zero_iosize(unsigned int size, const char *type) +{ + if (size == 0) { + cifs_dbg(VFS, "SMB: Zero %ssize calculated, using minimum value %u\n", + type, CIFS_MIN_DEFAULT_IOSIZE); + return CIFS_MIN_DEFAULT_IOSIZE; + } + return size; +} + static unsigned int smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { @@ -477,12 +485,12 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) unsigned int wsize;
/* start with specified wsize, or default */ - wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE; + wsize = ctx->got_wsize ? ctx->vol_wsize : CIFS_DEFAULT_IOSIZE; wsize = min_t(unsigned int, wsize, server->max_write); if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
- return wsize; + return prevent_zero_iosize(wsize, "w"); }
static unsigned int @@ -492,7 +500,7 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) unsigned int wsize;
/* start with specified wsize, or default */ - wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE; + wsize = ctx->got_wsize ? ctx->vol_wsize : SMB3_DEFAULT_IOSIZE; wsize = min_t(unsigned int, wsize, server->max_write); #ifdef CONFIG_CIFS_SMB_DIRECT if (server->rdma) { @@ -514,7 +522,7 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
- return wsize; + return prevent_zero_iosize(wsize, "w"); }
static unsigned int @@ -524,13 +532,13 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) unsigned int rsize;
/* start with specified rsize, or default */ - rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE; + rsize = ctx->got_rsize ? ctx->vol_rsize : CIFS_DEFAULT_IOSIZE; rsize = min_t(unsigned int, rsize, server->max_read);
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
- return rsize; + return prevent_zero_iosize(rsize, "r"); }
static unsigned int @@ -540,7 +548,7 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) unsigned int rsize;
/* start with specified rsize, or default */ - rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE; + rsize = ctx->got_rsize ? ctx->vol_rsize : SMB3_DEFAULT_IOSIZE; rsize = min_t(unsigned int, rsize, server->max_read); #ifdef CONFIG_CIFS_SMB_DIRECT if (server->rdma) { @@ -563,7 +571,7 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
- return rsize; + return prevent_zero_iosize(rsize, "r"); }
/* diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c index 91812150186c..9f13a705f7f6 100644 --- a/fs/smb/client/transport.c +++ b/fs/smb/client/transport.c @@ -179,7 +179,7 @@ delete_mid(struct mid_q_entry *mid) * Our basic "send data to server" function. Should be called with srv_mutex * held. The caller is responsible for handling the results. */ -static int +int smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, size_t *sent) { diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h index 9f272cc8f566..0a4ca286f416 100644 --- a/fs/smb/common/smb2pdu.h +++ b/fs/smb/common/smb2pdu.h @@ -95,6 +95,9 @@ */ #define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
+/* According to the MS-SMB2 specification, the minimum recommended value is 65536. */ +#define CIFS_MIN_DEFAULT_IOSIZE (65536) + /* * SMB2 Header Definition * diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index f0760d786502..08d9a7cfba8c 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -1448,7 +1448,7 @@ static int ntlm_authenticate(struct ksmbd_work *work, { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; - struct channel *chann = NULL; + struct channel *chann = NULL, *old; struct ksmbd_user *user; u64 prev_id; int sz, rc; @@ -1560,7 +1560,12 @@ static int ntlm_authenticate(struct ksmbd_work *work, return -ENOMEM;
chann->conn = conn; - xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP); + old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann, + KSMBD_DEFAULT_GFP); + if (xa_is_err(old)) { + kfree(chann); + return xa_err(old); + } } }
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index e059316be36f..59ae63ab8685 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -426,10 +426,15 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n", *pos, count);
+ if (*pos >= XATTR_SIZE_MAX) { + pr_err("stream write position %lld is out of bounds\n", *pos); + return -EINVAL; + } + size = *pos + count; if (size > XATTR_SIZE_MAX) { size = XATTR_SIZE_MAX; - count = (*pos + count) - XATTR_SIZE_MAX; + count = XATTR_SIZE_MAX - *pos; }
v_len = ksmbd_vfs_getcasexattr(idmap, @@ -443,13 +448,6 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, goto out; }
- if (v_len <= *pos) { - pr_err("stream write position %lld is out of bounds (stream length: %zd)\n", - *pos, v_len); - err = -EINVAL; - goto out; - } - if (v_len < size) { wbuf = kvzalloc(size, KSMBD_DEFAULT_GFP); if (!wbuf) { diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 2d5ea9f9ff43..6692253f0b5b 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -132,6 +132,7 @@ struct ahash_request { * This is a counterpart to @init_tfm, used to remove * various changes set in @init_tfm. * @clone_tfm: Copy transform into new object, may allocate memory. + * @reqsize: Size of the request context. * @halg: see struct hash_alg_common */ struct ahash_alg { @@ -148,6 +149,8 @@ struct ahash_alg { void (*exit_tfm)(struct crypto_ahash *tfm); int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
+ unsigned int reqsize; + struct hash_alg_common halg; };
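Editor's note: the new reqsize member lets an ahash implementation declare its per-request context size in the algorithm definition itself rather than calling crypto_ahash_set_reqsize() from ->init_tfm(); the crypto/ahash.c change in this release is what consumes it. A hedged sketch of a driver-side definition (all example_ names, sizes and the stub callbacks are hypothetical):

#include <crypto/internal/hash.h>
#include <linux/module.h>

struct example_req_ctx {
        u8  partial[64];
        u32 len;
};

static int example_init(struct ahash_request *req)   { return 0; }
static int example_update(struct ahash_request *req) { return 0; }
static int example_final(struct ahash_request *req)  { return 0; }

static struct ahash_alg example_alg = {
        .init    = example_init,
        .update  = example_update,
        .final   = example_final,
        /* core allocates this much request context per ahash_request */
        .reqsize = sizeof(struct example_req_ctx),
        .halg = {
                .digestsize = 32,
                .base = {
                        .cra_name        = "example-hash",
                        .cra_driver_name = "example-hash-generic",
                        .cra_blocksize   = 64,
                        .cra_module      = THIS_MODULE,
                },
        },
};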
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 31ca88deb10d..1ded9a8d4e84 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -376,8 +376,27 @@ struct drm_atomic_state { * * Allow full modeset. This is used by the ATOMIC IOCTL handler to * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should - * never consult this flag, instead looking at the output of - * drm_atomic_crtc_needs_modeset(). + * generally not consult this flag, but instead look at the output of + * drm_atomic_crtc_needs_modeset(). The detailed rules are: + * + * - Drivers must not consult @allow_modeset in the atomic commit path. + * Use drm_atomic_crtc_needs_modeset() instead. + * + * - Drivers must consult @allow_modeset before adding unrelated struct + * drm_crtc_state to this commit by calling + * drm_atomic_get_crtc_state(). See also the warning in the + * documentation for that function. + * + * - Drivers must never change this flag, it is under the exclusive + * control of userspace. + * + * - Drivers may consult @allow_modeset in the atomic check path, if + * they have the choice between an optimal hardware configuration + * which requires a modeset, and a less optimal configuration which + * can be committed without a modeset. An example would be suboptimal + * scanout FIFO allocation resulting in increased idle power + * consumption. This allows userspace to avoid flickering and delays + * for the normal composition loop at reasonable cost. */ bool allow_modeset : 1; /** diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index d8b86df2ec0d..ff251745de18 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -35,6 +35,7 @@ */
#include <linux/kref.h> +#include <linux/dma-buf.h> #include <linux/dma-resv.h> #include <linux/list.h> #include <linux/mutex.h> @@ -570,6 +571,18 @@ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_obje return (obj->handle_count > 1) || obj->dma_buf; }
+/** + * drm_gem_is_imported() - Tests if GEM object's buffer has been imported + * @obj: the GEM object + * + * Returns: + * True if the GEM object's buffer has been imported, false otherwise + */ +static inline bool drm_gem_is_imported(const struct drm_gem_object *obj) +{ + return !!obj->import_attach; +} + #ifdef CONFIG_LOCKDEP /** * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list. diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index ce91d9b2acb9..7e029c82ae45 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -111,6 +111,7 @@ struct bpf_prog_list { struct bpf_prog *prog; struct bpf_cgroup_link *link; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; + u32 flags; };
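Editor's note: drm_gem_is_imported() gives drivers a single predicate for "this buffer came in through PRIME import" instead of open-coding obj->import_attach checks; the new <linux/dma-buf.h> include above supports callers that then reach into the attachment. A hedged usage sketch, where the function name and the locally-allocated fallback are illustrative:

#include <drm/drm_gem.h>
#include <linux/dma-buf.h>
#include <linux/iosys-map.h>

static int example_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        /* imported buffers are mapped through the exporter */
        if (drm_gem_is_imported(obj))
                return dma_buf_vmap_unlocked(obj->import_attach->dmabuf, map);

        /* a driver's own mapping path for locally allocated objects
         * would go here */
        return -EOPNOTSUPP;
}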
int cgroup_bpf_inherit(struct cgroup *cgrp); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 932139c5d46f..ffcd76d97770 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -223,6 +223,8 @@ void __wait_on_buffer(struct buffer_head *); wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned size); +struct buffer_head *__find_get_block_nonatomic(struct block_device *bdev, + sector_t block, unsigned size); struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block, unsigned size, gfp_t gfp); void __brelse(struct buffer_head *); @@ -398,6 +400,12 @@ sb_find_get_block(struct super_block *sb, sector_t block) return __find_get_block(sb->s_bdev, block, sb->s_blocksize); }
+static inline struct buffer_head * +sb_find_get_block_nonatomic(struct super_block *sb, sector_t block) +{ + return __find_get_block_nonatomic(sb->s_bdev, block, sb->s_blocksize); +} + static inline void map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) { diff --git a/include/linux/device.h b/include/linux/device.h index 667cb6db9019..39120b172992 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -26,9 +26,9 @@ #include <linux/atomic.h> #include <linux/uidgid.h> #include <linux/gfp.h> -#include <linux/overflow.h> #include <linux/device/bus.h> #include <linux/device/class.h> +#include <linux/device/devres.h> #include <linux/device/driver.h> #include <linux/cleanup.h> #include <asm/device.h> @@ -281,123 +281,6 @@ int __must_check device_create_bin_file(struct device *dev, void device_remove_bin_file(struct device *dev, const struct bin_attribute *attr);
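Editor's note: __find_get_block_nonatomic() and its sb_ wrapper are added alongside the existing atomic-safe lookup; the name suggests it is meant for callers that are allowed to block (no spinlocks, no atomic context), and later patches in this series switch such call sites over. A hedged sketch of how a sleepable caller might use it (the helper name is hypothetical):

#include <linux/buffer_head.h>

static void example_forget_cached_block(struct super_block *sb, sector_t block)
{
        struct buffer_head *bh;

        bh = sb_find_get_block_nonatomic(sb, block);
        if (!bh)
                return;                 /* not in the page cache */

        lock_buffer(bh);                /* may sleep - fine in this context */
        clear_buffer_dirty(bh);
        unlock_buffer(bh);
        brelse(bh);
}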
-/* device resource management */ -typedef void (*dr_release_t)(struct device *dev, void *res); -typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); - -void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid, const char *name) __malloc; -#define devres_alloc(release, size, gfp) \ - __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) -#define devres_alloc_node(release, size, gfp, nid) \ - __devres_alloc_node(release, size, gfp, nid, #release) - -void devres_for_each_res(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data, - void (*fn)(struct device *, void *, void *), - void *data); -void devres_free(void *res); -void devres_add(struct device *dev, void *res); -void *devres_find(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -void *devres_get(struct device *dev, void *new_res, - dr_match_t match, void *match_data); -void *devres_remove(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -int devres_destroy(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -int devres_release(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); - -/* devres group */ -void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); -void devres_close_group(struct device *dev, void *id); -void devres_remove_group(struct device *dev, void *id); -int devres_release_group(struct device *dev, void *id); - -/* managed devm_k.alloc/kfree for device drivers */ -void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __alloc_size(2); -void *devm_krealloc(struct device *dev, void *ptr, size_t size, - gfp_t gfp) __must_check __realloc_size(3); -__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, - const char *fmt, va_list ap) __malloc; -__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, - const char *fmt, ...) 
__malloc; -static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) -{ - return devm_kmalloc(dev, size, gfp | __GFP_ZERO); -} -static inline void *devm_kmalloc_array(struct device *dev, - size_t n, size_t size, gfp_t flags) -{ - size_t bytes; - - if (unlikely(check_mul_overflow(n, size, &bytes))) - return NULL; - - return devm_kmalloc(dev, bytes, flags); -} -static inline void *devm_kcalloc(struct device *dev, - size_t n, size_t size, gfp_t flags) -{ - return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); -} -static inline __realloc_size(3, 4) void * __must_check -devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags) -{ - size_t bytes; - - if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) - return NULL; - - return devm_krealloc(dev, p, bytes, flags); -} - -void devm_kfree(struct device *dev, const void *p); -char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; -const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); -void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) - __realloc_size(3); - -unsigned long devm_get_free_pages(struct device *dev, - gfp_t gfp_mask, unsigned int order); -void devm_free_pages(struct device *dev, unsigned long addr); - -#ifdef CONFIG_HAS_IOMEM -void __iomem *devm_ioremap_resource(struct device *dev, - const struct resource *res); -void __iomem *devm_ioremap_resource_wc(struct device *dev, - const struct resource *res); - -void __iomem *devm_of_iomap(struct device *dev, - struct device_node *node, int index, - resource_size_t *size); -#else - -static inline -void __iomem *devm_ioremap_resource(struct device *dev, - const struct resource *res) -{ - return ERR_PTR(-EINVAL); -} - -static inline -void __iomem *devm_ioremap_resource_wc(struct device *dev, - const struct resource *res) -{ - return ERR_PTR(-EINVAL); -} - -static inline -void __iomem *devm_of_iomap(struct device *dev, - struct device_node *node, int index, - resource_size_t *size) -{ - return ERR_PTR(-EINVAL); -} - -#endif - /* allows to add/remove a custom action to devres stack */ void devm_remove_action(struct device *dev, void (*action)(void *), void *data); void devm_release_action(struct device *dev, void (*action)(void *), void *data); diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h new file mode 100644 index 000000000000..9b49f9915850 --- /dev/null +++ b/include/linux/device/devres.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DEVICE_DEVRES_H_ +#define _DEVICE_DEVRES_H_ + +#include <linux/err.h> +#include <linux/gfp_types.h> +#include <linux/numa.h> +#include <linux/overflow.h> +#include <linux/stdarg.h> +#include <linux/types.h> + +struct device; +struct device_node; +struct resource; + +/* device resource management */ +typedef void (*dr_release_t)(struct device *dev, void *res); +typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); + +void * __malloc +__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name); +#define devres_alloc(release, size, gfp) \ + __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) +#define devres_alloc_node(release, size, gfp, nid) \ + __devres_alloc_node(release, size, gfp, nid, #release) + +void devres_for_each_res(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data, + void (*fn)(struct device *, void *, void *), + void *data); +void devres_free(void *res); +void 
devres_add(struct device *dev, void *res); +void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); +void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data); +void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); +int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); +int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data); + +/* devres group */ +void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); +void devres_close_group(struct device *dev, void *id); +void devres_remove_group(struct device *dev, void *id); +int devres_release_group(struct device *dev, void *id); + +/* managed devm_k.alloc/kfree for device drivers */ +void * __alloc_size(2) +devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); +void * __must_check __realloc_size(3) +devm_krealloc(struct device *dev, void *ptr, size_t size, gfp_t gfp); +static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) +{ + return devm_kmalloc(dev, size, gfp | __GFP_ZERO); +} +static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + + return devm_kmalloc(dev, bytes, flags); +} +static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags) +{ + return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); +} +static inline __realloc_size(3, 4) void * __must_check +devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) + return NULL; + + return devm_krealloc(dev, p, bytes, flags); +} + +void devm_kfree(struct device *dev, const void *p); + +void * __realloc_size(3) +devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); +static inline void *devm_kmemdup_array(struct device *dev, const void *src, + size_t n, size_t size, gfp_t flags) +{ + return devm_kmemdup(dev, src, size_mul(size, n), flags); +} + +char * __malloc +devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); +char * __printf(3, 0) __malloc +devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap); +char * __printf(3, 4) __malloc +devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); + +unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); +void devm_free_pages(struct device *dev, unsigned long addr); + +#ifdef CONFIG_HAS_IOMEM + +void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res); +void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res); + +void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, + resource_size_t *size); +#else + +static inline +void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res) +{ + return IOMEM_ERR_PTR(-EINVAL); +} + +static inline +void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res) +{ + return IOMEM_ERR_PTR(-EINVAL); +} + +static inline +void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, + resource_size_t *size) +{ + return IOMEM_ERR_PTR(-EINVAL); +} + +#endif + +#endif /* 
_DEVICE_DEVRES_H_ */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1524da363734..22b9099927fa 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -633,10 +633,14 @@ static inline int dma_mmap_wc(struct device *dev, #else #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) -#define dma_unmap_addr(PTR, ADDR_NAME) (0) -#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) -#define dma_unmap_len(PTR, LEN_NAME) (0) -#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) +#define dma_unmap_addr(PTR, ADDR_NAME) \ + ({ typeof(PTR) __p __maybe_unused = PTR; 0; }) +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + do { typeof(PTR) __p __maybe_unused = PTR; } while (0) +#define dma_unmap_len(PTR, LEN_NAME) \ + ({ typeof(PTR) __p __maybe_unused = PTR; 0; }) +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \ + do { typeof(PTR) __p __maybe_unused = PTR; } while (0) #endif
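Editor's note: the !CONFIG_NEED_DMA_MAP_STATE stubs above now evaluate (and type-check) their PTR argument instead of expanding to a bare 0 or an empty statement, so a descriptor that is only touched through these macros no longer looks unused in configurations without DMA unmap state. A small sketch of the usage pattern the change is aimed at (struct and function names are hypothetical):

#include <linux/dma-mapping.h>

struct example_tx_desc {
        DEFINE_DMA_UNMAP_ADDR(addr);
        DEFINE_DMA_UNMAP_LEN(len);
};

static void example_tx_unmap(struct device *dev, struct example_tx_desc *d)
{
        /* with the new stubs, 'd' is still evaluated even when
         * CONFIG_NEED_DMA_MAP_STATE is not set */
        dma_unmap_single(dev, dma_unmap_addr(d, addr),
                         dma_unmap_len(d, len), DMA_TO_DEVICE);
}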
#endif /* _LINUX_DMA_MAPPING_H */ diff --git a/include/linux/err.h b/include/linux/err.h index a4dacd745fcf..1d60aa86db53 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -44,6 +44,9 @@ static inline void * __must_check ERR_PTR(long error) /* Return the pointer in the percpu address space. */ #define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
+/* Cast an error pointer to __iomem. */ +#define IOMEM_ERR_PTR(error) (__force void __iomem *)ERR_PTR(error) + /** * PTR_ERR - Extract the error code from an error pointer. * @ptr: An error pointer. diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 930a591b9b61..3c3f6996f29f 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -461,7 +461,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio, const char *from = kmap_local_folio(folio, offset); size_t chunk = len;
- if (folio_test_highmem(folio) && + if (folio_test_partial_kmap(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); @@ -489,7 +489,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset, char *to = kmap_local_folio(folio, offset); size_t chunk = len;
- if (folio_test_highmem(folio) && + if (folio_test_partial_kmap(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); @@ -522,7 +522,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio, { size_t len = folio_size(folio) - offset;
- if (folio_test_highmem(folio)) { + if (folio_test_partial_kmap(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) { @@ -560,7 +560,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset,
VM_BUG_ON(offset + len > folio_size(folio));
- if (folio_test_highmem(folio)) { + if (folio_test_partial_kmap(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) { @@ -597,7 +597,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio, size_t offset = offset_in_folio(folio, pos); char *from = kmap_local_folio(folio, offset);
- if (folio_test_highmem(folio)) { + if (folio_test_partial_kmap(folio)) { offset = offset_in_page(offset); len = min_t(size_t, len, PAGE_SIZE - offset); } else diff --git a/include/linux/io.h b/include/linux/io.h index 59ec5eea696c..40cb2de73f5e 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -65,8 +65,6 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) } #endif
-#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) - void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size); void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index a6e2aadbb91b..5aeeed22f35b 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -207,6 +207,7 @@ struct inet6_cork { struct ipv6_txoptions *opt; u8 hop_limit; u8 tclass; + u8 dontfrag:1; };
/* struct ipv6_pinfo - ipv6 private area */ diff --git a/include/linux/lzo.h b/include/linux/lzo.h index e95c7d1092b2..4d30e3624acd 100644 --- a/include/linux/lzo.h +++ b/include/linux/lzo.h @@ -24,10 +24,18 @@ int lzo1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem);
+/* Same as above but does not write more than dst_len to dst. */ +int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ int lzorle1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem);
+/* Same as above but does not write more than dst_len to dst. */ +int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* safe decompression with overrun testing */ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len); diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index f4dfc1871a95..d1c21ad6440d 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -135,6 +135,7 @@ enum axp20x_variants { #define AXP717_IRQ2_STATE 0x4a #define AXP717_IRQ3_STATE 0x4b #define AXP717_IRQ4_STATE 0x4c +#define AXP717_TS_PIN_CFG 0x50 #define AXP717_ICC_CHG_SET 0x62 #define AXP717_ITERM_CHG_SET 0x63 #define AXP717_CV_CHG_SET 0x64 diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 27f42f713c89..86f0f2a25a3d 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1135,7 +1135,7 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_buf *buf);
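Editor's note: per the comments above, the new *_compress_safe() variants treat *dst_len as an in/out parameter: it must hold the capacity of dst on entry, receives the compressed length on success, and the compressor will not write past that capacity. A hedged caller sketch (buffer management and error mapping are illustrative):

#include <linux/lzo.h>
#include <linux/slab.h>

static int example_lzo_compress(const u8 *src, size_t src_len,
                                u8 *dst, size_t dst_cap, size_t *out_len)
{
        size_t dst_len = dst_cap;       /* in: capacity, out: result size */
        void *wrkmem;
        int ret;

        wrkmem = kvmalloc(LZO1X_1_MEM_COMPRESS, GFP_KERNEL);
        if (!wrkmem)
                return -ENOMEM;

        ret = lzo1x_1_compress_safe(src, src_len, dst, &dst_len, wrkmem);
        kvfree(wrkmem);

        if (ret != LZO_E_OK)
                return -EINVAL;
        *out_len = dst_len;
        return 0;
}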
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order); void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index df73a2ccc9af..67256e776566 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -147,6 +147,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */ #define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0) +#define ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK \ + GENMASK(31 - ESW_RESERVED_BITS, ESW_ZONE_ID_BITS)
u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b744e554f014..db5c9ddef170 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -40,6 +40,8 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+#define MLX5_FS_MAX_POOL_SIZE BIT(30) + enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_NONE, MLX5_FLOW_DESTINATION_TYPE_VPORT, diff --git a/include/linux/mman.h b/include/linux/mman.h index a842783ffa62..03a910246222 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -157,7 +157,9 @@ calc_vm_flag_bits(struct file *file, unsigned long flags) return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | +#ifdef CONFIG_TRANSPARENT_HUGEPAGE _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) | +#endif arch_calc_vm_flag_bits(file, flags); }
diff --git a/include/linux/msi.h b/include/linux/msi.h index 59a421fc42bf..63d0e51f7a80 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -165,6 +165,10 @@ struct msi_desc_data { * @dev: Pointer to the device which uses this descriptor * @msg: The last set MSI message cached for reuse * @affinity: Optional pointer to a cpu affinity mask for this descriptor + * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr. + * Only used if iommu_msi_shift != 0 + * @iommu_msi_shift: Indicates how many bits of the original address should be + * preserved when using iommu_msi_iova. * @sysfs_attr: Pointer to sysfs device attribute * * @write_msi_msg: Callback that may be called when the MSI message @@ -183,7 +187,8 @@ struct msi_desc { struct msi_msg msg; struct irq_affinity_desc *affinity; #ifdef CONFIG_IRQ_MSI_IOMMU - const void *iommu_cookie; + u64 iommu_msi_iova : 58; + u64 iommu_msi_shift : 6; #endif #ifdef CONFIG_SYSFS struct device_attribute *sysfs_attrs; @@ -284,28 +289,14 @@ struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
#define msi_desc_to_dev(desc) ((desc)->dev)
-#ifdef CONFIG_IRQ_MSI_IOMMU -static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) -{ - return desc->iommu_cookie; -} - -static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, - const void *iommu_cookie) -{ - desc->iommu_cookie = iommu_cookie; -} -#else -static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) +static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova, + unsigned int msi_shift) { - return NULL; -} - -static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, - const void *iommu_cookie) -{ -} +#ifdef CONFIG_IRQ_MSI_IOMMU + desc->iommu_msi_iova = msi_iova >> msi_shift; + desc->iommu_msi_shift = msi_shift; #endif +}
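Editor's note: the opaque iommu_cookie pointer is replaced by a packed pair, a pre-shifted MSI IOVA plus the number of low address bits to preserve. The per-descriptor setter is shown above; how a consumer could fold the pair back into a message address is sketched below, under the stated assumption that the low iommu_msi_shift bits of the original address are kept and the IOVA supplies the upper bits (the helper name is hypothetical; the real consumer is not part of this excerpt):

#include <linux/msi.h>

static u64 example_msi_msg_addr(const struct msi_desc *desc, u64 direct_addr)
{
#ifdef CONFIG_IRQ_MSI_IOMMU
        if (desc->iommu_msi_shift)
                return (desc->iommu_msi_iova << desc->iommu_msi_shift) |
                       (direct_addr & ((1ULL << desc->iommu_msi_shift) - 1));
#endif
        return direct_addr;
}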
int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid, struct msi_desc *init_desc); diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index a9244291f506..4e8c6d0511c5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -573,6 +573,13 @@ FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE) PAGEFLAG_FALSE(HighMem, highmem) #endif
+/* Does kmap_local_folio() only allow access to one page of the folio? */ +#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP +#define folio_test_partial_kmap(f) true +#else +#define folio_test_partial_kmap(f) folio_test_highmem(f) +#endif + #ifdef CONFIG_SWAP static __always_inline bool folio_test_swapcache(const struct folio *folio) { diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index 0e8b74e63767..75c6c86cf09d 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h @@ -42,6 +42,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features); void pci_disable_pasid(struct pci_dev *pdev); int pci_pasid_features(struct pci_dev *pdev); int pci_max_pasids(struct pci_dev *pdev); +int pci_pasid_status(struct pci_dev *pdev); #else /* CONFIG_PCI_PASID */ static inline int pci_enable_pasid(struct pci_dev *pdev, int features) { return -EINVAL; } @@ -50,6 +51,8 @@ static inline int pci_pasid_features(struct pci_dev *pdev) { return -EINVAL; } static inline int pci_max_pasids(struct pci_dev *pdev) { return -EINVAL; } +static inline int pci_pasid_status(struct pci_dev *pdev) +{ return -EINVAL; } #endif /* CONFIG_PCI_PASID */
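Editor's note: folio_test_partial_kmap(), added in page-flags.h above, captures the real constraint behind the chunked-copy paths in highmem.h earlier in this patch: kmap_local_folio() can only expose a single page either on HIGHMEM systems or when CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is enabled, so the per-page chunking must trigger in both cases. The loop below mirrors the memcpy_from_folio() pattern as a self-contained sketch (the function name is illustrative):

#include <linux/highmem.h>
#include <linux/string.h>

static void example_copy_from_folio(char *to, struct folio *folio,
                                    size_t offset, size_t len)
{
        do {
                const char *from = kmap_local_folio(folio, offset);
                size_t chunk = len;

                /* never read past the single page that is mapped */
                if (folio_test_partial_kmap(folio) &&
                    chunk > PAGE_SIZE - offset_in_page(offset))
                        chunk = PAGE_SIZE - offset_in_page(offset);
                memcpy(to, from, chunk);
                kunmap_local(from);

                to += chunk;
                offset += chunk;
                len -= chunk;
        } while (len > 0);
}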
#endif /* LINUX_PCI_ATS_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 0997077bcc52..ce64b4b937f0 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1063,7 +1063,13 @@ struct perf_output_handle { struct perf_buffer *rb; unsigned long wakeup; unsigned long size; - u64 aux_flags; + union { + u64 flags; /* perf_output*() */ + u64 aux_flags; /* perf_aux_output*() */ + struct { + u64 skip_read : 1; + }; + }; union { void *addr; unsigned long head; diff --git a/include/linux/pnp.h b/include/linux/pnp.h index b7a7158aaf65..23fe3eaf242d 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -290,7 +290,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data) }
struct pnp_fixup { - char id[7]; + char id[8]; void (*quirk_function) (struct pnp_dev *dev); /* fixup function */ };
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index bd69ddc102fb..0844ab328851 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -95,9 +95,9 @@ static inline void __rcu_read_lock(void)
static inline void __rcu_read_unlock(void) { - preempt_enable(); if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) rcu_read_unlock_strict(); + preempt_enable(); }
static inline int rcu_preempt_depth(void) diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 90a684f94776..ae8b5cb475a3 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -104,7 +104,7 @@ extern int rcu_scheduler_active; void rcu_end_inkernel_boot(void); bool rcu_inkernel_boot_has_ended(void); bool rcu_is_watching(void); -#ifndef CONFIG_PREEMPTION +#ifndef CONFIG_PREEMPT_RCU void rcu_all_qs(void); #endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 4b95663163e0..71ad766932d3 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -247,10 +247,7 @@ struct spi_device { static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0, "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
-static inline struct spi_device *to_spi_device(const struct device *dev) -{ - return dev ? container_of(dev, struct spi_device, dev) : NULL; -} +#define to_spi_device(__dev) container_of_const(__dev, struct spi_device, dev)
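Editor's note: rewriting to_spi_device() with container_of_const() means the const qualifier of the passed-in struct device now propagates to the returned spi_device pointer (the open-coded NULL check of the old inline is gone). A small sketch of a const-correct caller:

#include <linux/spi/spi.h>

static u32 example_spi_max_speed(const struct device *dev)
{
        /* const in, const out - the old inline returned a non-const
         * pointer even for a const device */
        const struct spi_device *spi = to_spi_device(dev);

        return spi->max_speed_hz;
}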
/* Most drivers won't need to care about device refcounting */ static inline struct spi_device *spi_dev_get(struct spi_device *spi) diff --git a/include/linux/trace.h b/include/linux/trace.h index fdcd76b7be83..7eaad857dee0 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -72,8 +72,8 @@ static inline int unregister_ftrace_export(struct trace_export *export) static inline void trace_printk_init_buffers(void) { } -static inline int trace_array_printk(struct trace_array *tr, unsigned long ip, - const char *fmt, ...) +static inline __printf(3, 4) +int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { return 0; } diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 1ef95c0287f0..a93ed5ac3226 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -88,8 +88,8 @@ extern __printf(2, 3) void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); extern __printf(2, 0) void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); -extern void -trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); +extern __printf(2, 0) +void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt); @@ -113,8 +113,8 @@ static inline __printf(2, 3) void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) { } -static inline void -trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) +static inline __printf(2, 0) +void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) { }
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h index 33a4c146dc19..2ca60828f28b 100644 --- a/include/linux/usb/r8152.h +++ b/include/linux/usb/r8152.h @@ -30,6 +30,7 @@ #define VENDOR_ID_NVIDIA 0x0955 #define VENDOR_ID_TPLINK 0x2357 #define VENDOR_ID_DLINK 0x2001 +#define VENDOR_ID_DELL 0x413c #define VENDOR_ID_ASUS 0x0b05
#if IS_REACHABLE(CONFIG_USB_RTL8152) diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 8daa0929865c..43343f1586d1 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -821,7 +821,9 @@ struct v4l2_subdev_state { * possible configuration from the remote end, likely calling * this operation as close as possible to stream on time. The * operation shall fail if the pad index it has been called on - * is not valid or in case of unrecoverable failures. + * is not valid or in case of unrecoverable failures. The + * config argument has been memset to 0 just before calling + * the op. * * @set_routing: Enable or disable data connection routes described in the * subdevice routing table. Subdevs that implement this operation diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 941dc62f3027..8a712ca73f2b 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -127,6 +127,8 @@ struct wiphy; * even if it is otherwise disabled. * @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation * with very low power (VLP), even if otherwise set to NO_IR. + * @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel, + * even if otherwise set to NO_IR. */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = BIT(0), @@ -155,6 +157,7 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23), IEEE80211_CHAN_CAN_MONITOR = BIT(24), IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25), + IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY = BIT(26), };
#define IEEE80211_CHAN_NO_HT40 \ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 3b964f8834e7..fee854892bec 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -7,7 +7,7 @@ * Copyright 2007-2010 Johannes Berg johannes@sipsolutions.net * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2025 Intel Corporation */
#ifndef MAC80211_H @@ -3803,7 +3803,7 @@ enum ieee80211_reconfig_type { * @was_assoc: set if this call is due to deauth/disassoc * while just having been associated * @link_id: the link id on which the frame will be TX'ed. - * Only used with the mgd_prepare_tx() method. + * 0 for a non-MLO connection. */ struct ieee80211_prep_tx_info { u16 duration; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 83e9ef25b8d0..1484dd15a369 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -233,7 +233,6 @@ struct xfrm_state {
/* Data for encapsulator */ struct xfrm_encap_tmpl *encap; - struct sock __rcu *encap_sk;
/* NAT keepalive */ u32 nat_keepalive_interval; /* seconds */ diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index fe0512116958..555ea3d142a4 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -34,7 +34,7 @@ static inline void *_uobj_get_obj_read(struct ib_uobject *uobj) { if (IS_ERR(uobj)) - return NULL; + return ERR_CAST(uobj); return uobj->object; } #define uobj_get_obj_read(_object, _type, _id, _attrs) \ diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 575e55aa08ca..c1fe6290d04d 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -195,6 +195,7 @@ struct hda_codec { /* beep device */ struct hda_beep *beep; unsigned int beep_mode; + bool beep_just_power_on;
/* widget capabilities cache */ u32 *wcaps; diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 0bf7d25434d7..d9baf24b8ceb 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1428,6 +1428,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_iomem NULL #endif
+void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime); + /** * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer * @dma: DMA number diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index af6b3827fb1d..3b16b0cc1b7a 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1924,7 +1924,7 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, const struct prelim_ref *oldref, const struct prelim_ref *newref, u64 tree_size), - TP_ARGS(fs_info, newref, oldref, tree_size), + TP_ARGS(fs_info, oldref, newref, tree_size),
TP_STRUCT__entry_btrfs( __field( u64, root_id ) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4a939c90dc2e..552fd633f820 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1206,6 +1206,7 @@ enum bpf_perf_event_type { #define BPF_F_BEFORE (1U << 3) #define BPF_F_AFTER (1U << 4) #define BPF_F_ID (1U << 5) +#define BPF_F_PREORDER (1U << 6) #define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index 8c4470742dcd..41048271a066 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -504,9 +504,17 @@ enum iommu_hw_info_type { * IOMMU_HWPT_GET_DIRTY_BITMAP * IOMMU_HWPT_SET_DIRTY_TRACKING * + * @IOMMU_HW_CAP_PCI_PASID_EXEC: Execute Permission Supported, user ignores it + * when the struct + * iommu_hw_info::out_max_pasid_log2 is zero. + * @IOMMU_HW_CAP_PCI_PASID_PRIV: Privileged Mode Supported, user ignores it + * when the struct + * iommu_hw_info::out_max_pasid_log2 is zero. */ enum iommufd_hw_capabilities { IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, + IOMMU_HW_CAP_PCI_PASID_EXEC = 1 << 1, + IOMMU_HW_CAP_PCI_PASID_PRIV = 1 << 2, };
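Editor's note: together with the out_max_pasid_log2 field added to struct iommu_hw_info just below, these capability bits let userspace discover PASID support: a zero width means no PASID support and, per the documentation, the EXEC/PRIV bits should then be ignored. A hedged userspace sketch using headers from this release (the function name and the error handling are illustrative):

#include <linux/iommufd.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int example_query_pasid(int iommufd, __u32 dev_id)
{
        struct iommu_hw_info cmd = {
                .size = sizeof(cmd),
                .dev_id = dev_id,
        };

        if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
                return -1;

        if (!cmd.out_max_pasid_log2)
                return 0;       /* no PASID support: ignore EXEC/PRIV bits */

        printf("PASID width %u bits, exec %d, priv %d\n",
               cmd.out_max_pasid_log2,
               !!(cmd.out_capabilities & IOMMU_HW_CAP_PCI_PASID_EXEC),
               !!(cmd.out_capabilities & IOMMU_HW_CAP_PCI_PASID_PRIV));
        return 0;
}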
/** @@ -522,6 +530,9 @@ enum iommufd_hw_capabilities { * iommu_hw_info_type. * @out_capabilities: Output the generic iommu capability info type as defined * in the enum iommu_hw_capabilities. + * @out_max_pasid_log2: Output the width of PASIDs. 0 means no PASID support. + * PCI devices turn to out_capabilities to check if the + * specific capabilities is supported or not. * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -545,7 +556,8 @@ struct iommu_hw_info { __u32 data_len; __aligned_u64 data_uptr; __u32 out_data_type; - __u32 __reserved; + __u8 out_max_pasid_log2; + __u8 __reserved[3]; __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f97f5adc8d51..c2d7faf8d87f 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4294,6 +4294,8 @@ enum nl80211_wmm_rule { * otherwise completely disabled. * @NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP: This channel can be used for a * very low power (VLP) AP, despite being NO_IR. + * @NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY: This channel can be active in + * 20 MHz bandwidth, despite being NO_IR. * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -4338,6 +4340,7 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT, NL80211_FREQUENCY_ATTR_CAN_MONITOR, NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP, + NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY,
/* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4549,31 +4552,34 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed * @NL80211_RRF_ALLOW_6GHZ_VLP_AP: Very low power (VLP) AP can be permitted * despite NO_IR configuration. + * @NL80211_RRF_ALLOW_20MHZ_ACTIVITY: Allow activity in 20 MHz bandwidth, + * despite NO_IR configuration. */ enum nl80211_reg_rule_flags { - NL80211_RRF_NO_OFDM = 1<<0, - NL80211_RRF_NO_CCK = 1<<1, - NL80211_RRF_NO_INDOOR = 1<<2, - NL80211_RRF_NO_OUTDOOR = 1<<3, - NL80211_RRF_DFS = 1<<4, - NL80211_RRF_PTP_ONLY = 1<<5, - NL80211_RRF_PTMP_ONLY = 1<<6, - NL80211_RRF_NO_IR = 1<<7, - __NL80211_RRF_NO_IBSS = 1<<8, - NL80211_RRF_AUTO_BW = 1<<11, - NL80211_RRF_IR_CONCURRENT = 1<<12, - NL80211_RRF_NO_HT40MINUS = 1<<13, - NL80211_RRF_NO_HT40PLUS = 1<<14, - NL80211_RRF_NO_80MHZ = 1<<15, - NL80211_RRF_NO_160MHZ = 1<<16, - NL80211_RRF_NO_HE = 1<<17, - NL80211_RRF_NO_320MHZ = 1<<18, - NL80211_RRF_NO_EHT = 1<<19, - NL80211_RRF_PSD = 1<<20, - NL80211_RRF_DFS_CONCURRENT = 1<<21, - NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1<<22, - NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1<<23, - NL80211_RRF_ALLOW_6GHZ_VLP_AP = 1<<24, + NL80211_RRF_NO_OFDM = 1 << 0, + NL80211_RRF_NO_CCK = 1 << 1, + NL80211_RRF_NO_INDOOR = 1 << 2, + NL80211_RRF_NO_OUTDOOR = 1 << 3, + NL80211_RRF_DFS = 1 << 4, + NL80211_RRF_PTP_ONLY = 1 << 5, + NL80211_RRF_PTMP_ONLY = 1 << 6, + NL80211_RRF_NO_IR = 1 << 7, + __NL80211_RRF_NO_IBSS = 1 << 8, + NL80211_RRF_AUTO_BW = 1 << 11, + NL80211_RRF_IR_CONCURRENT = 1 << 12, + NL80211_RRF_NO_HT40MINUS = 1 << 13, + NL80211_RRF_NO_HT40PLUS = 1 << 14, + NL80211_RRF_NO_80MHZ = 1 << 15, + NL80211_RRF_NO_160MHZ = 1 << 16, + NL80211_RRF_NO_HE = 1 << 17, + NL80211_RRF_NO_320MHZ = 1 << 18, + NL80211_RRF_NO_EHT = 1 << 19, + NL80211_RRF_PSD = 1 << 20, + NL80211_RRF_DFS_CONCURRENT = 1 << 21, + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1 << 22, + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1 << 23, + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 1 << 24, + NL80211_RRF_ALLOW_20MHZ_ACTIVITY = 1 << 25, };
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h index 41ff44dfa1db..f52de5ed1b3b 100644 --- a/include/ufs/ufs_quirks.h +++ b/include/ufs/ufs_quirks.h @@ -107,4 +107,10 @@ struct ufs_dev_quirk { */ #define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
+/* + * Some ufs devices may need more time to be in hibern8 before exiting. + * Enable this quirk to give it an additional 100us. + */ +#define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 12) + #endif /* UFS_QUIRKS_H_ */ diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index 6b1247664b35..ecdbe473a49f 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -83,11 +83,11 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) seq_printf(m, "SqMask:\t0x%x\n", sq_mask); seq_printf(m, "SqHead:\t%u\n", sq_head); seq_printf(m, "SqTail:\t%u\n", sq_tail); - seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head); + seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head)); seq_printf(m, "CqMask:\t0x%x\n", cq_mask); seq_printf(m, "CqHead:\t%u\n", cq_head); seq_printf(m, "CqTail:\t%u\n", cq_tail); - seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail); + seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail)); seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head); sq_entries = min(sq_tail - sq_head, ctx->sq_entries); for (i = 0; i < sq_entries; i++) { diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 8ef0603c07f1..bd3b3f7a6f6c 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -630,6 +630,7 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying) * to care for a non-real case. */ if (need_resched()) { + ctx->cqe_sentinel = ctx->cqe_cached; io_cq_unlock_post(ctx); mutex_unlock(&ctx->uring_lock); cond_resched(); @@ -874,10 +875,15 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags) lockdep_assert(!io_wq_current_is_worker()); lockdep_assert_held(&ctx->uring_lock);
- __io_cq_lock(ctx); - posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); + if (!ctx->lockless_cq) { + spin_lock(&ctx->completion_lock); + posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); + spin_unlock(&ctx->completion_lock); + } else { + posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); + } + ctx->submit_state.cq_flush = true; - __io_cq_unlock_post(ctx); return posted; }
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 7fd9badcfaf8..35b1b585e9cb 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -94,6 +94,7 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req, kmem_cache_free(req_cachep, req); return -EOWNERDEAD; } + req->opcode = IORING_OP_NOP; req->cqe.user_data = user_data; io_req_set_res(req, res, cflags); percpu_ref_get(&ctx->refs); diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index b70d0eef8a28..477947456371 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -147,39 +147,6 @@ void bpf_struct_ops_image_free(void *image) }
#define MAYBE_NULL_SUFFIX "__nullable" -#define MAX_STUB_NAME 128 - -/* Return the type info of a stub function, if it exists. - * - * The name of a stub function is made up of the name of the struct_ops and - * the name of the function pointer member, separated by "__". For example, - * if the struct_ops type is named "foo_ops" and the function pointer - * member is named "bar", the stub function name would be "foo_ops__bar". - */ -static const struct btf_type * -find_stub_func_proto(const struct btf *btf, const char *st_op_name, - const char *member_name) -{ - char stub_func_name[MAX_STUB_NAME]; - const struct btf_type *func_type; - s32 btf_id; - int cp; - - cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s", - st_op_name, member_name); - if (cp >= MAX_STUB_NAME) { - pr_warn("Stub function name too long\n"); - return NULL; - } - btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC); - if (btf_id < 0) - return NULL; - func_type = btf_type_by_id(btf, btf_id); - if (!func_type) - return NULL; - - return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */ -}
/* Prepare argument info for every nullable argument of a member of a * struct_ops type. @@ -204,27 +171,42 @@ find_stub_func_proto(const struct btf *btf, const char *st_op_name, static int prepare_arg_info(struct btf *btf, const char *st_ops_name, const char *member_name, - const struct btf_type *func_proto, + const struct btf_type *func_proto, void *stub_func_addr, struct bpf_struct_ops_arg_info *arg_info) { const struct btf_type *stub_func_proto, *pointed_type; const struct btf_param *stub_args, *args; struct bpf_ctx_arg_aux *info, *info_buf; u32 nargs, arg_no, info_cnt = 0; + char ksym[KSYM_SYMBOL_LEN]; + const char *stub_fname; + s32 stub_func_id; u32 arg_btf_id; int offset;
- stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name); - if (!stub_func_proto) - return 0; + stub_fname = kallsyms_lookup((unsigned long)stub_func_addr, NULL, NULL, NULL, ksym); + if (!stub_fname) { + pr_warn("Cannot find the stub function name for the %s in struct %s\n", + member_name, st_ops_name); + return -ENOENT; + } + + stub_func_id = btf_find_by_name_kind(btf, stub_fname, BTF_KIND_FUNC); + if (stub_func_id < 0) { + pr_warn("Cannot find the stub function %s in btf\n", stub_fname); + return -ENOENT; + } + + stub_func_proto = btf_type_by_id(btf, stub_func_id); + stub_func_proto = btf_type_by_id(btf, stub_func_proto->type);
/* Check if the number of arguments of the stub function is the same * as the number of arguments of the function pointer. */ nargs = btf_type_vlen(func_proto); if (nargs != btf_type_vlen(stub_func_proto)) { - pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n", - st_ops_name, member_name, member_name, st_ops_name); + pr_warn("the number of arguments of the stub function %s does not match the number of arguments of the member %s of struct %s\n", + stub_fname, member_name, st_ops_name); return -EINVAL; }
@@ -254,21 +236,21 @@ static int prepare_arg_info(struct btf *btf, &arg_btf_id); if (!pointed_type || !btf_type_is_struct(pointed_type)) { - pr_warn("stub function %s__%s has %s tagging to an unsupported type\n", - st_ops_name, member_name, MAYBE_NULL_SUFFIX); + pr_warn("stub function %s has %s tagging to an unsupported type\n", + stub_fname, MAYBE_NULL_SUFFIX); goto err_out; }
offset = btf_ctx_arg_offset(btf, func_proto, arg_no); if (offset < 0) { - pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n", - st_ops_name, member_name, arg_no); + pr_warn("stub function %s has an invalid trampoline ctx offset for arg#%u\n", + stub_fname, arg_no); goto err_out; }
if (args[arg_no].type != stub_args[arg_no].type) { - pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n", - arg_no, st_ops_name, member_name); + pr_warn("arg#%u type in stub function %s does not match with its original func_proto\n", + arg_no, stub_fname); goto err_out; }
@@ -325,6 +307,13 @@ static bool is_module_member(const struct btf *btf, u32 id) return !strcmp(btf_name_by_offset(btf, t->name_off), "module"); }
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff) +{ + void *func_ptr = *(void **)(st_ops->cfi_stubs + moff); + + return func_ptr ? 0 : -ENOTSUPP; +} + int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, struct btf *btf, struct bpf_verifier_log *log) @@ -388,7 +377,10 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
for_each_member(i, t, member) { const struct btf_type *func_proto; + void **stub_func_addr; + u32 moff;
+ moff = __btf_member_bit_offset(t, member) / 8; mname = btf_name_by_offset(btf, member->name_off); if (!*mname) { pr_warn("anon member in struct %s is not supported\n", @@ -414,7 +406,11 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, func_proto = btf_type_resolve_func_ptr(btf, member->type, NULL); - if (!func_proto) + + /* The member is not a function pointer or + * the function pointer is not supported. + */ + if (!func_proto || bpf_struct_ops_supported(st_ops, moff)) continue;
if (btf_distill_func_proto(log, btf, @@ -426,8 +422,9 @@ int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, goto errout; }
+ stub_func_addr = *(void **)(st_ops->cfi_stubs + moff); err = prepare_arg_info(btf, st_ops->name, mname, - func_proto, + func_proto, stub_func_addr, arg_info + i); if (err) goto errout; @@ -1153,13 +1150,6 @@ void bpf_struct_ops_put(const void *kdata) bpf_map_put(&st_map->map); }
-int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff) -{ - void *func_ptr = *(void **)(st_ops->cfi_stubs + moff); - - return func_ptr ? 0 : -ENOTSUPP; -} - static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map) { struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 025d7e2214ae..c0d606c40195 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -369,7 +369,7 @@ static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl) /* count number of elements in the list. * it's slow but the list cannot be long */ -static u32 prog_list_length(struct hlist_head *head) +static u32 prog_list_length(struct hlist_head *head, int *preorder_cnt) { struct bpf_prog_list *pl; u32 cnt = 0; @@ -377,6 +377,8 @@ static u32 prog_list_length(struct hlist_head *head) hlist_for_each_entry(pl, head, node) { if (!prog_list_prog(pl)) continue; + if (preorder_cnt && (pl->flags & BPF_F_PREORDER)) + (*preorder_cnt)++; cnt++; } return cnt; @@ -400,7 +402,7 @@ static bool hierarchy_allows_attach(struct cgroup *cgrp,
if (flags & BPF_F_ALLOW_MULTI) return true; - cnt = prog_list_length(&p->bpf.progs[atype]); + cnt = prog_list_length(&p->bpf.progs[atype], NULL); WARN_ON_ONCE(cnt > 1); if (cnt == 1) return !!(flags & BPF_F_ALLOW_OVERRIDE); @@ -423,12 +425,12 @@ static int compute_effective_progs(struct cgroup *cgrp, struct bpf_prog_array *progs; struct bpf_prog_list *pl; struct cgroup *p = cgrp; - int cnt = 0; + int i, j, cnt = 0, preorder_cnt = 0, fstart, bstart, init_bstart;
/* count number of effective programs by walking parents */ do { if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) - cnt += prog_list_length(&p->bpf.progs[atype]); + cnt += prog_list_length(&p->bpf.progs[atype], &preorder_cnt); p = cgroup_parent(p); } while (p);
@@ -439,20 +441,34 @@ static int compute_effective_progs(struct cgroup *cgrp, /* populate the array with effective progs */ cnt = 0; p = cgrp; + fstart = preorder_cnt; + bstart = preorder_cnt - 1; do { if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) continue;
+ init_bstart = bstart; hlist_for_each_entry(pl, &p->bpf.progs[atype], node) { if (!prog_list_prog(pl)) continue;
- item = &progs->items[cnt]; + if (pl->flags & BPF_F_PREORDER) { + item = &progs->items[bstart]; + bstart--; + } else { + item = &progs->items[fstart]; + fstart++; + } item->prog = prog_list_prog(pl); bpf_cgroup_storages_assign(item->cgroup_storage, pl->storage); cnt++; } + + /* reverse pre-ordering progs at this cgroup level */ + for (i = bstart + 1, j = init_bstart; i < j; i++, j--) + swap(progs->items[i], progs->items[j]); + } while ((p = cgroup_parent(p)));
*array = progs; @@ -663,7 +679,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp, */ return -EPERM;
- if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) + if (prog_list_length(progs, NULL) >= BPF_CGROUP_MAX_PROGS) return -E2BIG;
pl = find_attach_entry(progs, prog, link, replace_prog, @@ -698,6 +714,7 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
pl->prog = prog; pl->link = link; + pl->flags = flags; bpf_cgroup_storages_assign(pl->storage, storage); cgrp->bpf.flags[atype] = saved_flags;
@@ -1073,7 +1090,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, lockdep_is_held(&cgroup_mutex)); total_cnt += bpf_prog_array_length(effective); } else { - total_cnt += prog_list_length(&cgrp->bpf.progs[atype]); + total_cnt += prog_list_length(&cgrp->bpf.progs[atype], NULL); } }
@@ -1105,7 +1122,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, u32 id;
progs = &cgrp->bpf.progs[atype]; - cnt = min_t(int, prog_list_length(progs), total_cnt); + cnt = min_t(int, prog_list_length(progs, NULL), total_cnt); i = 0; hlist_for_each_entry(pl, progs, node) { prog = prog_list_prog(pl); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index bb3ba8ebaf3d..570e2f723144 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -2223,7 +2223,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_ b = &htab->buckets[i]; rcu_read_lock(); head = &b->head; - hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { + hlist_nulls_for_each_entry_safe(elem, n, head, hash_node) { key = elem->key; if (is_percpu) { /* current cpu value for percpu map */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 977c08457756..ab74a226e3d6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -4042,7 +4042,8 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, #define BPF_F_ATTACH_MASK_BASE \ (BPF_F_ALLOW_OVERRIDE | \ BPF_F_ALLOW_MULTI | \ - BPF_F_REPLACE) + BPF_F_REPLACE | \ + BPF_F_PREORDER)
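Editor's note: BPF_F_PREORDER is now accepted by the attach path (it was added to BPF_F_ATTACH_MASK_BASE above) and recorded in the new bpf_prog_list::flags field; per the compute_effective_progs() changes, programs carrying the flag are grouped ahead of the non-preorder programs in the effective array, so they run first. A hedged userspace sketch with libbpf, assuming prog_fd and cgroup_fd are already open and the uapi headers come from this release:

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int example_attach_preorder(int prog_fd, int cgroup_fd)
{
        /* BPF_F_PREORDER is just another attach flag and can be combined
         * with BPF_F_ALLOW_MULTI */
        return bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS,
                               BPF_F_ALLOW_MULTI | BPF_F_PREORDER);
}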
#define BPF_F_ATTACH_MASK_MPROG \ (BPF_F_REPLACE | \ @@ -4606,6 +4607,8 @@ static int bpf_prog_get_info_by_fd(struct file *file, info.recursion_misses = stats.misses;
info.verified_insns = prog->aux->verified_insns; + if (prog->aux->btf) + info.btf_id = btf_obj_id(prog->aux->btf);
if (!bpf_capable()) { info.jited_prog_len = 0; @@ -4752,8 +4755,6 @@ static int bpf_prog_get_info_by_fd(struct file *file, } }
- if (prog->aux->btf) - info.btf_id = btf_obj_id(prog->aux->btf); info.attach_btf_id = prog->aux->attach_btf_id; if (attach_btf) info.attach_btf_obj_id = btf_obj_id(attach_btf); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8656208aa4bb..39a3d750f2ff 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1447,6 +1447,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->callback_unroll_depth = src->callback_unroll_depth; dst_state->used_as_loop_entry = src->used_as_loop_entry; dst_state->may_goto_depth = src->may_goto_depth; + dst_state->loop_entry = src->loop_entry; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { @@ -2987,6 +2988,21 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog, return res ? &res->func_model : NULL; }
+static int add_kfunc_in_insns(struct bpf_verifier_env *env, + struct bpf_insn *insn, int cnt) +{ + int i, ret; + + for (i = 0; i < cnt; i++, insn++) { + if (bpf_pseudo_kfunc_call(insn)) { + ret = add_kfunc_call(env, insn->imm, insn->off); + if (ret < 0) + return ret; + } + } + return 0; +} + static int add_subprog_and_kfunc(struct bpf_verifier_env *env) { struct bpf_subprog_info *subprog = env->subprog_info; @@ -17267,12 +17283,16 @@ static void clean_verifier_state(struct bpf_verifier_env *env, static void clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state *cur) { + struct bpf_verifier_state *loop_entry; struct bpf_verifier_state_list *sl;
sl = *explored_state(env, insn); while (sl) { if (sl->state.branches) goto next; + loop_entry = get_loop_entry(&sl->state); + if (loop_entry && loop_entry->branches) + goto next; if (sl->state.insn_idx != insn || !same_callsites(&sl->state, cur)) goto next; @@ -18701,6 +18721,10 @@ static int do_check(struct bpf_verifier_env *env) return err; break; } else { + if (WARN_ON_ONCE(env->cur_state->loop_entry)) { + verbose(env, "verifier bug: env->cur_state->loop_entry != NULL\n"); + return -EFAULT; + } do_print_state = true; continue; } @@ -19768,7 +19792,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) { struct bpf_subprog_info *subprogs = env->subprog_info; const struct bpf_verifier_ops *ops = env->ops; - int i, cnt, size, ctx_field_size, delta = 0, epilogue_cnt = 0; + int i, cnt, size, ctx_field_size, ret, delta = 0, epilogue_cnt = 0; const int insn_cnt = env->prog->len; struct bpf_insn *epilogue_buf = env->epilogue_buf; struct bpf_insn *insn_buf = env->insn_buf; @@ -19797,6 +19821,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; + + ret = add_kfunc_in_insns(env, epilogue_buf, epilogue_cnt - 1); + if (ret < 0) + return ret; } }
@@ -19817,6 +19845,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
env->prog = new_prog; delta += cnt - 1; + + ret = add_kfunc_in_insns(env, insn_buf, cnt - 1); + if (ret < 0) + return ret; } }
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index e63d6f3b0047..62933468aaf4 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -90,7 +90,7 @@ DEFINE_MUTEX(cgroup_mutex); DEFINE_SPINLOCK(css_set_lock);
-#ifdef CONFIG_PROVE_RCU +#if (defined CONFIG_PROVE_RCU || defined CONFIG_LOCKDEP) EXPORT_SYMBOL_GPL(cgroup_mutex); EXPORT_SYMBOL_GPL(css_set_lock); #endif diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index 3e01781aeb7b..c4ce2f5a9745 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -323,13 +323,11 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp) rcu_read_unlock(); }
- /* play nice and yield if necessary */ - if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) { - __cgroup_rstat_unlock(cgrp, cpu); - if (!cond_resched()) - cpu_relax(); - __cgroup_rstat_lock(cgrp, cpu); - } + /* play nice and avoid disabling interrupts for a long time */ + __cgroup_rstat_unlock(cgrp, cpu); + if (!cond_resched()) + cpu_relax(); + __cgroup_rstat_lock(cgrp, cpu); } }
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 864a1121bf08..74d453ec750a 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -897,6 +897,19 @@ int dma_set_coherent_mask(struct device *dev, u64 mask) } EXPORT_SYMBOL(dma_set_coherent_mask);
+static bool __dma_addressing_limited(struct device *dev) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < + dma_get_required_mask(dev)) + return true; + + if (unlikely(ops) || use_dma_iommu(dev)) + return false; + return !dma_direct_all_ram_mapped(dev); +} + /** * dma_addressing_limited - return if the device is addressing limited * @dev: device to check @@ -907,15 +920,11 @@ EXPORT_SYMBOL(dma_set_coherent_mask); */ bool dma_addressing_limited(struct device *dev) { - const struct dma_map_ops *ops = get_dma_ops(dev); - - if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < - dma_get_required_mask(dev)) - return true; - - if (unlikely(ops) || use_dma_iommu(dev)) + if (!__dma_addressing_limited(dev)) return false; - return !dma_direct_all_ram_mapped(dev); + + dev_dbg(dev, "device is DMA addressing limited\n"); + return true; } EXPORT_SYMBOL_GPL(dma_addressing_limited);
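Editor's note: the refactor splits the actual check into __dma_addressing_limited() and makes the exported dma_addressing_limited() emit a dev_dbg() when the device really is limited; the answer callers get is unchanged. A short caller sketch (the driver's reaction to a limited device is illustrative):

#include <linux/dma-mapping.h>

static int example_probe_dma(struct device *dev)
{
        int ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        if (dma_addressing_limited(dev)) {
                /* the core now also logs a dev_dbg() here */
                dev_info(dev, "device cannot address all of RAM, limiting queue depth\n");
        }
        return 0;
}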
diff --git a/kernel/events/core.c b/kernel/events/core.c index edafe9fc4bdd..285a4548450b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1195,6 +1195,12 @@ static void perf_assert_pmu_disabled(struct pmu *pmu) WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0); }
+static inline void perf_pmu_read(struct perf_event *event)
+{
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		event->pmu->read(event);
+}
+
 static void get_ctx(struct perf_event_context *ctx)
 {
 	refcount_inc(&ctx->refcount);
@@ -3482,8 +3488,7 @@ static void __perf_event_sync_stat(struct perf_event *event,
 	 * we know the event must be on the current CPU, therefore we
 	 * don't need to use it.
 	 */
-	if (event->state == PERF_EVENT_STATE_ACTIVE)
-		event->pmu->read(event);
+	perf_pmu_read(event);
perf_event_update_time(event);
@@ -4634,15 +4639,8 @@ static void __perf_event_read(void *info)
pmu->read(event);
-	for_each_sibling_event(sub, event) {
-		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
-			/*
-			 * Use sibling's PMU rather than @event's since
-			 * sibling could be on different (eg: software) PMU.
-			 */
-			sub->pmu->read(sub);
-		}
-	}
+	for_each_sibling_event(sub, event)
+		perf_pmu_read(sub);
data->ret = pmu->commit_txn(pmu);
@@ -7408,9 +7406,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) values[n++] = running;
-	if ((leader != event) &&
-	    (leader->state == PERF_EVENT_STATE_ACTIVE))
-		leader->pmu->read(leader);
+	if ((leader != event) && !handle->skip_read)
+		perf_pmu_read(leader);
values[n++] = perf_event_count(leader, self); if (read_format & PERF_FORMAT_ID) @@ -7423,9 +7420,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, for_each_sibling_event(sub, leader) { n = 0;
-		if ((sub != event) &&
-		    (sub->state == PERF_EVENT_STATE_ACTIVE))
-			sub->pmu->read(sub);
+		if ((sub != event) && !handle->skip_read)
+			perf_pmu_read(sub);
values[n++] = perf_event_count(sub, self); if (read_format & PERF_FORMAT_ID) @@ -7484,6 +7480,9 @@ void perf_output_sample(struct perf_output_handle *handle, { u64 sample_type = data->type;
+	if (data->sample_flags & PERF_SAMPLE_READ)
+		handle->skip_read = 1;
+
 	perf_output_put(handle, *header);
if (sample_type & PERF_SAMPLE_IDENTIFIER) @@ -11978,40 +11977,51 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) if (ctx) perf_event_ctx_unlock(event->group_leader, ctx);
- if (!ret) { - if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && - has_extended_regs(event)) - ret = -EOPNOTSUPP; + if (ret) + goto err_pmu;
- if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && - event_has_any_exclude_flag(event)) - ret = -EINVAL; + if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && + has_extended_regs(event)) { + ret = -EOPNOTSUPP; + goto err_destroy; + }
- if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) { - const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu); - struct cpumask *pmu_cpumask = perf_scope_cpumask(pmu->scope); - int cpu; - - if (pmu_cpumask && cpumask) { - cpu = cpumask_any_and(pmu_cpumask, cpumask); - if (cpu >= nr_cpu_ids) - ret = -ENODEV; - else - event->event_caps |= PERF_EV_CAP_READ_SCOPE; - } else { - ret = -ENODEV; - } - } + if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && + event_has_any_exclude_flag(event)) { + ret = -EINVAL; + goto err_destroy; + } + + if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) { + const struct cpumask *cpumask; + struct cpumask *pmu_cpumask; + int cpu;
- if (ret && event->destroy) - event->destroy(event); + cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu); + pmu_cpumask = perf_scope_cpumask(pmu->scope); + + ret = -ENODEV; + if (!pmu_cpumask || !cpumask) + goto err_destroy; + + cpu = cpumask_any_and(pmu_cpumask, cpumask); + if (cpu >= nr_cpu_ids) + goto err_destroy; + + event->event_caps |= PERF_EV_CAP_READ_SCOPE; }
-	if (ret) {
-		event->pmu = NULL;
-		module_put(pmu->module);
+	return 0;
+
+err_destroy:
+	if (event->destroy) {
+		event->destroy(event);
+		event->destroy = NULL;
 	}
+err_pmu:
+	event->pmu = NULL;
+	module_put(pmu->module);
 	return ret;
 }
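
The perf_try_init_event() rework above straightens the nested "if (!ret)" checks into a single success path with err_destroy/err_pmu labels, so every failure drops the module reference and clears event->pmu exactly once. A generic sketch of that goto-unwind shape, assuming made-up setup_a()/setup_b()/undo_a() helpers and a placeholder struct obj:

	/* Generic error-unwind pattern; obj, setup_*() and undo_a() are placeholders. */
	static int init_object_sketch(struct obj *o)
	{
		int ret;

		ret = setup_a(o);
		if (ret)
			goto err;		/* nothing to undo yet */

		ret = setup_b(o);
		if (ret)
			goto err_undo_a;	/* undo only what already succeeded */

		return 0;

	err_undo_a:
		undo_a(o);
	err:
		return ret;
	}
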
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 6c2cb4e4f48d..8f3f624419aa 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -950,9 +950,10 @@ static int hw_breakpoint_event_init(struct perf_event *bp) return -ENOENT;
/* - * no branch sampling for breakpoint events + * Check if breakpoint type is supported before proceeding. + * Also, no branch sampling for breakpoint events. */ - if (has_branch_stack(bp)) + if (!hw_breakpoint_slots_cached(find_slot_idx(bp->attr.bp_type)) || has_branch_stack(bp)) return -EOPNOTSUPP;
err = register_perf_hw_breakpoint(bp); diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index bbfa22c0a159..6ecbbc57cdfd 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -185,6 +185,7 @@ __perf_output_begin(struct perf_output_handle *handle,
handle->rb = rb; handle->event = event; + handle->flags = 0;
have_lost = local_read(&rb->lost); if (unlikely(have_lost)) { diff --git a/kernel/exit.c b/kernel/exit.c index 619f0014c33b..56b8bd9487b4 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -742,10 +742,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
tsk->exit_state = EXIT_ZOMBIE; /* - * sub-thread or delay_group_leader(), wake up the - * PIDFD_THREAD waiters. + * Ignore thread-group leaders that exited before all + * subthreads did. */ - if (!thread_group_empty(tsk)) + if (!delay_group_leader(tsk)) do_notify_pidfd(tsk);
if (unlikely(tsk->ptrace)) { diff --git a/kernel/fork.c b/kernel/fork.c index 12decadff468..97c9afe3efc3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -505,10 +505,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) vma_numab_state_init(new); dup_anon_vma_name(orig, new);
-	/* track_pfn_copy() will later take care of copying internal state. */
-	if (unlikely(new->vm_flags & VM_PFNMAP))
-		untrack_pfn_clear(new);
-
 	return new;
 }
@@ -699,6 +695,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		tmp = vm_area_dup(mpnt);
 		if (!tmp)
 			goto fail_nomem;
+
+		/* track_pfn_copy() will later take care of copying internal state. */
+		if (unlikely(tmp->vm_flags & VM_PFNMAP))
+			untrack_pfn_clear(tmp);
+
 		retval = vma_dup_policy(mpnt, tmp);
 		if (retval)
 			goto fail_nomem_policy;
diff --git a/kernel/padata.c b/kernel/padata.c
index 22770372bdf3..3e0ef0753e73 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -358,7 +358,8 @@ static void padata_reorder(struct parallel_data *pd)
 		 * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
 		 */
 		padata_get_pd(pd);
-		queue_work(pinst->serial_wq, &pd->reorder_work);
+		if (!queue_work(pinst->serial_wq, &pd->reorder_work))
+			padata_put_pd(pd);
 	}
 }
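
The padata hunk above follows the usual "one reference per queued execution" rule: padata_get_pd() hands a reference to the reorder work, and when queue_work() returns false the work was already pending, so nothing extra will run to consume that reference and it is dropped on the spot. A small sketch of the rule with a hypothetical kref-counted object:

	#include <linux/kref.h>
	#include <linux/workqueue.h>

	/* Sketch; struct my_obj, its kref and my_obj_release() are hypothetical. */
	static void submit_sketch(struct my_obj *obj)
	{
		kref_get(&obj->ref);			/* reference owned by the work item */
		if (!queue_work(obj->wq, &obj->work))	/* false: work was already queued */
			kref_put(&obj->ref, my_obj_release);
	}
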
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 881a26e18c65..3a91b739e8f3 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -3310,7 +3310,12 @@ void console_unblank(void) */ cookie = console_srcu_read_lock(); for_each_console_srcu(c) { - if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) { + short flags = console_srcu_read_flags(c); + + if (flags & CON_SUSPENDED) + continue; + + if ((flags & CON_ENABLED) && c->unblank) { found_unblank = true; break; } @@ -3347,7 +3352,12 @@ void console_unblank(void)
cookie = console_srcu_read_lock(); for_each_console_srcu(c) { - if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) + short flags = console_srcu_read_flags(c); + + if (flags & CON_SUSPENDED) + continue; + + if ((flags & CON_ENABLED) && c->unblank) c->unblank(); } console_srcu_read_unlock(cookie); diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index feb3ac1dc5d5..f87c9d6d36fc 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -162,7 +162,7 @@ static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s) { unsigned long cur_s = READ_ONCE(*sp);
-	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
+	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (3 * RCU_SEQ_STATE_MASK + 1));
 }
/* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8e52c1dd0628..4ed863219521 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1822,10 +1822,14 @@ static noinline_for_stack bool rcu_gp_init(void)
/* Advance to a new grace period and initialize state. */ record_gp_stall_check_time(); + /* + * A new wait segment must be started before gp_seq advanced, so + * that previous gp waiters won't observe the new gp_seq. + */ + start_new_poll = rcu_sr_normal_gp_init(); /* Record GP times before starting GP, hence rcu_seq_start(). */ rcu_seq_start(&rcu_state.gp_seq); ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); - start_new_poll = rcu_sr_normal_gp_init(); trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap); raw_spin_unlock_irq_rcu_node(rnp); @@ -4183,14 +4187,17 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); */ void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) { - struct rcu_node *rnp = rcu_get_root(); - /* * Any prior manipulation of RCU-protected data must happen * before the loads from ->gp_seq and ->expedited_sequence. */ smp_mb(); /* ^^^ */ - rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq); + + // Yes, rcu_state.gp_seq, not rnp_root->gp_seq, the latter's use + // in poll_state_synchronize_rcu_full() notwithstanding. Use of + // the latter here would result in too-short grace periods due to + // interactions with newly onlined CPUs. + rgosp->rgos_norm = rcu_seq_snap(&rcu_state.gp_seq); rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); } EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 1c7cbd145d5e..304e3405e6ec 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -832,8 +832,17 @@ void rcu_read_unlock_strict(void) { struct rcu_data *rdp;
- if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread) + if (irqs_disabled() || in_atomic_preempt_off() || !rcu_state.gp_kthread) return; + + /* + * rcu_report_qs_rdp() can only be invoked with a stable rdp and + * from the local CPU. + * + * The in_atomic_preempt_off() check ensures that we come here holding + * the last preempt_count (which will get dropped once we return to + * __rcu_read_unlock(). + */ rdp = this_cpu_ptr(&rcu_data); rdp->cpu_no_qs.b.norm = false; rcu_report_qs_rdp(rdp); @@ -974,13 +983,16 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) */ static void rcu_flavor_sched_clock_irq(int user) { - if (user || rcu_is_cpu_rrupt_from_idle()) { + if (user || rcu_is_cpu_rrupt_from_idle() || + (IS_ENABLED(CONFIG_PREEMPT_COUNT) && + (preempt_count() == HARDIRQ_OFFSET))) {
/* * Get here if this CPU took its interrupt from user - * mode or from the idle loop, and if this is not a - * nested interrupt. In this case, the CPU is in - * a quiescent state, so note it. + * mode, from the idle loop without this being a nested + * interrupt, or while not holding the task preempt count + * (with PREEMPT_COUNT=y). In this case, the CPU is in a + * quiescent state, so note it. * * No memory barrier is required here because rcu_qs() * references only CPU-local variables that other CPUs diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 990d0828bf2a..443f6a9ef3f8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -71,10 +71,10 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; /* * Minimal preemption granularity for CPU-bound tasks: * - * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + * (default: 0.70 msec * (1 + ilog(ncpus)), units: nanoseconds) */ -unsigned int sysctl_sched_base_slice = 750000ULL; -static unsigned int normalized_sysctl_sched_base_slice = 750000ULL; +unsigned int sysctl_sched_base_slice = 700000ULL; +static unsigned int normalized_sysctl_sched_base_slice = 700000ULL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
diff --git a/kernel/signal.c b/kernel/signal.c index 2ae45e6eb6bb..468b589c39e6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2063,8 +2063,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig) WARN_ON_ONCE(!tsk->ptrace && (tsk->group_leader != tsk || !thread_group_empty(tsk))); /* - * tsk is a group leader and has no threads, wake up the - * non-PIDFD_THREAD waiters. + * Notify for thread-group leaders without subthreads. */ if (thread_group_empty(tsk)) do_notify_pidfd(tsk); diff --git a/kernel/softirq.c b/kernel/softirq.c index 8c4524ce65fa..00ff17635041 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -126,6 +126,18 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = { .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock), };
+#ifdef CONFIG_DEBUG_LOCK_ALLOC +static struct lock_class_key bh_lock_key; +struct lockdep_map bh_lock_map = { + .name = "local_bh", + .key = &bh_lock_key, + .wait_type_outer = LD_WAIT_FREE, + .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */ + .lock_type = LD_LOCK_PERCPU, +}; +EXPORT_SYMBOL_GPL(bh_lock_map); +#endif + /** * local_bh_blocked() - Check for idle whether BH processing is blocked * @@ -148,6 +160,8 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
WARN_ON_ONCE(in_hardirq());
+ lock_map_acquire_read(&bh_lock_map); + /* First entry of a task into a BH disabled section? */ if (!current->softirq_disable_cnt) { if (preemptible()) { @@ -211,6 +225,8 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) WARN_ON_ONCE(in_hardirq()); lockdep_assert_irqs_enabled();
+ lock_map_release(&bh_lock_map); + local_irq_save(flags); curcnt = __this_cpu_read(softirq_ctrl.cnt);
@@ -261,6 +277,8 @@ static inline void ksoftirqd_run_begin(void) /* Counterpart to ksoftirqd_run_begin() */ static inline void ksoftirqd_run_end(void) { + /* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */ + lock_map_release(&bh_lock_map); __local_bh_enable(SOFTIRQ_OFFSET, true); WARN_ON_ONCE(in_interrupt()); local_irq_enable(); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index db9c06bb2311..06fbc226341f 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -117,16 +117,6 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = .csd = CSD_INIT(retrigger_next_event, NULL) };
-static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { - /* Make sure we catch unsupported clockids */ - [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES, - - [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, - [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, - [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, - [CLOCK_TAI] = HRTIMER_BASE_TAI, -}; - static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base) { if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) @@ -1597,14 +1587,19 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
static inline int hrtimer_clockid_to_base(clockid_t clock_id) { - if (likely(clock_id < MAX_CLOCKS)) { - int base = hrtimer_clock_to_base_table[clock_id]; - - if (likely(base != HRTIMER_MAX_CLOCK_BASES)) - return base; + switch (clock_id) { + case CLOCK_REALTIME: + return HRTIMER_BASE_REALTIME; + case CLOCK_MONOTONIC: + return HRTIMER_BASE_MONOTONIC; + case CLOCK_BOOTTIME: + return HRTIMER_BASE_BOOTTIME; + case CLOCK_TAI: + return HRTIMER_BASE_TAI; + default: + WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id); + return HRTIMER_BASE_MONOTONIC; } - WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id); - return HRTIMER_BASE_MONOTONIC; }
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 4576aaed13b2..c5d9bfbb082b 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -118,6 +118,7 @@ static int posix_timer_add(struct k_itimer *timer) return id; } spin_unlock(&hash_lock); + cond_resched(); } /* POSIX return code when no timer ID could be allocated */ return -EAGAIN; @@ -513,14 +514,21 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event, if (error) goto out;
- spin_lock_irq(¤t->sighand->siglock); - /* This makes the timer valid in the hash table */ - WRITE_ONCE(new_timer->it_signal, current->signal); - hlist_add_head(&new_timer->list, ¤t->signal->posix_timers); - spin_unlock_irq(¤t->sighand->siglock); /* - * After unlocking sighand::siglock @new_timer is subject to - * concurrent removal and cannot be touched anymore + * timer::it_lock ensures that __lock_timer() observes a fully + * initialized timer when it observes a valid timer::it_signal. + * + * sighand::siglock is required to protect signal::posix_timers. + */ + scoped_guard (spinlock_irq, &new_timer->it_lock) { + guard(spinlock)(¤t->sighand->siglock); + /* This makes the timer valid in the hash table */ + WRITE_ONCE(new_timer->it_signal, current->signal); + hlist_add_head(&new_timer->list, ¤t->signal->posix_timers); + } + /* + * After unlocking @new_timer is subject to concurrent removal and + * cannot be touched anymore */ return 0; out: diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 1c311c46da50..cfbb46cc4e76 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -46,7 +46,7 @@ static void print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer, int idx, u64 now) { - SEQ_printf(m, " #%d: <%pK>, %ps", idx, taddr, timer->function); + SEQ_printf(m, " #%d: <%p>, %ps", idx, taddr, timer->function); SEQ_printf(m, ", S:%02x", timer->state); SEQ_printf(m, "\n"); SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n", @@ -98,7 +98,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base, static void print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) { - SEQ_printf(m, " .base: %pK\n", base); + SEQ_printf(m, " .base: %p\n", base); SEQ_printf(m, " .index: %d\n", base->index);
SEQ_printf(m, " .resolution: %u nsecs\n", hrtimer_resolution); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 06104c2c66ab..e773b0adcfc0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3343,10 +3343,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) } EXPORT_SYMBOL_GPL(trace_vbprintk);
-__printf(3, 0) -static int -__trace_array_vprintk(struct trace_buffer *buffer, - unsigned long ip, const char *fmt, va_list args) +static __printf(3, 0) +int __trace_array_vprintk(struct trace_buffer *buffer, + unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_print; struct ring_buffer_event *event; @@ -3399,7 +3398,6 @@ __trace_array_vprintk(struct trace_buffer *buffer, return len; }
-__printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { @@ -3429,7 +3427,6 @@ int trace_array_vprintk(struct trace_array *tr, * Note, trace_array_init_printk() must be called on @tr before this * can be used. */ -__printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { @@ -3474,7 +3471,6 @@ int trace_array_init_printk(struct trace_array *tr) } EXPORT_SYMBOL_GPL(trace_array_init_printk);
-__printf(3, 4) int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...) { @@ -3490,7 +3486,6 @@ int trace_array_printk_buf(struct trace_buffer *buffer, return ret; }
-__printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(printk_trace, ip, fmt, args); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 04ea327198ba..82da3ac14024 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -818,13 +818,15 @@ static inline void __init disable_tracing_selftest(const char *reason)
extern void *head_page(struct trace_array_cpu *data); extern unsigned long long ns2usecs(u64 nsec); -extern int -trace_vbprintk(unsigned long ip, const char *fmt, va_list args); -extern int -trace_vprintk(unsigned long ip, const char *fmt, va_list args); -extern int -trace_array_vprintk(struct trace_array *tr, - unsigned long ip, const char *fmt, va_list args); + +__printf(2, 0) +int trace_vbprintk(unsigned long ip, const char *fmt, va_list args); +__printf(2, 0) +int trace_vprintk(unsigned long ip, const char *fmt, va_list args); +__printf(3, 0) +int trace_array_vprintk(struct trace_array *tr, + unsigned long ip, const char *fmt, va_list args); +__printf(3, 4) int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...); void trace_printk_seq(struct trace_seq *s); diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c index 2ef2e1b80091..2f844c279a3e 100644 --- a/kernel/vhost_task.c +++ b/kernel/vhost_task.c @@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(vhost_task_stop); * @arg: data to be passed to fn and handled_kill * @name: the thread's name * - * This returns a specialized task for use by the vhost layer or NULL on + * This returns a specialized task for use by the vhost layer or ERR_PTR() on * failure. The returned task is inactive, and the caller must fire it up * through vhost_task_start(). */ diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index e49deddd3de9..7d1dfbb99b39 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -190,7 +190,7 @@ EXPORT_SYMBOL(dql_completed); void dql_reset(struct dql *dql) { /* Reset all dynamic values */ - dql->limit = 0; + dql->limit = dql->min_limit; dql->num_queued = 0; dql->num_completed = 0; dql->last_obj_cnt = 0; diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile index 2f58fafbbddd..fc7b2b7ef4b2 100644 --- a/lib/lzo/Makefile +++ b/lib/lzo/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -lzo_compress-objs := lzo1x_compress.o +lzo_compress-objs := lzo1x_compress.o lzo1x_compress_safe.o lzo_decompress-objs := lzo1x_decompress_safe.o
obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 47d6d43ea957..7b10ca86a893 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -18,11 +18,22 @@ #include <linux/lzo.h> #include "lzodefs.h"
-static noinline size_t -lzo1x_1_do_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - size_t ti, void *wrkmem, signed char *state_offset, - const unsigned char bitstream_version) +#undef LZO_UNSAFE + +#ifndef LZO_SAFE +#define LZO_UNSAFE 1 +#define LZO_SAFE(name) name +#define HAVE_OP(x) 1 +#endif + +#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun + +static noinline int +LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len, + unsigned char **out, unsigned char *op_end, + size_t *tp, void *wrkmem, + signed char *state_offset, + const unsigned char bitstream_version) { const unsigned char *ip; unsigned char *op; @@ -30,8 +41,9 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, const unsigned char * const ip_end = in + in_len - 20; const unsigned char *ii; lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; + size_t ti = *tp;
- op = out; + op = *out; ip = in; ii = ip; ip += ti < 4 ? 4 - ti : 0; @@ -116,25 +128,32 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, if (t != 0) { if (t <= 3) { op[*state_offset] |= t; + NEED_OP(4); COPY4(op, ii); op += t; } else if (t <= 16) { + NEED_OP(17); *op++ = (t - 3); COPY8(op, ii); COPY8(op + 8, ii + 8); op += t; } else { if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (unlikely(tt > 255)) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -151,6 +170,7 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, if (unlikely(run_length)) { ip += run_length; run_length -= MIN_ZERO_RUN_LENGTH; + NEED_OP(4); put_unaligned_le32((run_length << 21) | 0xfffc18 | (run_length & 0x7), op); op += 4; @@ -243,10 +263,12 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, ip += m_len; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; + NEED_OP(2); *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = (m_off >> 3); } else if (m_off <= M3_MAX_OFFSET) { m_off -= 1; + NEED_OP(1); if (m_len <= M3_MAX_LEN) *op++ = (M3_MARKER | (m_len - 2)); else { @@ -254,14 +276,18 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, *op++ = M3_MARKER | 0; while (unlikely(m_len > 255)) { m_len -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } else { m_off -= 0x4000; + NEED_OP(1); if (m_len <= M4_MAX_LEN) *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); @@ -282,11 +308,14 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); while (unlikely(m_len > 255)) { + NEED_OP(1); m_len -= 255; *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } @@ -295,14 +324,20 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, ii = ip; goto next; } - *out_len = op - out; - return in_end - (ii - ti); + *out = op; + *tp = in_end - (ii - ti); + return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; }
-static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem, const unsigned char bitstream_version) +static int LZO_SAFE(lzogeneric1x_1_compress)( + const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem, const unsigned char bitstream_version) { + unsigned char * const op_end = out + *out_len; const unsigned char *ip = in; unsigned char *op = out; unsigned char *data_start; @@ -326,14 +361,18 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, while (l > 20) { size_t ll = min_t(size_t, l, m4_max_offset + 1); uintptr_t ll_end = (uintptr_t) ip + ll; + int err; + if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); - t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, - &state_offset, bitstream_version); + err = LZO_SAFE(lzo1x_1_do_compress)( + ip, ll, &op, op_end, &t, wrkmem, + &state_offset, bitstream_version); + if (err != LZO_E_OK) + return err; ip += ll; - op += *out_len; l -= ll; } t += l; @@ -342,20 +381,26 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, const unsigned char *ii = in + in_len - t;
if (op == data_start && t <= 238) { + NEED_OP(1); *op++ = (17 + t); } else if (t <= 3) { op[state_offset] |= t; } else if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (tt > 255) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); if (t >= 16) do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -368,31 +413,38 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, } while (--t > 0); }
+ NEED_OP(3); *op++ = M4_MARKER | 1; *op++ = 0; *op++ = 0;
*out_len = op - out; return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; }
-int lzo1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, 0); }
-int lzorle1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, - wrkmem, LZO_VERSION); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, LZO_VERSION); }
-EXPORT_SYMBOL_GPL(lzo1x_1_compress);
-EXPORT_SYMBOL_GPL(lzorle1x_1_compress);
+EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress));
+EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress));
+#ifndef LZO_UNSAFE MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor"); +#endif diff --git a/lib/lzo/lzo1x_compress_safe.c b/lib/lzo/lzo1x_compress_safe.c new file mode 100644 index 000000000000..371c9f849492 --- /dev/null +++ b/lib/lzo/lzo1x_compress_safe.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * LZO1X Compressor from LZO + * + * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer markus@oberhumer.com + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for Linux kernel use by: + * Nitin Gupta nitingupta910@gmail.com + * Richard Purdie rpurdie@openedhand.com + */ + +#define LZO_SAFE(name) name##_safe +#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) + +#include "lzo1x_compress.c" diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2d1e402f06f2..dcd2d0e15e13 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1139,7 +1139,6 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, { struct mem_cgroup *iter; int ret = 0; - int i = 0;
BUG_ON(mem_cgroup_is_root(memcg));
@@ -1149,10 +1148,9 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); while (!ret && (task = css_task_iter_next(&it))) { - /* Avoid potential softlockup warning */ - if ((++i & 1023) == 0) - cond_resched(); ret = fn(task, arg); + /* Avoid potential softlockup warning */ + cond_resched(); } css_task_iter_end(&it); if (ret) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ebe1ec661492..882903f42300 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4381,6 +4381,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, }
retry: + /* + * Deal with possible cpuset update races or zonelist updates to avoid + * infinite retries. + */ + if (check_retry_cpuset(cpuset_mems_cookie, ac) || + check_retry_zonelist(zonelist_iter_cookie)) + goto restart; + /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ if (alloc_flags & ALLOC_KSWAPD) wake_all_kswapds(order, gfp_mask, ac); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 358bd3083b88..cc04e501b1c5 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -4097,8 +4097,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) * would be a good heuristic for when to shrink the vm_area? */ if (size <= old_size) { - /* Zero out "freed" memory. */ - if (want_init_on_free()) + /* Zero out "freed" memory, potentially for future realloc. */ + if (want_init_on_free() || want_init_on_alloc(flags)) memset((void *)p + size, 0, old_size - size); vm->requested_size = size; kasan_poison_vmalloc(p + size, old_size - size); @@ -4111,10 +4111,13 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) if (size <= alloced_size) { kasan_unpoison_vmalloc(p + old_size, size - old_size, KASAN_VMALLOC_PROT_NORMAL); - /* Zero out "alloced" memory. */ - if (want_init_on_alloc(flags)) - memset((void *)p + old_size, 0, size - old_size); + /* + * No need to zero memory here, as unused memory will have + * already been zeroed at initial allocation time or during + * realloc shrink time. + */ vm->requested_size = size; + return (void *)p; }
/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */ diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index bc5b42fce2b8..889463340351 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -932,6 +932,9 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, hdev->sco_pkts = 8; }
+ if (!read_voice_setting_capable(hdev)) + hdev->sco_pkts = 0; + hdev->acl_cnt = hdev->acl_pkts; hdev->sco_cnt = hdev->sco_pkts;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index c219a8c596d3..66fa5d6fea6c 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1411,7 +1411,8 @@ static void l2cap_request_info(struct l2cap_conn *conn) sizeof(req), &req); }
-static bool l2cap_check_enc_key_size(struct hci_conn *hcon) +static bool l2cap_check_enc_key_size(struct hci_conn *hcon, + struct l2cap_chan *chan) { /* The minimum encryption key size needs to be enforced by the * host stack before establishing any L2CAP connections. The @@ -1425,7 +1426,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) int min_key_size = hcon->hdev->min_enc_key_size;
/* On FIPS security level, key size must be 16 bytes */ - if (hcon->sec_level == BT_SECURITY_FIPS) + if (chan->sec_level == BT_SECURITY_FIPS) min_key_size = 16;
return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || @@ -1453,7 +1454,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) !__l2cap_no_conn_pending(chan)) return;
- if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -1528,7 +1529,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) continue; }
- if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else l2cap_chan_close(chan, ECONNREFUSED); @@ -3957,7 +3958,7 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(L2CAP_PSM_SDP) && (!hci_conn_check_link_mode(conn->hcon) || - !l2cap_check_enc_key_size(conn->hcon))) { + !l2cap_check_enc_key_size(conn->hcon, pchan))) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; goto response; @@ -7317,7 +7318,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) }
if (chan->state == BT_CONNECT) { - if (!status && l2cap_check_enc_key_size(hcon)) + if (!status && l2cap_check_enc_key_size(hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -7327,7 +7328,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) struct l2cap_conn_rsp rsp; __u16 res, stat;
- if (!status && l2cap_check_enc_key_size(hcon)) { + if (!status && l2cap_check_enc_key_size(hcon, chan)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 1a52a0bca086..7e1ad229e133 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -1040,7 +1040,7 @@ static int br_mdb_add_group(const struct br_mdb_config *cfg,
/* host join */ if (!port) { - if (mp->host_joined) { + if (mp->host_joined && !(cfg->nlflags & NLM_F_REPLACE)) { NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host"); return -EEXIST; } diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c index 98aea5485aae..a8c67035e23c 100644 --- a/net/bridge/br_nf_core.c +++ b/net/bridge/br_nf_core.c @@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = { * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. */ -static const u32 br_dst_default_metrics[RTAX_MAX] = { - [RTAX_MTU - 1] = 1500, -}; - void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable;
rcuref_init(&rt->dst.__rcuref, 1); rt->dst.dev = br->dev; - dst_init_metrics(&rt->dst, br_dst_default_metrics, true); + dst_init_metrics(&rt->dst, br->metrics, false); + dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu); rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; rt->dst.ops = &fake_dst_ops; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 041f6e571a20..df502cc1191c 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -505,6 +505,7 @@ struct net_bridge { struct rtable fake_rtable; struct rt6_info fake_rt6_info; }; + u32 metrics[RTAX_MAX]; #endif u16 group_fwd_mask; u16 group_fwd_mask_required; diff --git a/net/can/bcm.c b/net/can/bcm.c index 217049fa496e..e33ff2a5b20c 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -58,6 +58,7 @@ #include <linux/can/skb.h> #include <linux/can/bcm.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <net/sock.h> #include <net/net_namespace.h>
@@ -122,6 +123,7 @@ struct bcm_op { struct canfd_frame last_sframe; struct sock *sk; struct net_device *rx_reg_dev; + spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */ };
struct bcm_sock { @@ -217,7 +219,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); seq_printf(m, " <<<\n");
- list_for_each_entry(op, &bo->rx_ops, list) { + rcu_read_lock(); + + list_for_each_entry_rcu(op, &bo->rx_ops, list) {
unsigned long reduction;
@@ -273,6 +277,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, "# sent %ld\n", op->frames_abs); } seq_putc(m, '\n'); + + rcu_read_unlock(); + return 0; } #endif /* CONFIG_PROC_FS */ @@ -285,13 +292,18 @@ static void bcm_can_tx(struct bcm_op *op) { struct sk_buff *skb; struct net_device *dev; - struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; + struct canfd_frame *cf; int err;
/* no target device? => exit */ if (!op->ifindex) return;
+ /* read currframe under lock protection */ + spin_lock_bh(&op->bcm_tx_lock); + cf = op->frames + op->cfsiz * op->currframe; + spin_unlock_bh(&op->bcm_tx_lock); + dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (!dev) { /* RFC: should this bcm_op remove itself here? */ @@ -312,6 +324,10 @@ static void bcm_can_tx(struct bcm_op *op) skb->dev = dev; can_skb_set_owner(skb, op->sk); err = can_send(skb, 1); + + /* update currframe and count under lock protection */ + spin_lock_bh(&op->bcm_tx_lock); + if (!err) op->frames_abs++;
@@ -320,6 +336,11 @@ static void bcm_can_tx(struct bcm_op *op) /* reached last frame? */ if (op->currframe >= op->nframes) op->currframe = 0; + + if (op->count > 0) + op->count--; + + spin_unlock_bh(&op->bcm_tx_lock); out: dev_put(dev); } @@ -430,7 +451,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) struct bcm_msg_head msg_head;
if (op->kt_ival1 && (op->count > 0)) { - op->count--; + bcm_can_tx(op); if (!op->count && (op->flags & TX_COUNTEVT)) {
/* create notification to user */ @@ -445,7 +466,6 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
bcm_send_to_user(op, &msg_head, NULL, 0); } - bcm_can_tx(op);
} else if (op->kt_ival2) { bcm_can_tx(op); @@ -843,7 +863,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, REGMASK(op->can_id), bcm_rx_handler, op);
- list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return 1; /* done */ } @@ -863,7 +883,7 @@ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh, list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { - list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return 1; /* done */ } @@ -956,6 +976,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } op->flags = msg_head->flags;
+ /* only lock for unlikely count/nframes/currframe changes */ + if (op->nframes != msg_head->nframes || + op->flags & TX_RESET_MULTI_IDX || + op->flags & SETTIMER) { + + spin_lock_bh(&op->bcm_tx_lock); + + if (op->nframes != msg_head->nframes || + op->flags & TX_RESET_MULTI_IDX) { + /* potentially update changed nframes */ + op->nframes = msg_head->nframes; + /* restart multiple frame transmission */ + op->currframe = 0; + } + + if (op->flags & SETTIMER) + op->count = msg_head->count; + + spin_unlock_bh(&op->bcm_tx_lock); + } + } else { /* insert new BCM operation for the given can_id */
@@ -963,9 +1004,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (!op) return -ENOMEM;
+ spin_lock_init(&op->bcm_tx_lock); op->can_id = msg_head->can_id; op->cfsiz = CFSIZ(msg_head->flags); op->flags = msg_head->flags; + op->nframes = msg_head->nframes; + + if (op->flags & SETTIMER) + op->count = msg_head->count;
/* create array for CAN frames and copy the data */ if (msg_head->nframes > 1) { @@ -1024,22 +1070,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
- if (op->nframes != msg_head->nframes) { - op->nframes = msg_head->nframes; - /* start multiple frame transmission with index 0 */ - op->currframe = 0; - } - - /* check flags */ - - if (op->flags & TX_RESET_MULTI_IDX) { - /* start multiple frame transmission with index 0 */ - op->currframe = 0; - } - if (op->flags & SETTIMER) { /* set timer values */ - op->count = msg_head->count; op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); @@ -1056,11 +1088,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->flags |= TX_ANNOUNCE; }
- if (op->flags & TX_ANNOUNCE) { + if (op->flags & TX_ANNOUNCE) bcm_can_tx(op); - if (op->count) - op->count--; - }
if (op->flags & STARTTIMER) bcm_tx_start_timer(op); @@ -1276,7 +1305,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, bcm_rx_handler, op, "bcm", sk); if (err) { /* this bcm rx op is broken -> remove it */ - list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return err; } diff --git a/net/core/dev.c b/net/core/dev.c index 7b7b36c43c82..2ba2160dd093 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6034,16 +6034,18 @@ static DEFINE_PER_CPU(struct work_struct, flush_works); static void flush_backlog(struct work_struct *work) { struct sk_buff *skb, *tmp; + struct sk_buff_head list; struct softnet_data *sd;
+ __skb_queue_head_init(&list); local_bh_disable(); sd = this_cpu_ptr(&softnet_data);
backlog_lock_irq_disable(sd); skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { - if (skb->dev->reg_state == NETREG_UNREGISTERING) { + if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); - dev_kfree_skb_irq(skb); + __skb_queue_tail(&list, skb); rps_input_queue_head_incr(sd); } } @@ -6051,14 +6053,16 @@ static void flush_backlog(struct work_struct *work)
local_lock_nested_bh(&softnet_data.process_queue_bh_lock); skb_queue_walk_safe(&sd->process_queue, skb, tmp) { - if (skb->dev->reg_state == NETREG_UNREGISTERING) { + if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->process_queue); - kfree_skb(skb); + __skb_queue_tail(&list, skb); rps_input_queue_head_incr(sd); } } local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); local_bh_enable(); + + __skb_queue_purge_reason(&list, SKB_DROP_REASON_DEV_READY); }
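
The flush_backlog() change above stops freeing skbs while the backlog lock is held with interrupts disabled; victims are unlinked onto a local list and purged only after the locks are released. The same collect-then-free shape in isolation, with a caller-supplied queue and lock standing in for the softnet queues:

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/* Sketch of "collect under the lock, free outside it". */
	static void drain_sketch(struct sk_buff_head *queue, spinlock_t *lock)
	{
		struct sk_buff *skb, *tmp;
		struct sk_buff_head doomed;

		__skb_queue_head_init(&doomed);

		spin_lock_irq(lock);
		skb_queue_walk_safe(queue, skb, tmp) {
			__skb_unlink(skb, queue);
			__skb_queue_tail(&doomed, skb);
		}
		spin_unlock_irq(lock);

		__skb_queue_purge(&doomed);	/* all freeing happens without the lock */
	}
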
static bool flush_required(int cpu) diff --git a/net/core/dev.h b/net/core/dev.h index 2e3bb7669984..764e0097ccf2 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -148,6 +148,18 @@ void xdp_do_check_flushed(struct napi_struct *napi); static inline void xdp_do_check_flushed(struct napi_struct *napi) { } #endif
+/* Best effort check that NAPI is not idle (can't be scheduled to run) */ +static inline void napi_assert_will_not_race(const struct napi_struct *napi) +{ + /* uninitialized instance, can't race */ + if (!napi->poll_list.next) + return; + + /* SCHED bit is set on disabled instances */ + WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); + WARN_ON(READ_ONCE(napi->list_owner) != -1); +} + void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
#define XMIT_RECURSION_LIMIT 8 diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 7b20f6fcb82c..c8ce069605c4 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -25,6 +25,7 @@
#include <trace/events/page_pool.h>
+#include "dev.h" #include "mp_dmabuf_devmem.h" #include "netmem_priv.h" #include "page_pool_priv.h" @@ -1108,11 +1109,7 @@ void page_pool_disable_direct_recycling(struct page_pool *pool) if (!pool->p.napi) return;
- /* To avoid races with recycling and additional barriers make sure - * pool and NAPI are unlinked when NAPI is disabled. - */ - WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state)); - WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1); + napi_assert_will_not_race(pool->p.napi);
WRITE_ONCE(pool->p.napi, NULL); } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b6db4910359b..762ede027899 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -898,6 +898,10 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) pkt_dev->nr_labels = 0; do { __u32 tmp; + + if (n >= MAX_MPLS_LABELS) + return -E2BIG; + len = hex32_arg(&buffer[i], 8, &tmp); if (len <= 0) return len; @@ -909,8 +913,6 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) return -EFAULT; i++; n++; - if (n >= MAX_MPLS_LABELS) - return -E2BIG; } while (c == ',');
pkt_dev->nr_labels = n; @@ -1896,8 +1898,8 @@ static ssize_t pktgen_thread_write(struct file *file, i = len;
/* Read variable name */ - - len = strn_len(&user_buffer[i], sizeof(name) - 1); + max = min(sizeof(name) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len;
@@ -1927,7 +1929,8 @@ static ssize_t pktgen_thread_write(struct file *file, if (!strcmp(name, "add_device")) { char f[32]; memset(f, 0, 32); - len = strn_len(&user_buffer[i], sizeof(f) - 1); + max = min(sizeof(f) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) { ret = len; goto out; diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 281bbac5539d..75197861bc91 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -140,7 +140,12 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev) { - u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + u8 *tag; + + if (skb_linearize(skb)) + return NULL; + + tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M, KSZ_EGRESS_TAG_LEN); @@ -311,10 +316,16 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev) { - /* Tag decoding */ - u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; - unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M; unsigned int len = KSZ_EGRESS_TAG_LEN; + unsigned int port; + u8 *tag; + + if (skb_linearize(skb)) + return NULL; + + /* Tag decoding */ + tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN; + port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
/* Extra 4-bytes PTP timestamp */ if (tag[0] & KSZ9477_PTP_TAG_INDICATION) { diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 44048d7538dd..9d0754b3642f 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -543,6 +543,7 @@ static struct hsr_proto_ops hsr_ops = { .drop_frame = hsr_drop_frame, .fill_frame_info = hsr_fill_frame_info, .invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame, + .register_frame_out = hsr_register_frame_out, };
static struct hsr_proto_ops prp_ops = { @@ -553,6 +554,7 @@ static struct hsr_proto_ops prp_ops = { .fill_frame_info = prp_fill_frame_info, .handle_san_frame = prp_handle_san_frame, .update_san_info = prp_update_san_info, + .register_frame_out = prp_register_frame_out, };
void hsr_dev_setup(struct net_device *dev) diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index c0217476eb17..ace4e355d164 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -524,8 +524,8 @@ static void hsr_forward_do(struct hsr_frame_info *frame) * Also for SAN, this shouldn't be done. */ if (!frame->is_from_san && - hsr_register_frame_out(port, frame->node_src, - frame->sequence_nr)) + hsr->proto_ops->register_frame_out && + hsr->proto_ops->register_frame_out(port, frame)) continue;
if (frame->is_supervision && port->type == HSR_PT_MASTER && diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 73bc6f659812..85991fab7db5 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -35,6 +35,7 @@ static bool seq_nr_after(u16 a, u16 b)
#define seq_nr_before(a, b) seq_nr_after((b), (a)) #define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b))) +#define PRP_DROP_WINDOW_LEN 32768
bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr) { @@ -176,8 +177,11 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, new_node->time_in[i] = now; new_node->time_out[i] = now; } - for (i = 0; i < HSR_PT_PORTS; i++) + for (i = 0; i < HSR_PT_PORTS; i++) { new_node->seq_out[i] = seq_out; + new_node->seq_expected[i] = seq_out + 1; + new_node->seq_start[i] = seq_out + 1; + }
if (san && hsr->proto_ops->handle_san_frame) hsr->proto_ops->handle_san_frame(san, rx_port, new_node); @@ -482,9 +486,11 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, * 0 otherwise, or * negative error code on error */ -int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, - u16 sequence_nr) +int hsr_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame) { + struct hsr_node *node = frame->node_src; + u16 sequence_nr = frame->sequence_nr; + spin_lock_bh(&node->seq_out_lock); if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) && time_is_after_jiffies(node->time_out[port->type] + @@ -499,6 +505,89 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, return 0; }
+/* Adaptation of the PRP duplicate discard algorithm described in wireshark + * wiki (https://wiki.wireshark.org/PRP) + * + * A drop window is maintained for both LANs with start sequence set to the + * first sequence accepted on the LAN that has not been seen on the other LAN, + * and expected sequence set to the latest received sequence number plus one. + * + * When a frame is received on either LAN it is compared against the received + * frames on the other LAN. If it is outside the drop window of the other LAN + * the frame is accepted and the drop window is updated. + * The drop window for the other LAN is reset. + * + * 'port' is the outgoing interface + * 'frame' is the frame to be sent + * + * Return: + * 1 if frame can be shown to have been sent recently on this interface, + * 0 otherwise + */ +int prp_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame) +{ + enum hsr_port_type other_port; + enum hsr_port_type rcv_port; + struct hsr_node *node; + u16 sequence_diff; + u16 sequence_exp; + u16 sequence_nr; + + /* out-going frames are always in order + * and can be checked the same way as for HSR + */ + if (frame->port_rcv->type == HSR_PT_MASTER) + return hsr_register_frame_out(port, frame); + + /* for PRP we should only forward frames from the slave ports + * to the master port + */ + if (port->type != HSR_PT_MASTER) + return 1; + + node = frame->node_src; + sequence_nr = frame->sequence_nr; + sequence_exp = sequence_nr + 1; + rcv_port = frame->port_rcv->type; + other_port = rcv_port == HSR_PT_SLAVE_A ? HSR_PT_SLAVE_B : + HSR_PT_SLAVE_A; + + spin_lock_bh(&node->seq_out_lock); + if (time_is_before_jiffies(node->time_out[port->type] + + msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)) || + (node->seq_start[rcv_port] == node->seq_expected[rcv_port] && + node->seq_start[other_port] == node->seq_expected[other_port])) { + /* the node hasn't been sending for a while + * or both drop windows are empty, forward the frame + */ + node->seq_start[rcv_port] = sequence_nr; + } else if (seq_nr_before(sequence_nr, node->seq_expected[other_port]) && + seq_nr_before_or_eq(node->seq_start[other_port], sequence_nr)) { + /* drop the frame, update the drop window for the other port + * and reset our drop window + */ + node->seq_start[other_port] = sequence_exp; + node->seq_expected[rcv_port] = sequence_exp; + node->seq_start[rcv_port] = node->seq_expected[rcv_port]; + spin_unlock_bh(&node->seq_out_lock); + return 1; + } + + /* update the drop window for the port where this frame was received + * and clear the drop window for the other port + */ + node->seq_start[other_port] = node->seq_expected[other_port]; + node->seq_expected[rcv_port] = sequence_exp; + sequence_diff = sequence_exp - node->seq_start[rcv_port]; + if (sequence_diff > PRP_DROP_WINDOW_LEN) + node->seq_start[rcv_port] = sequence_exp - PRP_DROP_WINDOW_LEN; + + node->time_out[port->type] = jiffies; + node->seq_out[port->type] = sequence_nr; + spin_unlock_bh(&node->seq_out_lock); + return 0; +} + static struct hsr_port *get_late_port(struct hsr_priv *hsr, struct hsr_node *node) { diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 993fa950d814..b04948659d84 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -44,8 +44,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
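
The PRP drop-window code above leans on wrap-safe 16-bit sequence arithmetic: comparisons such as seq_nr_before()/seq_nr_after() and window widths like sequence_exp - node->seq_start[...] are all taken modulo 2^16. One common way to express such a comparison, shown standalone as an illustration (not necessarily the exact helper used in hsr_framereg.c):

	#include <linux/types.h>

	/* "a is after b" even across the 0xffff -> 0x0000 wrap. */
	static bool seq_after_sketch(u16 a, u16 b)
	{
		return (s16)(a - b) > 0;
	}
	/* e.g. seq_after_sketch(2, 0xfffe) is true: 2 is four steps past 0xfffe. */
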
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, u16 sequence_nr); -int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, - u16 sequence_nr); +int hsr_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame);
void hsr_prune_nodes(struct timer_list *t); void hsr_prune_proxy_nodes(struct timer_list *t); @@ -73,6 +72,8 @@ void prp_update_san_info(struct hsr_node *node, bool is_sup); bool hsr_is_node_in_db(struct list_head *node_db, const unsigned char addr[ETH_ALEN]);
+int prp_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame); + struct hsr_node { struct list_head mac_list; /* Protect R/W access to seq_out */ @@ -89,6 +90,9 @@ struct hsr_node { bool san_b; u16 seq_out[HSR_PT_PORTS]; bool removed; + /* PRP specific duplicate handling */ + u16 seq_expected[HSR_PT_PORTS]; + u16 seq_start[HSR_PT_PORTS]; struct rcu_head rcu_head; };
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index fcfeb79bb040..e26244456f63 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -183,6 +183,8 @@ struct hsr_proto_ops { struct hsr_frame_info *frame); bool (*invalid_dan_ingress_frame)(__be16 protocol); void (*update_san_info)(struct hsr_node *node, bool is_sup); + int (*register_frame_out)(struct hsr_port *port, + struct hsr_frame_info *frame); };
struct hsr_self_node { diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index f3281312eb5e..cbe4c6fc8b8e 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -120,47 +120,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) }
#ifdef CONFIG_INET_ESPINTCP -struct esp_tcp_sk { - struct sock *sk; - struct rcu_head rcu; -}; - -static void esp_free_tcp_sk(struct rcu_head *head) -{ - struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); - - sock_put(esk->sk); - kfree(esk); -} - static struct sock *esp_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); - struct esp_tcp_sk *esk; __be16 sport, dport; - struct sock *nsk; struct sock *sk;
- sk = rcu_dereference(x->encap_sk); - if (sk && sk->sk_state == TCP_ESTABLISHED) - return sk; - spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (sk && sk == nsk) { - esk = kmalloc(sizeof(*esk), GFP_ATOMIC); - if (!esk) { - spin_unlock_bh(&x->lock); - return ERR_PTR(-ENOMEM); - } - RCU_INIT_POINTER(x->encap_sk, NULL); - esk->sk = sk; - call_rcu(&esk->rcu, esp_free_tcp_sk); - } spin_unlock_bh(&x->lock);
sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4, @@ -173,20 +142,6 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x) return ERR_PTR(-EINVAL); }
- spin_lock_bh(&x->lock); - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (encap->encap_sport != sport || - encap->encap_dport != dport) { - sock_put(sk); - sk = nsk ?: ERR_PTR(-EREMCHG); - } else if (sk == nsk) { - sock_put(sk); - } else { - rcu_assign_pointer(x->encap_sk, sk); - } - spin_unlock_bh(&x->lock); - return sk; }
@@ -199,8 +154,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
sk = esp_find_tcp_sk(x); err = PTR_ERR_OR_ZERO(sk); - if (err) + if (err) { + kfree_skb(skb); goto out; + }
bh_lock_sock(sk); if (sock_owned_by_user(sk)) @@ -209,6 +166,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk);
+ sock_put(sk); + out: rcu_read_unlock(); return err; @@ -392,6 +351,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x, if (IS_ERR(sk)) return ERR_CAST(sk);
+ sock_put(sk); + *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 793e6781399a..5b7c41333d6f 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -829,19 +829,33 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, } }
+ if (cfg->fc_dst_len > 32) { + NL_SET_ERR_MSG(extack, "Invalid prefix length"); + err = -EINVAL; + goto errout; + } + + if (cfg->fc_dst_len < 32 && (ntohl(cfg->fc_dst) << cfg->fc_dst_len)) { + NL_SET_ERR_MSG(extack, "Invalid prefix for given prefix length"); + err = -EINVAL; + goto errout; + } + if (cfg->fc_nh_id) { if (cfg->fc_oif || cfg->fc_gw_family || cfg->fc_encap || cfg->fc_mp) { NL_SET_ERR_MSG(extack, "Nexthop specification and nexthop id are mutually exclusive"); - return -EINVAL; + err = -EINVAL; + goto errout; } }
if (has_gw && has_via) { NL_SET_ERR_MSG(extack, "Nexthop configuration can not contain both GATEWAY and VIA"); - return -EINVAL; + err = -EINVAL; + goto errout; }
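[Editor's note: the two new checks in rtm_to_fib_config() reject a prefix length above 32 and a destination with bits set beyond the prefix, which is why fib_valid_key_len() can be removed from fib_trie.c further down. A standalone sketch of the same host-bit test, done on the host-order key as the trie code did, is below.]

/* Sketch of the prefix sanity checks now done at rtm_to_fib_config() time. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool ipv4_prefix_valid(uint32_t key_host_order, uint8_t plen)
{
	if (plen > 32)
		return false;                   /* "Invalid prefix length" */
	if (plen < 32 && (key_host_order << plen))
		return false;                   /* host bits set below the prefix */
	return true;
}

int main(void)
{
	assert(ipv4_prefix_valid(0xc0a80100, 24));   /* 192.168.1.0/24  */
	assert(!ipv4_prefix_valid(0xc0a80101, 24));  /* 192.168.1.1/24  */
	assert(!ipv4_prefix_valid(0xc0a80101, 33));  /* impossible plen */
	assert(ipv4_prefix_valid(0xc0a80101, 32));   /* host route      */
	return 0;
}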
if (!cfg->fc_table) diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index b07292d50ee7..4563e5303c1a 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -245,9 +245,9 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct nlattr **tb, struct netlink_ext_ack *extack) { - struct net *net = sock_net(skb->sk); + struct fib4_rule *rule4 = (struct fib4_rule *)rule; + struct net *net = rule->fr_net; int err = -EINVAL; - struct fib4_rule *rule4 = (struct fib4_rule *) rule;
if (!inet_validate_dscp(frh->tos)) { NL_SET_ERR_MSG(extack, diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 09e31757e96c..cc86031d2050 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1193,22 +1193,6 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, return 0; }
-static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack) -{ - if (plen > KEYLENGTH) { - NL_SET_ERR_MSG(extack, "Invalid prefix length"); - return false; - } - - if ((plen < KEYLENGTH) && (key << plen)) { - NL_SET_ERR_MSG(extack, - "Invalid prefix for given prefix length"); - return false; - } - - return true; -} - static void fib_remove_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *old);
@@ -1229,9 +1213,6 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
key = ntohl(cfg->fc_dst);
- if (!fib_valid_key_len(key, plen, extack)) - return -EINVAL; - pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
fi = fib_create_info(cfg, extack); @@ -1723,9 +1704,6 @@ int fib_table_delete(struct net *net, struct fib_table *tb,
key = ntohl(cfg->fc_dst);
- if (!fib_valid_key_len(key, plen, extack)) - return -EINVAL; - l = fib_find_node(t, &tp, key); if (!l) return -ESRCH; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 9bfcfd016e18..2b4a58824763 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -1230,22 +1230,37 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) { unsigned int locksz = sizeof(spinlock_t); unsigned int i, nblocks = 1; + spinlock_t *ptr = NULL;
- if (locksz != 0) { - /* allocate 2 cache lines or at least one spinlock per cpu */ - nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); - nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); + if (locksz == 0) + goto set_mask;
- /* no more locks than number of hash buckets */ - nblocks = min(nblocks, hashinfo->ehash_mask + 1); + /* Allocate 2 cache lines or at least one spinlock per cpu. */ + nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus();
- hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL); - if (!hashinfo->ehash_locks) - return -ENOMEM; + /* At least one page per NUMA node. */ + nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz); + + nblocks = roundup_pow_of_two(nblocks); + + /* No more locks than number of hash buckets. */ + nblocks = min(nblocks, hashinfo->ehash_mask + 1);
- for (i = 0; i < nblocks; i++) - spin_lock_init(&hashinfo->ehash_locks[i]); + if (num_online_nodes() > 1) { + /* Use vmalloc() to allow NUMA policy to spread pages + * on all available nodes if desired. + */ + ptr = vmalloc_array(nblocks, locksz); + } + if (!ptr) { + ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL); + if (!ptr) + return -ENOMEM; } + for (i = 0; i < nblocks; i++) + spin_lock_init(&ptr[i]); + hashinfo->ehash_locks = ptr; +set_mask: hashinfo->ehash_locks_mask = nblocks - 1; return 0; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f1f31ebfc793..9667f2774025 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -141,7 +141,6 @@ static int ipgre_err(struct sk_buff *skb, u32 info, const struct iphdr *iph; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; - unsigned int data_len = 0; struct ip_tunnel *t;
if (tpi->proto == htons(ETH_P_TEB)) @@ -182,7 +181,6 @@ static int ipgre_err(struct sk_buff *skb, u32 info, case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return 0; - data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ break;
case ICMP_REDIRECT: @@ -190,10 +188,16 @@ static int ipgre_err(struct sk_buff *skb, u32 info, }
#if IS_ENABLED(CONFIG_IPV6) - if (tpi->proto == htons(ETH_P_IPV6) && - !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, - type, data_len)) - return 0; + if (tpi->proto == htons(ETH_P_IPV6)) { + unsigned int data_len = 0; + + if (type == ICMP_TIME_EXCEEDED) + data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ + + if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, + type, data_len)) + return 0; + } #endif
if (t->parms.iph.daddr == 0 || diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d93a5a89c569..d29219e067b7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -419,6 +419,20 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr return false; }
+static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count) +{ + tp->delivered_ce += ecn_count; +} + +/* Updates the delivered and delivered_ce counts */ +static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, + bool ece_ack) +{ + tp->delivered += delivered; + if (ece_ack) + tcp_count_delivered_ce(tp, delivered); +} + /* Buffer size and advertised window tuning. * * 1. Tuning sk->sk_sndbuf, when connection enters established state. @@ -1154,15 +1168,6 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb) } }
-/* Updates the delivered and delivered_ce counts */ -static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, - bool ece_ack) -{ - tp->delivered += delivered; - if (ece_ack) - tp->delivered_ce += delivered; -} - /* This procedure tags the retransmission queue when SACKs arrive. * * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). @@ -3862,12 +3867,23 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) } }
-static inline void tcp_in_ack_event(struct sock *sk, u32 flags) +static void tcp_in_ack_event(struct sock *sk, int flag) { const struct inet_connection_sock *icsk = inet_csk(sk);
- if (icsk->icsk_ca_ops->in_ack_event) - icsk->icsk_ca_ops->in_ack_event(sk, flags); + if (icsk->icsk_ca_ops->in_ack_event) { + u32 ack_ev_flags = 0; + + if (flag & FLAG_WIN_UPDATE) + ack_ev_flags |= CA_ACK_WIN_UPDATE; + if (flag & FLAG_SLOWPATH) { + ack_ev_flags |= CA_ACK_SLOWPATH; + if (flag & FLAG_ECE) + ack_ev_flags |= CA_ACK_ECE; + } + + icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags); + } }
/* Congestion control has updated the cwnd already. So if we're in @@ -3984,12 +4000,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_snd_una_update(tp, ack); flag |= FLAG_WIN_UPDATE;
- tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { - u32 ack_ev_flags = CA_ACK_SLOWPATH; - if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else @@ -4001,19 +4013,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state);
- if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { + if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) flag |= FLAG_ECE; - ack_ev_flags |= CA_ACK_ECE; - }
if (sack_state.sack_delivered) tcp_count_delivered(tp, sack_state.sack_delivered, flag & FLAG_ECE); - - if (flag & FLAG_WIN_UPDATE) - ack_ev_flags |= CA_ACK_WIN_UPDATE; - - tcp_in_ack_event(sk, ack_ev_flags); }
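[Editor's note: the tcp_input.c refactor hoists tcp_count_delivered(), plus a new tcp_count_delivered_ce() helper, above its first user so SACK-delivered segments can be counted with the ECE flag taken into account, and it defers the congestion-control in_ack_event callback until all ACK flags are known. A standalone sketch of just the counting rule follows.]

/* Sketch of the delivered/delivered_ce accounting rule from tcp_input.c. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct tp_counters {
	uint32_t delivered;
	uint32_t delivered_ce;
};

static void count_delivered(struct tp_counters *tp, uint32_t delivered,
			    bool ece_ack)
{
	tp->delivered += delivered;
	if (ece_ack)			/* peer echoed ECN congestion marks */
		tp->delivered_ce += delivered;
}

int main(void)
{
	struct tp_counters tp = { 0, 0 };

	count_delivered(&tp, 3, false);
	count_delivered(&tp, 2, true);
	assert(tp.delivered == 5 && tp.delivered_ce == 2);
	return 0;
}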
/* This is a deviation from RFC3168 since it states that: @@ -4040,6 +4045,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_rack_update_reo_wnd(sk, &rs);
+ tcp_in_ack_event(sk, flag); + if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag);
@@ -4071,6 +4078,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) return 1;
no_queue: + tcp_in_ack_event(sk, flag); /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) { tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag, diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index a620618cc568..17d3fc2fab4c 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -182,11 +182,15 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, int offset = skb_gro_offset(skb); const struct net_offload *ops; struct sk_buff *pp = NULL; - int ret; - - offset = offset - sizeof(struct udphdr); + int len, dlen; + __u8 *udpdata; + __be32 *udpdata32;
- if (!pskb_pull(skb, offset)) + len = skb->len - offset; + dlen = offset + min(len, 8); + udpdata = skb_gro_header(skb, dlen, offset); + udpdata32 = (__be32 *)udpdata; + if (unlikely(!udpdata)) return NULL;
rcu_read_lock(); @@ -194,11 +198,10 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, if (!ops || !ops->callbacks.gro_receive) goto out;
- ret = __xfrm4_udp_encap_rcv(sk, skb, false); - if (ret) + /* check if it is a keepalive or IKE packet */ + if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) goto out;
- skb_push(skb, offset); NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); @@ -208,7 +211,6 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
out: rcu_read_unlock(); - skb_push(skb, offset); NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 1;
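[Editor's note: the xfrm4 GRO path above no longer pulls the UDP header and runs the full encap receive; it peeks at up to eight payload bytes with skb_gro_header() and skips ESP GRO for NAT-keepalive and IKE traffic. A hedged userspace sketch of that classification — payload no longer than an ESP header, or a zero first word marking non-ESP — is shown here.]

/* Sketch of the "is this really ESP-in-UDP?" test used to skip GRO. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ESP_HDR_LEN 8	/* sizeof(struct ip_esp_hdr): SPI + sequence number */

static bool udp_payload_is_esp(const uint8_t *data, size_t len)
{
	uint32_t first_word;

	if (len <= ESP_HDR_LEN)
		return false;		/* NAT keepalive or runt packet     */

	memcpy(&first_word, data, sizeof(first_word));
	if (first_word == 0)
		return false;		/* non-ESP marker, i.e. IKE         */

	return true;			/* looks like ESP, worth GRO        */
}

int main(void)
{
	uint8_t keepalive[1] = { 0xff };
	uint8_t ike[16] = { 0x00, 0x00, 0x00, 0x00, 0x12, 0x34 };
	uint8_t esp[16] = { 0x00, 0x00, 0x10, 0x01 };	/* non-zero SPI */

	assert(!udp_payload_is_esp(keepalive, sizeof(keepalive)));
	assert(!udp_payload_is_esp(ike, sizeof(ike)));
	assert(udp_payload_is_esp(esp, sizeof(esp)));
	return 0;
}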
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index b2400c226a32..62d17d7f6d9a 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -137,47 +137,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) }
#ifdef CONFIG_INET6_ESPINTCP -struct esp_tcp_sk { - struct sock *sk; - struct rcu_head rcu; -}; - -static void esp_free_tcp_sk(struct rcu_head *head) -{ - struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); - - sock_put(esk->sk); - kfree(esk); -} - static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); - struct esp_tcp_sk *esk; __be16 sport, dport; - struct sock *nsk; struct sock *sk;
- sk = rcu_dereference(x->encap_sk); - if (sk && sk->sk_state == TCP_ESTABLISHED) - return sk; - spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (sk && sk == nsk) { - esk = kmalloc(sizeof(*esk), GFP_ATOMIC); - if (!esk) { - spin_unlock_bh(&x->lock); - return ERR_PTR(-ENOMEM); - } - RCU_INIT_POINTER(x->encap_sk, NULL); - esk->sk = sk; - call_rcu(&esk->rcu, esp_free_tcp_sk); - } spin_unlock_bh(&x->lock);
sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6, @@ -190,20 +159,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) return ERR_PTR(-EINVAL); }
- spin_lock_bh(&x->lock); - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (encap->encap_sport != sport || - encap->encap_dport != dport) { - sock_put(sk); - sk = nsk ?: ERR_PTR(-EREMCHG); - } else if (sk == nsk) { - sock_put(sk); - } else { - rcu_assign_pointer(x->encap_sk, sk); - } - spin_unlock_bh(&x->lock); - return sk; }
@@ -216,8 +171,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
sk = esp6_find_tcp_sk(x); err = PTR_ERR_OR_ZERO(sk); - if (err) + if (err) { + kfree_skb(skb); goto out; + }
bh_lock_sock(sk); if (sock_owned_by_user(sk)) @@ -226,6 +183,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk);
+ sock_put(sk); + out: rcu_read_unlock(); return err; @@ -422,6 +381,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x, if (IS_ERR(sk)) return ERR_CAST(sk);
+ sock_put(sk); + *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 04a9ed5e8310..29185c9ebd02 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -365,9 +365,9 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct nlattr **tb, struct netlink_ext_ack *extack) { + struct fib6_rule *rule6 = (struct fib6_rule *)rule; + struct net *net = rule->fr_net; int err = -EINVAL; - struct net *net = sock_net(skb->sk); - struct fib6_rule *rule6 = (struct fib6_rule *) rule;
if (!inet_validate_dscp(frh->tos)) { NL_SET_ERR_MSG(extack, diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 235808cfec70..68e9a41eed49 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1498,7 +1498,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) tunnel = netdev_priv(dev);
tunnel->dev = dev; - tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name);
ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); @@ -1882,7 +1881,6 @@ static int ip6erspan_tap_init(struct net_device *dev) tunnel = netdev_priv(dev);
tunnel->dev = dev; - tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name);
ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 434ddf263b88..89a61e040e6a 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1386,6 +1386,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, } v6_cork->hop_limit = ipc6->hlimit; v6_cork->tclass = ipc6->tclass; + v6_cork->dontfrag = ipc6->dontfrag; if (rt->dst.flags & DST_XFRM_TUNNEL) mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ? READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); @@ -1417,7 +1418,7 @@ static int __ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, size_t length, int transhdrlen, - unsigned int flags, struct ipcm6_cookie *ipc6) + unsigned int flags) { struct sk_buff *skb, *skb_prev = NULL; struct inet_cork *cork = &cork_full->base; @@ -1471,7 +1472,7 @@ static int __ip6_append_data(struct sock *sk, if (headersize + transhdrlen > mtu) goto emsgsize;
- if (cork->length + length > mtu - headersize && ipc6->dontfrag && + if (cork->length + length > mtu - headersize && v6_cork->dontfrag && (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_ICMPV6 || sk->sk_protocol == IPPROTO_RAW)) { @@ -1843,7 +1844,7 @@ int ip6_append_data(struct sock *sk,
return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork, &np->cork, sk_page_frag(sk), getfrag, - from, length, transhdrlen, flags, ipc6); + from, length, transhdrlen, flags); } EXPORT_SYMBOL_GPL(ip6_append_data);
@@ -2048,7 +2049,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, err = __ip6_append_data(sk, &queue, cork, &v6_cork, ¤t->task_frag, getfrag, from, length + exthdrlen, transhdrlen + exthdrlen, - flags, ipc6); + flags); if (err) { __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 48fd53b98972..5350c9bb2319 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1878,7 +1878,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev) int t_hlen;
t->dev = dev; - t->net = dev_net(dev);
ret = dst_cache_init(&t->dst_cache, GFP_KERNEL); if (ret) @@ -1940,6 +1939,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ t->net = net; t->parms.proto = IPPROTO_IPV6;
rcu_assign_pointer(ip6n->tnls_wc[0], t); @@ -2013,6 +2013,7 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, int err;
nt = netdev_priv(dev); + nt->net = net;
if (ip_tunnel_netlink_encap_parms(data, &ipencap)) { err = ip6_tnl_encap_setup(nt, &ipencap); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 590737c27537..012350469144 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -925,7 +925,6 @@ static inline int vti6_dev_init_gen(struct net_device *dev) struct ip6_tnl *t = netdev_priv(dev);
t->dev = dev; - t->net = dev_net(dev); netdev_hold(dev, &t->dev_tracker, GFP_KERNEL); netdev_lockdep_set_classes(dev); return 0; @@ -958,6 +957,7 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) struct net *net = dev_net(dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ t->net = net; t->parms.proto = IPPROTO_IPV6;
rcu_assign_pointer(ip6n->tnls_wc[0], t); @@ -1008,6 +1008,7 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev, vti6_netlink_parms(data, &nt->parms);
nt->parms.proto = IPPROTO_IPV6; + nt->net = net;
if (vti6_locate(net, &nt->parms, 0)) return -EEXIST; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 39bd8951bfca..3c15a0ae228e 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -269,6 +269,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
nt = netdev_priv(dev);
+ nt->net = net; nt->parms = *parms; if (ipip6_tunnel_create(dev) < 0) goto failed_free; @@ -1449,7 +1450,6 @@ static int ipip6_tunnel_init(struct net_device *dev) int err;
tunnel->dev = dev; - tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name);
ipip6_tunnel_bind_dev(dev); @@ -1563,6 +1563,7 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, int err;
nt = netdev_priv(dev); + nt->net = net;
if (ip_tunnel_netlink_encap_parms(data, &ipencap)) { err = ip_tunnel_encap_setup(nt, &ipencap); @@ -1858,6 +1859,9 @@ static int __net_init sit_init_net(struct net *net) */ sitn->fb_tunnel_dev->netns_local = true;
+ t = netdev_priv(sitn->fb_tunnel_dev); + t->net = net; + err = register_netdev(sitn->fb_tunnel_dev); if (err) goto err_reg_dev; @@ -1865,8 +1869,6 @@ static int __net_init sit_init_net(struct net *net) ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn); ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
- t = netdev_priv(sitn->fb_tunnel_dev); - strcpy(t->parms.name, sitn->fb_tunnel_dev->name); return 0;
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 4abc5e9d6322..841c81abaaf4 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -179,14 +179,18 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, int offset = skb_gro_offset(skb); const struct net_offload *ops; struct sk_buff *pp = NULL; - int ret; + int len, dlen; + __u8 *udpdata; + __be32 *udpdata32;
if (skb->protocol == htons(ETH_P_IP)) return xfrm4_gro_udp_encap_rcv(sk, head, skb);
- offset = offset - sizeof(struct udphdr); - - if (!pskb_pull(skb, offset)) + len = skb->len - offset; + dlen = offset + min(len, 8); + udpdata = skb_gro_header(skb, dlen, offset); + udpdata32 = (__be32 *)udpdata; + if (unlikely(!udpdata)) return NULL;
rcu_read_lock(); @@ -194,11 +198,10 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, if (!ops || !ops->callbacks.gro_receive) goto out;
- ret = __xfrm6_udp_encap_rcv(sk, skb, false); - if (ret) + /* check if it is a keepalive or IKE packet */ + if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0) goto out;
- skb_push(skb, offset); NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); @@ -208,7 +211,6 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
out: rcu_read_unlock(); - skb_push(skb, offset); NAPI_GRO_CB(skb)->same_flow = 0; NAPI_GRO_CB(skb)->flush = 1;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 0259cde394ba..cc77ec5769d8 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -887,15 +887,15 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (sk->sk_type != SOCK_STREAM) goto copy_uaddr;
+ /* Partial read */ + if (used + offset < skb_len) + continue; + if (!(flags & MSG_PEEK)) { skb_unlink(skb, &sk->sk_receive_queue); kfree_skb(skb); *seq = 0; } - - /* Partial read */ - if (used + offset < skb_len) - continue; } while (len > 0);
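[Editor's note: in llc_ui_recvmsg() the partial-read check now runs before the skb is unlinked and freed, so a buffer that was only partially copied stays at the head of the receive queue for the next iteration. A tiny queue-consumption sketch of that ordering, with invented names, is below.]

/* Sketch: only release the head buffer once it is fully consumed. */
#include <assert.h>
#include <stddef.h>

struct buf {
	size_t len;
	size_t offset;	/* how much has already been copied out */
};

/* Returns how many bytes were copied; releases the buffer only when done. */
static size_t consume(struct buf *head, size_t want, int *released)
{
	size_t avail = head->len - head->offset;
	size_t used = want < avail ? want : avail;

	head->offset += used;
	if (head->offset < head->len) {	/* partial read: keep the buffer */
		*released = 0;
		return used;
	}
	*released = 1;			/* fully consumed: safe to free  */
	return used;
}

int main(void)
{
	struct buf b = { .len = 100, .offset = 0 };
	int released;

	assert(consume(&b, 60, &released) == 60 && !released);
	assert(consume(&b, 60, &released) == 40 && released);
	return 0;
}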
out: diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index a06644084d15..d1c10f5f9516 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -2,7 +2,7 @@ /* * Portions of this file * Copyright(c) 2016 Intel Deutschland GmbH -* Copyright (C) 2018-2019, 2021-2024 Intel Corporation +* Copyright (C) 2018-2019, 2021-2025 Intel Corporation */
#ifndef __MAC80211_DRIVER_OPS @@ -955,6 +955,7 @@ static inline void drv_mgd_complete_tx(struct ieee80211_local *local, return; WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+ info->link_id = info->link_id < 0 ? 0 : info->link_id; trace_drv_mgd_complete_tx(local, sdata, info->duration, info->subtype, info->success); if (local->ops->mgd_complete_tx) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index cc8c5d18b130..8fa9b9dd4611 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -8,7 +8,7 @@ * Copyright 2007, Michael Wu flamingice@sourmilk.net * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2025 Intel Corporation */
#include <linux/delay.h> @@ -3589,7 +3589,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, if (tx) ieee80211_flush_queues(local, sdata, false);
- drv_mgd_complete_tx(sdata->local, sdata, &info); + if (tx || frame_buf) + drv_mgd_complete_tx(sdata->local, sdata, &info);
/* clear AP addr only after building the needed mgmt frames */ eth_zero_addr(sdata->deflink.u.mgd.bssid); @@ -4033,7 +4034,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) struct ieee80211_link_data *link;
link = sdata_dereference(sdata->link[link_id], sdata); - if (!link) + if (!link || !link->conf->bss) continue; cfg80211_unlink_bss(local->hw.wiphy, link->conf->bss); link->conf->bss = NULL; @@ -4306,6 +4307,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); status_code = le16_to_cpu(mgmt->u.auth.status_code);
+ info.link_id = ifmgd->auth_data->link_id; + if (auth_alg != ifmgd->auth_data->algorithm || (auth_alg != WLAN_AUTH_SAE && auth_transaction != ifmgd->auth_data->expected_transaction) || @@ -9219,7 +9222,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code, false); - drv_mgd_complete_tx(sdata->local, sdata, &info); return 0; }
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index e35178f5205f..bb76295d04c5 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -589,11 +589,9 @@ int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info) if (ret < 0) goto set_flags_err;
- if (attr_rem) { - ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem); - if (ret < 0) - goto set_flags_err; - } + ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem); + if (ret < 0) + goto set_flags_err;
if (loc.addr.family == AF_UNSPEC || rem.addr.family == AF_UNSPEC) { diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 7d4f0fa8b609..3ea60ff7a6a4 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -619,7 +619,9 @@ static struct ctl_table nf_ct_sysctl_table[] = { .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_COUNT] = { .procname = "nf_conntrack_count", @@ -655,7 +657,9 @@ static struct ctl_table nf_ct_sysctl_table[] = { .data = &nf_ct_expect_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE, + .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_ACCT] = { .procname = "nf_conntrack_acct", @@ -948,7 +952,9 @@ static struct ctl_table nf_ct_netfilter_table[] = { .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, };
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index cb8c525ea20e..7986145a527c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1569,6 +1569,9 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) return err; }
+ sch->qstats.backlog += len; + sch->q.qlen++; + if (first && !cl->cl_nactive) { if (cl->cl_flags & HFSC_RSC) init_ed(cl, len); @@ -1584,9 +1587,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
}
- sch->qstats.backlog += len; - sch->q.qlen++; - return NET_XMIT_SUCCESS; }
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 716808f374a8..b391c2ef463f 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -1079,14 +1079,16 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; + struct net_device *base_ndev; struct net *net;
- ndev = pnet_find_base_ndev(ndev); + base_ndev = pnet_find_base_ndev(ndev); net = dev_net(ndev); - if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, + if (smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port, ndev_pnetid) && + smc_pnet_find_ndev_pnetid_by_table(base_ndev, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) { - smc_pnet_find_rdma_dev(ndev, ini); + smc_pnet_find_rdma_dev(base_ndev, ini); return; /* pnetid could not be determined */ } _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 0090162ee8c3..17a4de75bfaf 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -270,9 +270,6 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt, old = rcu_dereference_protected(clnt->cl_xprt, lockdep_is_held(&clnt->cl_lock));
- if (!xprt_bound(xprt)) - clnt->cl_autobind = 1; - clnt->cl_timeout = timeout; rcu_assign_pointer(clnt->cl_xprt, xprt); spin_unlock(&clnt->cl_lock); diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 102c3818bc54..53bcca365fb1 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -820,9 +820,10 @@ static void rpcb_getport_done(struct rpc_task *child, void *data) }
trace_rpcb_setport(child, map->r_status, map->r_port); - xprt->ops->set_port(xprt, map->r_port); - if (map->r_port) + if (map->r_port) { + xprt->ops->set_port(xprt, map->r_port); xprt_set_bound(xprt); + } }
/* diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 9b45fbdc90ca..73bc39281ef5 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -276,6 +276,8 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) { + if (unlikely(current->flags & PF_EXITING)) + return -EINTR; schedule(); if (signal_pending_state(mode, current)) return -ERESTARTSYS; diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index c524421ec652..8584893b4785 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -817,12 +817,16 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, goto exit; }
+ /* Get net to avoid freed tipc_crypto when delete namespace */ + get_net(aead->crypto->net); + /* Now, do encrypt */ rc = crypto_aead_encrypt(req); if (rc == -EINPROGRESS || rc == -EBUSY) return rc;
tipc_bearer_put(b); + put_net(aead->crypto->net);
exit: kfree(ctx); @@ -860,6 +864,7 @@ static void tipc_aead_encrypt_done(void *data, int err) kfree(tx_ctx); tipc_bearer_put(b); tipc_aead_put(aead); + put_net(net); }
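[Editor's note: tipc_aead_encrypt() now takes a reference on the network namespace before handing the request to the async crypto engine and drops it either on the synchronous return path or in tipc_aead_encrypt_done(), so deleting the namespace cannot free tipc_crypto while a request is in flight. Below is a minimal, hedged sketch of that "pin the owner for the lifetime of an async operation" pattern; all names are invented and the completion is called directly rather than from a real async context.]

/* Sketch of pinning an owning object across an async operation. */
#include <assert.h>
#include <stdbool.h>

struct owner {
	int refcnt;
	bool freed;
};

static void owner_get(struct owner *o) { o->refcnt++; }

static void owner_put(struct owner *o)
{
	if (--o->refcnt == 0)
		o->freed = true;	/* stand-in for namespace teardown */
}

/* completion handler: drops the reference taken before the async submit */
static void encrypt_done(struct owner *o) { owner_put(o); }

int main(void)
{
	struct owner ns = { .refcnt = 1, .freed = false };

	owner_get(&ns);		/* pin before submitting the async request   */
	owner_put(&ns);		/* the creator's original reference goes away */
	assert(!ns.freed);	/* still alive: the async path holds a ref    */

	encrypt_done(&ns);	/* completion releases the pin                */
	assert(ns.freed);
	return 0;
}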
/** diff --git a/net/wireless/chan.c b/net/wireless/chan.c index e579d7e1425f..c4f3fefeb354 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -6,7 +6,7 @@ * * Copyright 2009 Johannes Berg johannes@sipsolutions.net * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright 2018-2024 Intel Corporation + * Copyright 2018-2025 Intel Corporation */
#include <linux/export.h> @@ -1621,6 +1621,12 @@ bool cfg80211_reg_check_beaconing(struct wiphy *wiphy, if (cfg->reg_power == IEEE80211_REG_VLP_AP) permitting_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
+ if ((cfg->iftype == NL80211_IFTYPE_P2P_GO || + cfg->iftype == NL80211_IFTYPE_AP) && + (chandef->width == NL80211_CHAN_WIDTH_20_NOHT || + chandef->width == NL80211_CHAN_WIDTH_20)) + permitting_flags |= IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY; + return _cfg80211_reg_can_beacon(wiphy, chandef, cfg->iftype, check_no_ir ? IEEE80211_CHAN_NO_IR : 0, permitting_flags); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ecfceddce00f..c778ffa1c8ef 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1213,6 +1213,10 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, if ((chan->flags & IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP)) goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY) && + nla_put_flag(msg, + NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY)) + goto nla_put_failure; }
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2b626078739c..f6846eb0f4b8 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -5,7 +5,7 @@ * Copyright 2008-2011 Luis R. Rodriguez mcgrof@qca.qualcomm.com * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2025 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -1603,6 +1603,8 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_PSD; if (rd_flags & NL80211_RRF_ALLOW_6GHZ_VLP_AP) channel_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; + if (rd_flags & NL80211_RRF_ALLOW_20MHZ_ACTIVITY) + channel_flags |= IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY; return channel_flags; }
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index fe82e2d07300..fc7a603b04f1 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -171,8 +171,10 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb) struct espintcp_ctx *ctx = espintcp_getctx(sk);
if (skb_queue_len(&ctx->out_queue) >= - READ_ONCE(net_hotdata.max_backlog)) + READ_ONCE(net_hotdata.max_backlog)) { + kfree_skb(skb); return -ENOBUFS; + }
__skb_queue_tail(&ctx->out_queue, skb);
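[Editor's note: espintcp_queue_out() now frees the skb itself when the backlog limit is hit, matching the usual "the transmit path consumes the buffer even on failure" convention and closing a leak on the -ENOBUFS path. A small ownership sketch with invented names:]

/* Sketch of "the callee consumes the buffer even when it fails". */
#include <assert.h>
#include <stdlib.h>

#define MAX_BACKLOG 2

static char *out_queue[MAX_BACKLOG];
static int queue_len;

static int queue_out(char *pkt)
{
	if (queue_len >= MAX_BACKLOG) {
		free(pkt);	/* consume on failure so the caller cannot leak it */
		return -1;	/* stands in for -ENOBUFS                          */
	}
	out_queue[queue_len++] = pkt;	/* queued: ownership moves to the queue */
	return 0;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		int err = queue_out(malloc(64));

		assert(i < MAX_BACKLOG ? err == 0 : err == -1);
	}
	while (queue_len)
		free(out_queue[--queue_len]);
	return 0;
}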
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 8a1b83191a6c..2c42d83fbaa2 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1581,6 +1581,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) struct xfrm_policy *delpol; struct hlist_head *chain;
+ /* Sanitize mark before store */ + policy->mark.v &= policy->mark.m; + spin_lock_bh(&net->xfrm.xfrm_policy_lock); chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); if (chain) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 711e816fc404..abd725386cb6 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -773,9 +773,6 @@ int __xfrm_state_delete(struct xfrm_state *x) xfrm_nat_keepalive_state_updated(x); spin_unlock(&net->xfrm.xfrm_state_lock);
- if (x->encap_sk) - sock_put(rcu_dereference_raw(x->encap_sk)); - xfrm_dev_state_delete(x);
/* All xfrm_state objects are created by xfrm_state_alloc. @@ -1656,6 +1653,9 @@ static void __xfrm_state_insert(struct xfrm_state *x)
list_add(&x->km.all, &net->xfrm.state_all);
+ /* Sanitize mark before store */ + x->mark.v &= x->mark.m; + h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, x->props.reqid, x->props.family); XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h, diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 87013623773a..da2a1c00ca8a 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -178,6 +178,12 @@ static inline int verify_replay(struct xfrm_usersa_info *p, "Replay seq and seq_hi should be 0 for output SA"); return -EINVAL; } + if (rs->oseq_hi && !(p->flags & XFRM_STATE_ESN)) { + NL_SET_ERR_MSG( + extack, + "Replay oseq_hi should be 0 in non-ESN mode for output SA"); + return -EINVAL; + } if (rs->bmp_len) { NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA"); return -EINVAL; @@ -190,6 +196,12 @@ static inline int verify_replay(struct xfrm_usersa_info *p, "Replay oseq and oseq_hi should be 0 for input SA"); return -EINVAL; } + if (rs->seq_hi && !(p->flags & XFRM_STATE_ESN)) { + NL_SET_ERR_MSG( + extack, + "Replay seq_hi should be 0 in non-ESN mode for input SA"); + return -EINVAL; + } }
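[Editor's note: both the xfrm policy and state insert paths now store mark.v & mark.m rather than the raw value, so every path that later compares or reports the stored value sees the same canonical form that mask-based matching uses. A small sketch of that normalization; the struct name is only illustrative.]

/* Sketch: masking the mark value at insert gives one canonical form, so two
 * requests with the same effective mark compare equal everywhere.
 */
#include <assert.h>
#include <stdint.h>

struct xmark { uint32_t v, m; };

static struct xmark sanitize(struct xmark mark)
{
	mark.v &= mark.m;		/* drop value bits outside the mask */
	return mark;
}

int main(void)
{
	struct xmark a = sanitize((struct xmark){ .v = 0x10ff, .m = 0x00ff });
	struct xmark b = sanitize((struct xmark){ .v = 0x00ff, .m = 0x00ff });

	assert(a.v == b.v && a.m == b.m);	/* same canonical entry */
	return 0;
}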
return 0; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 7afe040cf43b..f06f88b26183 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -400,7 +400,7 @@ $(obj)/%.o: $(src)/%.c @echo " CLANG-bpf " $@ $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \ -I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \ - -I$(LIBBPF_INCLUDE) \ + -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \ -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \ -D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \ -Wno-gnu-variable-sized-type-not-at-end \ diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 686197407c3c..5652d9035232 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -8,6 +8,7 @@
# Default set of warnings, always enabled KBUILD_CFLAGS += -Wall +KBUILD_CFLAGS += -Wextra KBUILD_CFLAGS += -Wundef KBUILD_CFLAGS += -Werror=implicit-function-declaration KBUILD_CFLAGS += -Werror=implicit-int @@ -15,7 +16,7 @@ KBUILD_CFLAGS += -Werror=return-type KBUILD_CFLAGS += -Werror=strict-prototypes KBUILD_CFLAGS += -Wno-format-security KBUILD_CFLAGS += -Wno-trigraphs -KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) +KBUILD_CFLAGS += $(call cc-disable-warning, frame-address) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += -Wmissing-declarations KBUILD_CFLAGS += -Wmissing-prototypes @@ -68,6 +69,13 @@ KBUILD_CFLAGS += -Wno-pointer-sign # globally built with -Wcast-function-type. KBUILD_CFLAGS += $(call cc-option, -Wcast-function-type)
+# Currently, disable -Wstringop-overflow for GCC 11, globally. +KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow) +KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow) + +# Currently, disable -Wunterminated-string-initialization as broken +KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization) + # The allocators already balk at large sizes, so silence the compiler # warnings for bounds checks involving those possible values. While # -Wno-alloc-size-larger-than would normally be used here, earlier versions @@ -97,7 +105,6 @@ KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion) # Explicitly clear padding bits during variable initialization KBUILD_CFLAGS += $(call cc-option,-fzero-init-padding-bits=all)
-KBUILD_CFLAGS += -Wextra KBUILD_CFLAGS += -Wunused
# diff --git a/scripts/config b/scripts/config index ff88e2faefd3..ea475c07de28 100755 --- a/scripts/config +++ b/scripts/config @@ -32,6 +32,7 @@ commands: Disable option directly after other option --module-after|-M beforeopt option Turn option into module directly after other option + --refresh Refresh the config using old settings
commands can be repeated multiple times
@@ -124,16 +125,22 @@ undef_var() { txt_delete "^# $name is not set" "$FN" }
-if [ "$1" = "--file" ]; then - FN="$2" - if [ "$FN" = "" ] ; then - usage +FN=.config +CMDS=() +while [[ $# -gt 0 ]]; do + if [ "$1" = "--file" ]; then + if [ "$2" = "" ]; then + usage + fi + FN="$2" + shift 2 + else + CMDS+=("$1") + shift fi - shift 2 -else - FN=.config -fi +done
+set -- "${CMDS[@]}" if [ "$1" = "" ] ; then usage fi @@ -217,9 +224,8 @@ while [ "$1" != "" ] ; do set_var "${CONFIG_}$B" "${CONFIG_}$B=m" "${CONFIG_}$A" ;;
- # undocumented because it ignores --file (fixme) --refresh) - yes "" | make oldconfig + yes "" | make oldconfig KCONFIG_CONFIG=$FN ;;
*) diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 3b55e7a4131d..ac95661a1c9d 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -385,7 +385,7 @@ int conf_read_simple(const char *name, int def)
def_flags = SYMBOL_DEF << def; for_all_symbols(sym) { - sym->flags &= ~(def_flags|SYMBOL_VALID); + sym->flags &= ~def_flags; switch (sym->type) { case S_INT: case S_HEX: @@ -398,7 +398,11 @@ int conf_read_simple(const char *name, int def) } }
- expr_invalidate_all(); + if (def == S_DEF_USER) { + for_all_symbols(sym) + sym->flags &= ~SYMBOL_VALID; + expr_invalidate_all(); + }
while (getline_stripped(&line, &line_asize, in) != -1) { struct menu *choice; @@ -464,6 +468,9 @@ int conf_read_simple(const char *name, int def) if (conf_set_sym_val(sym, def, def_flags, val)) continue;
+ if (def != S_DEF_USER) + continue; + /* * If this is a choice member, give it the highest priority. * If conflicting CONFIG options are given from an input file, @@ -967,10 +974,8 @@ static int conf_touch_deps(void) depfile_path[depfile_prefix_len] = 0;
conf_read_simple(name, S_DEF_AUTO); - sym_calc_value(modules_sym);
for_all_symbols(sym) { - sym_calc_value(sym); if (sym_is_choice(sym)) continue; if (sym->flags & SYMBOL_WRITE) { @@ -1084,12 +1089,12 @@ int conf_write_autoconf(int overwrite) if (ret) return -1;
- if (conf_touch_deps()) - return 1; - for_all_symbols(sym) sym_calc_value(sym);
+ if (conf_touch_deps()) + return 1; + ret = __conf_write_autoconf(conf_get_autoheader_name(), print_symbol_for_c, &comment_style_c); diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 0b7952471c18..79c09b378be8 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -112,8 +112,8 @@ INITFILE=$1 shift;
if [ ! -r "$INITFILE" ]; then - echo "The base file '$INITFILE' does not exist. Exit." >&2 - exit 1 + echo "The base file '$INITFILE' does not exist. Creating one..." >&2 + touch "$INITFILE" fi
MERGE_LIST=$* diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index a9aab10bebca..2f3f267e7216 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -245,7 +245,9 @@ static int process_measurement(struct file *file, const struct cred *cred, &allowed_algos); violation_check = ((func == FILE_CHECK || func == MMAP_CHECK || func == MMAP_CHECK_REQPROT) && - (ima_policy_flag & IMA_MEASURE)); + (ima_policy_flag & IMA_MEASURE) && + ((action & IMA_MEASURE) || + (file->f_mode & FMODE_WRITE))); if (!action && !violation_check) return 0;
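[Editor's note: the process_measurement() hunk above narrows when the open-writer/ToMToU violation check runs: on top of the existing func and ima_policy_flag conditions, the file must either match a measure rule (action & IMA_MEASURE) or be opened for write. A small boolean sketch of the combined predicate; the flag values here are simplified placeholders, not the kernel's definitions.]

/* Sketch of the narrowed violation_check predicate (simplified flag values). */
#include <assert.h>
#include <stdbool.h>

#define IMA_MEASURE	0x01	/* illustrative bit values only */
#define FMODE_WRITE	0x02

static bool violation_check(bool is_file_or_mmap_check, int policy_flag,
			    int action, int f_mode)
{
	return is_file_or_mmap_check &&
	       (policy_flag & IMA_MEASURE) &&
	       ((action & IMA_MEASURE) || (f_mode & FMODE_WRITE));
}

int main(void)
{
	/* read-only open that matches no measure rule: no violation check */
	assert(!violation_check(true, IMA_MEASURE, 0, 0));
	/* writer: still checked even without a matching measure rule */
	assert(violation_check(true, IMA_MEASURE, 0, FMODE_WRITE));
	return 0;
}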
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index 5dd1e164f9b1..1e35c9f807b2 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -830,7 +830,7 @@ static int smk_open_cipso(struct inode *inode, struct file *file) static ssize_t smk_set_cipso(struct file *file, const char __user *buf, size_t count, loff_t *ppos, int format) { - struct netlbl_lsm_catmap *old_cat, *new_cat = NULL; + struct netlbl_lsm_catmap *old_cat; struct smack_known *skp; struct netlbl_lsm_secattr ncats; char mapcatset[SMK_CIPSOLEN]; @@ -917,22 +917,15 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
smack_catset_bit(cat, mapcatset); } - ncats.flags = 0; - if (catlen == 0) { - ncats.attr.mls.cat = NULL; - ncats.attr.mls.lvl = maplevel; - new_cat = netlbl_catmap_alloc(GFP_ATOMIC); - if (new_cat) - new_cat->next = ncats.attr.mls.cat; - ncats.attr.mls.cat = new_cat; - skp->smk_netlabel.flags &= ~(1U << 3); - rc = 0; - } else { - rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN); - } + + rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN); if (rc >= 0) { old_cat = skp->smk_netlabel.attr.mls.cat; rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat); + if (ncats.attr.mls.cat) + skp->smk_netlabel.flags |= NETLBL_SECATTR_MLS_CAT; + else + skp->smk_netlabel.flags &= ~(u32)NETLBL_SECATTR_MLS_CAT; skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; synchronize_rcu(); netlbl_catmap_free(old_cat); diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 4683b9139c56..4ecb17bd5436 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1074,8 +1074,7 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) runtime->oss.params = 0; runtime->oss.prepare = 1; runtime->oss.buffer_used = 0; - if (runtime->dma_area) - snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_runtime_buffer_set_silence(runtime);
runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 0790b5fd917e..0a1ba26872f8 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -723,6 +723,17 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) atomic_inc(&runtime->buffer_accessing); }
+/* fill the PCM buffer with the current silence format; called from pcm_oss.c */ +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime) +{ + snd_pcm_buffer_access_lock(runtime); + if (runtime->dma_area) + snd_pcm_format_set_silence(runtime->format, runtime->dma_area, + bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_buffer_access_unlock(runtime); +} +EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence); + #if IS_ENABLED(CONFIG_SND_PCM_OSS) #define is_oss_stream(substream) ((substream)->oss.oss) #else diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index b74de9c0969f..9e59a97f4747 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1164,8 +1164,7 @@ static __poll_t snd_seq_poll(struct file *file, poll_table * wait) if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
/* check if data is available in the pool */ - if (!snd_seq_write_pool_allocated(client) || - snd_seq_pool_poll_wait(client->pool, file, wait)) + if (snd_seq_pool_poll_wait(client->pool, file, wait)) mask |= EPOLLOUT | EPOLLWRNORM; }
@@ -2583,8 +2582,6 @@ int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table if (client == NULL) return -ENXIO;
- if (! snd_seq_write_pool_allocated(client)) - return 1; if (snd_seq_pool_poll_wait(client->pool, file, wait)) return 1; return 0; diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index 20155e3e87c6..ccde0ca3d208 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -427,6 +427,7 @@ int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait) { poll_wait(file, &pool->output_sleep, wait); + guard(spinlock_irq)(&pool->lock); return snd_seq_output_ok(pool); }
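[Editor's note: snd_seq_pool_poll_wait() now takes the pool spinlock, via the scope-based guard() helper, around snd_seq_output_ok(), so the free-cell check cannot race with concurrent cell allocation and free; the callers also stop special-casing an unallocated pool. A tiny pthread sketch of "evaluate the poll predicate under the same lock the writers hold" follows; the readiness rule itself is invented for illustration.]

/* Sketch: evaluate a poll/readiness predicate under the writer's lock. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct pool {
	pthread_mutex_t lock;
	int free_cells;
	int total_cells;
};

static bool output_ok_locked(struct pool *p)
{
	bool ok;

	pthread_mutex_lock(&p->lock);
	ok = p->free_cells >= p->total_cells / 2;	/* illustrative rule */
	pthread_mutex_unlock(&p->lock);
	return ok;
}

int main(void)
{
	struct pool p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.free_cells = 10,
		.total_cells = 16,
	};

	assert(output_ok_locked(&p));
	p.free_cells = 2;
	assert(!output_ok_locked(&p));
	return 0;
}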
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c index e51d47572557..13a7d92e8d8d 100644 --- a/sound/pci/hda/hda_beep.c +++ b/sound/pci/hda/hda_beep.c @@ -31,8 +31,9 @@ static void generate_tone(struct hda_beep *beep, int tone) beep->power_hook(beep, true); beep->playing = 1; } - snd_hda_codec_write(codec, beep->nid, 0, - AC_VERB_SET_BEEP_CONTROL, tone); + if (!codec->beep_just_power_on) + snd_hda_codec_write(codec, beep->nid, 0, + AC_VERB_SET_BEEP_CONTROL, tone); if (!tone && beep->playing) { beep->playing = 0; if (beep->power_hook) @@ -212,10 +213,12 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid) struct hda_beep *beep; int err;
- if (!snd_hda_get_bool_hint(codec, "beep")) - return 0; /* disabled explicitly by hints */ - if (codec->beep_mode == HDA_BEEP_MODE_OFF) - return 0; /* disabled by module option */ + if (!codec->beep_just_power_on) { + if (!snd_hda_get_bool_hint(codec, "beep")) + return 0; /* disabled explicitly by hints */ + if (codec->beep_mode == HDA_BEEP_MODE_OFF) + return 0; /* disabled by module option */ + }
beep = kzalloc(sizeof(*beep), GFP_KERNEL); if (beep == NULL) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index db72c5fce9d1..13ffc9a6555f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -28,6 +28,7 @@ #include <sound/hda_codec.h> #include "hda_local.h" #include "hda_auto_parser.h" +#include "hda_beep.h" #include "hda_jack.h" #include "hda_generic.h" #include "hda_component.h" @@ -6964,6 +6965,41 @@ static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec, } }
+/* GPIO1 = amplifier on/off */ +static void alc285_fixup_hp_spectre_x360_df1(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + struct alc_spec *spec = codec->spec; + static const hda_nid_t conn[] = { 0x02 }; + static const struct hda_pintbl pincfgs[] = { + { 0x14, 0x90170110 }, /* front/high speakers */ + { 0x17, 0x90170130 }, /* back/bass speakers */ + { } + }; + + // enable mute led + alc285_fixup_hp_mute_led_coefbit(codec, fix, action); + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + /* needed for amp of back speakers */ + spec->gpio_mask |= 0x01; + spec->gpio_dir |= 0x01; + snd_hda_apply_pincfgs(codec, pincfgs); + /* share DAC to have unified volume control */ + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn); + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + break; + case HDA_FIXUP_ACT_INIT: + /* need to toggle GPIO to enable the amp of back speakers */ + alc_update_gpio_data(codec, 0x01, true); + msleep(100); + alc_update_gpio_data(codec, 0x01, false); + break; + } +} + static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -7036,6 +7072,30 @@ static void alc285_fixup_hp_envy_x360(struct hda_codec *codec, } }
+static void alc285_fixup_hp_beep(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + codec->beep_just_power_on = true; + } else if (action == HDA_FIXUP_ACT_INIT) { +#ifdef CONFIG_SND_HDA_INPUT_BEEP + /* + * Just enable loopback to internal speaker and headphone jack. + * Disable amplification to get about the same beep volume as + * was on pure BIOS setup before loading the driver. + */ + alc_update_coef_idx(codec, 0x36, 0x7070, BIT(13)); + + snd_hda_enable_beep_device(codec, 1); + +#if !IS_ENABLED(CONFIG_INPUT_PCSPKR) + dev_warn_once(hda_codec_dev(codec), + "enable CONFIG_INPUT_PCSPKR to get PC beeps\n"); +#endif +#endif + } +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c"
@@ -7705,6 +7765,7 @@ enum { ALC280_FIXUP_HP_9480M, ALC245_FIXUP_HP_X360_AMP, ALC285_FIXUP_HP_SPECTRE_X360_EB1, + ALC285_FIXUP_HP_SPECTRE_X360_DF1, ALC285_FIXUP_HP_ENVY_X360, ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, @@ -7806,6 +7867,7 @@ enum { ALC285_FIXUP_HP_GPIO_LED, ALC285_FIXUP_HP_MUTE_LED, ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED, + ALC285_FIXUP_HP_BEEP_MICMUTE_LED, ALC236_FIXUP_HP_MUTE_LED_COEFBIT2, ALC236_FIXUP_HP_GPIO_LED, ALC236_FIXUP_HP_MUTE_LED, @@ -9395,6 +9457,12 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_spectre_x360_mute_led, }, + [ALC285_FIXUP_HP_BEEP_MICMUTE_LED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_beep, + .chained = true, + .chain_id = ALC285_FIXUP_HP_MUTE_LED, + }, [ALC236_FIXUP_HP_MUTE_LED_COEFBIT2] = { .type = HDA_FIXUP_FUNC, .v.func = alc236_fixup_hp_mute_led_coefbit2, @@ -9766,6 +9834,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_spectre_x360_eb1 }, + [ALC285_FIXUP_HP_SPECTRE_X360_DF1] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_spectre_x360_df1 + }, [ALC285_FIXUP_HP_ENVY_X360] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_envy_x360, @@ -10483,6 +10555,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), + SND_PCI_QUIRK(0x103c, 0x863e, "HP Spectre x360 15-df1xxx", ALC285_FIXUP_HP_SPECTRE_X360_DF1), SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -10493,7 +10566,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), - SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8760, "HP EliteBook 8{4,5}5 G7", ALC285_FIXUP_HP_BEEP_MICMUTE_LED), SND_PCI_QUIRK(0x103c, 0x876e, "HP ENVY x360 Convertible 13-ay0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), @@ -11109,6 +11182,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 Hybrid", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x390d, "Lenovo Yoga Pro 7 14ASP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), SND_PCI_QUIRK(0x17aa, 0x391f, "Yoga S990-16 pro Quad YC Quad", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x17aa, 0x3920, "Yoga S990-16 pro Quad VECO Quad", ALC287_FIXUP_TAS2781_I2C), @@ -11372,6 +11446,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"}, {.id 
= ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"}, + {.id = ALC285_FIXUP_HP_SPECTRE_X360_DF1, .name = "alc285-hp-spectre-x360-df1"}, {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"}, {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, {.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"}, diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c index 73d764fc8539..984a7f470a31 100644 --- a/sound/soc/codecs/cs42l43-jack.c +++ b/sound/soc/codecs/cs42l43-jack.c @@ -654,6 +654,10 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv)
reinit_completion(&priv->type_detect);
+ regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL, + CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK, + CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK); + cs42l43_start_hs_bias(priv, true); regmap_update_bits(cs42l43->regmap, CS42L43_HS2, CS42L43_HSDET_MODE_MASK, 0x3 << CS42L43_HSDET_MODE_SHIFT); @@ -665,6 +669,9 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv) CS42L43_HSDET_MODE_MASK, 0x2 << CS42L43_HSDET_MODE_SHIFT); cs42l43_stop_hs_bias(priv);
+ regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL, + CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK, 0); + if (!time_left) return -ETIMEDOUT;
diff --git a/sound/soc/codecs/mt6359-accdet.h b/sound/soc/codecs/mt6359-accdet.h index c234f2f4276a..78ada3a5bfae 100644 --- a/sound/soc/codecs/mt6359-accdet.h +++ b/sound/soc/codecs/mt6359-accdet.h @@ -123,6 +123,15 @@ struct mt6359_accdet { struct workqueue_struct *jd_workqueue; };
+#if IS_ENABLED(CONFIG_SND_SOC_MT6359_ACCDET) int mt6359_accdet_enable_jack_detect(struct snd_soc_component *component, struct snd_soc_jack *jack); +#else +static inline int +mt6359_accdet_enable_jack_detect(struct snd_soc_component *component, + struct snd_soc_jack *jack) +{ + return -EOPNOTSUPP; +} +#endif #endif diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c index fac0617ab95b..6cbb8d0535b0 100644 --- a/sound/soc/codecs/pcm3168a.c +++ b/sound/soc/codecs/pcm3168a.c @@ -493,9 +493,9 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream, } break; case 24: - if (provider_mode || (format == SND_SOC_DAIFMT_DSP_A) || - (format == SND_SOC_DAIFMT_DSP_B)) { - dev_err(component->dev, "24-bit slots not supported in provider mode, or consumer mode using DSP\n"); + if (!provider_mode && ((format == SND_SOC_DAIFMT_DSP_A) || + (format == SND_SOC_DAIFMT_DSP_B))) { + dev_err(component->dev, "24-bit slots not supported in consumer mode using DSP\n"); return -EINVAL; } break; diff --git a/sound/soc/codecs/pcm6240.c b/sound/soc/codecs/pcm6240.c index 5d99877f8839..e59bb77edf09 100644 --- a/sound/soc/codecs/pcm6240.c +++ b/sound/soc/codecs/pcm6240.c @@ -14,7 +14,7 @@
#include <linux/unaligned.h> #include <linux/firmware.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of_irq.h> @@ -2035,10 +2035,8 @@ static const struct regmap_config pcmdevice_i2c_regmap = {
static void pcmdevice_remove(struct pcmdevice_priv *pcm_dev) { - if (gpio_is_valid(pcm_dev->irq_info.gpio)) { - gpio_free(pcm_dev->irq_info.gpio); - free_irq(pcm_dev->irq_info.nmb, pcm_dev); - } + if (pcm_dev->irq) + free_irq(pcm_dev->irq, pcm_dev); mutex_destroy(&pcm_dev->codec_lock); }
@@ -2110,7 +2108,7 @@ static int pcmdevice_i2c_probe(struct i2c_client *i2c) ndev = 1; dev_addrs[0] = i2c->addr; } - pcm_dev->irq_info.gpio = of_irq_get(np, 0); + pcm_dev->irq = of_irq_get(np, 0);
for (i = 0; i < ndev; i++) pcm_dev->addr[i] = dev_addrs[i]; @@ -2133,22 +2131,10 @@ static int pcmdevice_i2c_probe(struct i2c_client *i2c)
if (pcm_dev->chip_id == PCM1690) goto skip_interrupt; - if (gpio_is_valid(pcm_dev->irq_info.gpio)) { - dev_dbg(pcm_dev->dev, "irq-gpio = %d", pcm_dev->irq_info.gpio); - - ret = gpio_request(pcm_dev->irq_info.gpio, "PCMDEV-IRQ"); - if (!ret) { - int gpio = pcm_dev->irq_info.gpio; - - gpio_direction_input(gpio); - pcm_dev->irq_info.nmb = gpio_to_irq(gpio); - - } else - dev_err(pcm_dev->dev, "%s: GPIO %d request error\n", - __func__, pcm_dev->irq_info.gpio); + if (pcm_dev->irq) { + dev_dbg(pcm_dev->dev, "irq = %d", pcm_dev->irq); } else - dev_err(pcm_dev->dev, "Looking up irq-gpio failed %d\n", - pcm_dev->irq_info.gpio); + dev_err(pcm_dev->dev, "No irq provided\n");
skip_interrupt: ret = devm_snd_soc_register_component(&i2c->dev, diff --git a/sound/soc/codecs/pcm6240.h b/sound/soc/codecs/pcm6240.h index 1e125bb97286..2d8f9e798139 100644 --- a/sound/soc/codecs/pcm6240.h +++ b/sound/soc/codecs/pcm6240.h @@ -208,11 +208,6 @@ struct pcmdevice_regbin { struct pcmdevice_config_info **cfg_info; };
-struct pcmdevice_irqinfo { - int gpio; - int nmb; -}; - struct pcmdevice_priv { struct snd_soc_component *component; struct i2c_client *client; @@ -221,7 +216,7 @@ struct pcmdevice_priv { struct gpio_desc *hw_rst; struct regmap *regmap; struct pcmdevice_regbin regbin; - struct pcmdevice_irqinfo irq_info; + int irq; unsigned int addr[PCMDEVICE_MAX_I2C_DEVICES]; unsigned int chip_id; int cur_conf; diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c index 5449d6b5cf3d..bf83f4bc94fc 100644 --- a/sound/soc/codecs/rt722-sdca-sdw.c +++ b/sound/soc/codecs/rt722-sdca-sdw.c @@ -28,9 +28,50 @@ static bool rt722_sdca_readable_register(struct device *dev, unsigned int reg) 0): case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_GE49, RT722_SDCA_CTL_DETECTED_MODE, 0): - case SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, RT722_SDCA_CTL_HIDTX_CURRENT_OWNER, - 0) ... SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, - RT722_SDCA_CTL_HIDTX_MESSAGE_LENGTH, 0): + case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_XU03, RT722_SDCA_CTL_SELECTED_MODE, + 0): + case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU05, + RT722_SDCA_CTL_FU_MUTE, CH_L) ... + SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU05, + RT722_SDCA_CTL_FU_MUTE, CH_R): + case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_XU0D, + RT722_SDCA_CTL_SELECTED_MODE, 0): + case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU0F, + RT722_SDCA_CTL_FU_MUTE, CH_L) ... + SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_USER_FU0F, + RT722_SDCA_CTL_FU_MUTE, CH_R): + case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_PDE40, + RT722_SDCA_CTL_REQ_POWER_STATE, 0): + case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_PDE12, + RT722_SDCA_CTL_REQ_POWER_STATE, 0): + case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_CS01, + RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0): + case SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT722_SDCA_ENT_CS11, + RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0): + case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, + RT722_SDCA_CTL_FU_MUTE, CH_01) ... + SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, + RT722_SDCA_CTL_FU_MUTE, CH_04): + case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_IT26, + RT722_SDCA_CTL_VENDOR_DEF, 0): + case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_PDE2A, + RT722_SDCA_CTL_REQ_POWER_STATE, 0): + case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_CS1F, + RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0): + case SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, + RT722_SDCA_CTL_HIDTX_CURRENT_OWNER, 0) ... + SDW_SDCA_CTL(FUNC_NUM_HID, RT722_SDCA_ENT_HID01, + RT722_SDCA_CTL_HIDTX_MESSAGE_LENGTH, 0): + case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_USER_FU06, + RT722_SDCA_CTL_FU_MUTE, CH_L) ... + SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_USER_FU06, + RT722_SDCA_CTL_FU_MUTE, CH_R): + case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_OT23, + RT722_SDCA_CTL_VENDOR_DEF, CH_08): + case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_PDE23, + RT722_SDCA_CTL_REQ_POWER_STATE, 0): + case SDW_SDCA_CTL(FUNC_NUM_AMP, RT722_SDCA_ENT_CS31, + RT722_SDCA_CTL_SAMPLE_FREQ_INDEX, 0): case RT722_BUF_ADDR_HID1 ... RT722_BUF_ADDR_HID2: return true; default: @@ -74,6 +115,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re case 0x5600000 ... 0x5600007: case 0x5700000 ... 0x5700004: case 0x5800000 ... 
0x5800004: + case 0x5810000: case 0x5b00003: case 0x5c00011: case 0x5d00006: @@ -81,6 +123,7 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re case 0x5f00030: case 0x6100000 ... 0x6100051: case 0x6100055 ... 0x6100057: + case 0x6100060: case 0x6100062: case 0x6100064 ... 0x6100065: case 0x6100067: diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c index 58315eab492a..39a7d39536fe 100644 --- a/sound/soc/codecs/tas2764.c +++ b/sound/soc/codecs/tas2764.c @@ -180,33 +180,6 @@ static SOC_ENUM_SINGLE_DECL( static const struct snd_kcontrol_new tas2764_asi1_mux = SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum);
-static int tas2764_dac_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); - struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); - int ret; - - switch (event) { - case SND_SOC_DAPM_POST_PMU: - tas2764->dac_powered = true; - ret = tas2764_update_pwr_ctrl(tas2764); - break; - case SND_SOC_DAPM_PRE_PMD: - tas2764->dac_powered = false; - ret = tas2764_update_pwr_ctrl(tas2764); - break; - default: - dev_err(tas2764->dev, "Unsupported event\n"); - return -EINVAL; - } - - if (ret < 0) - return ret; - - return 0; -} - static const struct snd_kcontrol_new isense_switch = SOC_DAPM_SINGLE("Switch", TAS2764_PWR_CTRL, TAS2764_ISENSE_POWER_EN, 1, 1); static const struct snd_kcontrol_new vsense_switch = @@ -219,8 +192,7 @@ static const struct snd_soc_dapm_widget tas2764_dapm_widgets[] = { 1, &isense_switch), SND_SOC_DAPM_SWITCH("VSENSE", TAS2764_PWR_CTRL, TAS2764_VSENSE_POWER_EN, 1, &vsense_switch), - SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2764_dac_event, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), + SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_OUTPUT("OUT"), SND_SOC_DAPM_SIGGEN("VMON"), SND_SOC_DAPM_SIGGEN("IMON") @@ -241,9 +213,28 @@ static int tas2764_mute(struct snd_soc_dai *dai, int mute, int direction) { struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(dai->component); + int ret; + + if (!mute) { + tas2764->dac_powered = true; + ret = tas2764_update_pwr_ctrl(tas2764); + if (ret) + return ret; + }
tas2764->unmuted = !mute; - return tas2764_update_pwr_ctrl(tas2764); + ret = tas2764_update_pwr_ctrl(tas2764); + if (ret) + return ret; + + if (mute) { + tas2764->dac_powered = false; + ret = tas2764_update_pwr_ctrl(tas2764); + if (ret) + return ret; + } + + return 0; }
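Editor's note: the tas2764_mute() change above folds DAC power management into the mute callback so the DAC is powered up before the path is unmuted and powered down only after it is muted again. A minimal sketch of that ordering follows; the amp_* names are hypothetical stand-ins, not the real tas2764 helpers.

#include <stdbool.h>

struct amp_state {
	bool dac_powered;
	bool unmuted;
};

/* Stand-in for tas2764_update_pwr_ctrl(): would write the power-control register. */
static int amp_apply(struct amp_state *s)
{
	(void)s;
	return 0;
}

static int amp_mute(struct amp_state *s, int mute)
{
	int ret;

	if (!mute) {				/* power the DAC up before unmuting */
		s->dac_powered = true;
		ret = amp_apply(s);
		if (ret)
			return ret;
	}

	s->unmuted = !mute;
	ret = amp_apply(s);
	if (ret)
		return ret;

	if (mute) {				/* drop DAC power only after muting */
		s->dac_powered = false;
		ret = amp_apply(s);
	}

	return ret;
}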
static int tas2764_set_bitwidth(struct tas2764_priv *tas2764, int bitwidth) @@ -634,6 +625,7 @@ static const struct reg_default tas2764_reg_defaults[] = { { TAS2764_TDM_CFG2, 0x0a }, { TAS2764_TDM_CFG3, 0x10 }, { TAS2764_TDM_CFG5, 0x42 }, + { TAS2764_INT_CLK_CFG, 0x19 }, };
static const struct regmap_range_cfg tas2764_regmap_ranges[] = { @@ -651,6 +643,7 @@ static const struct regmap_range_cfg tas2764_regmap_ranges[] = { static bool tas2764_volatile_register(struct device *dev, unsigned int reg) { switch (reg) { + case TAS2764_SW_RST: case TAS2764_INT_LTCH0 ... TAS2764_INT_LTCH4: case TAS2764_INT_CLK_CFG: return true; diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c index 47da5674d7c9..e31b7fb104e6 100644 --- a/sound/soc/codecs/wsa883x.c +++ b/sound/soc/codecs/wsa883x.c @@ -529,7 +529,7 @@ static const struct sdw_port_config wsa883x_pconfig[WSA883X_MAX_SWR_PORTS] = { }, [WSA883X_PORT_VISENSE] = { .num = WSA883X_PORT_VISENSE + 1, - .ch_mask = 0x3, + .ch_mask = 0x1, }, };
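Editor's note: the tas2764 regmap hunk above adds the software-reset register to the volatile list so the regcache never hands back a stale cached value for it. A minimal sketch of how such a callback is wired into a regmap configuration; the register numbers below are illustrative, not the real TAS2764 map.

#include <linux/regmap.h>

#define EX_SW_RESET	0x01	/* self-clearing, must always hit the hardware */
#define EX_INT_LATCH0	0x49	/* latched interrupt status */
#define EX_INT_LATCH4	0x4d

static bool ex_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case EX_SW_RESET:
	case EX_INT_LATCH0 ... EX_INT_LATCH4:
		return true;		/* never serve these from the cache */
	default:
		return false;
	}
}

static const struct regmap_config ex_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.volatile_reg	= ex_volatile_reg,
	.cache_type	= REGCACHE_RBTREE,
};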
diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c index 560a2c04b695..18b0ee8f15a5 100644 --- a/sound/soc/codecs/wsa884x.c +++ b/sound/soc/codecs/wsa884x.c @@ -891,7 +891,7 @@ static const struct sdw_port_config wsa884x_pconfig[WSA884X_MAX_SWR_PORTS] = { }, [WSA884X_PORT_VISENSE] = { .num = WSA884X_PORT_VISENSE + 1, - .ch_mask = 0x3, + .ch_mask = 0x1, }, [WSA884X_PORT_CPS] = { .num = WSA884X_PORT_CPS + 1, diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c index 93dbe40008c0..e5ae435171d6 100644 --- a/sound/soc/fsl/imx-card.c +++ b/sound/soc/fsl/imx-card.c @@ -516,7 +516,7 @@ static int imx_card_parse_of(struct imx_card_data *data) if (!card->dai_link) return -ENOMEM;
- data->link_data = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL); + data->link_data = devm_kcalloc(dev, num_links, sizeof(*link_data), GFP_KERNEL); if (!data->link_data) return -ENOMEM;
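Editor's note: the imx-card fix above corrects an allocation that was sized with sizeof(*link) even though the array stores link_data entries. Sizing an allocation by the object the destination pointer points to avoids that mismatch; a small user-space sketch of the idiom, with made-up struct layouts:

#include <stdlib.h>

struct dai_link  { const char *name; int id; };
struct link_data { unsigned int slots; unsigned int slot_width; };

static int alloc_links(size_t num_links,
		       struct dai_link **links, struct link_data **link_data)
{
	/* sizeof(**dest) stays correct even if the two struct types diverge */
	*links = calloc(num_links, sizeof(**links));
	if (!*links)
		return -1;

	*link_data = calloc(num_links, sizeof(**link_data));
	if (!*link_data) {
		free(*links);
		return -1;
	}
	return 0;
}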
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 1148e9498d8e..b6434b473126 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -576,6 +576,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF2 | BYT_RT5640_MCLK_EN), }, + { /* Acer Aspire SW3-013 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-013"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-clk.c b/sound/soc/mediatek/mt8188/mt8188-afe-clk.c index e69c1bb2cb23..7f411b857782 100644 --- a/sound/soc/mediatek/mt8188/mt8188-afe-clk.c +++ b/sound/soc/mediatek/mt8188/mt8188-afe-clk.c @@ -58,7 +58,15 @@ static const char *aud_clks[MT8188_CLK_NUM] = { [MT8188_CLK_AUD_ADC] = "aud_adc", [MT8188_CLK_AUD_DAC_HIRES] = "aud_dac_hires", [MT8188_CLK_AUD_A1SYS_HP] = "aud_a1sys_hp", + [MT8188_CLK_AUD_AFE_DMIC1] = "aud_afe_dmic1", + [MT8188_CLK_AUD_AFE_DMIC2] = "aud_afe_dmic2", + [MT8188_CLK_AUD_AFE_DMIC3] = "aud_afe_dmic3", + [MT8188_CLK_AUD_AFE_DMIC4] = "aud_afe_dmic4", [MT8188_CLK_AUD_ADC_HIRES] = "aud_adc_hires", + [MT8188_CLK_AUD_DMIC_HIRES1] = "aud_dmic_hires1", + [MT8188_CLK_AUD_DMIC_HIRES2] = "aud_dmic_hires2", + [MT8188_CLK_AUD_DMIC_HIRES3] = "aud_dmic_hires3", + [MT8188_CLK_AUD_DMIC_HIRES4] = "aud_dmic_hires4", [MT8188_CLK_AUD_I2SIN] = "aud_i2sin", [MT8188_CLK_AUD_TDM_IN] = "aud_tdm_in", [MT8188_CLK_AUD_I2S_OUT] = "aud_i2s_out", diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-clk.h b/sound/soc/mediatek/mt8188/mt8188-afe-clk.h index ec53c171c170..c6c78d684f3e 100644 --- a/sound/soc/mediatek/mt8188/mt8188-afe-clk.h +++ b/sound/soc/mediatek/mt8188/mt8188-afe-clk.h @@ -54,7 +54,15 @@ enum { MT8188_CLK_AUD_ADC, MT8188_CLK_AUD_DAC_HIRES, MT8188_CLK_AUD_A1SYS_HP, + MT8188_CLK_AUD_AFE_DMIC1, + MT8188_CLK_AUD_AFE_DMIC2, + MT8188_CLK_AUD_AFE_DMIC3, + MT8188_CLK_AUD_AFE_DMIC4, MT8188_CLK_AUD_ADC_HIRES, + MT8188_CLK_AUD_DMIC_HIRES1, + MT8188_CLK_AUD_DMIC_HIRES2, + MT8188_CLK_AUD_DMIC_HIRES3, + MT8188_CLK_AUD_DMIC_HIRES4, MT8188_CLK_AUD_I2SIN, MT8188_CLK_AUD_TDM_IN, MT8188_CLK_AUD_I2S_OUT, diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c index 73e5c63aeec8..d36520c6272d 100644 --- a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c +++ b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c @@ -2855,10 +2855,6 @@ static bool mt8188_is_volatile_reg(struct device *dev, unsigned int reg) case AFE_DMIC3_SRC_DEBUG_MON0: case AFE_DMIC3_UL_SRC_MON0: case AFE_DMIC3_UL_SRC_MON1: - case DMIC_GAIN1_CUR: - case DMIC_GAIN2_CUR: - case DMIC_GAIN3_CUR: - case DMIC_GAIN4_CUR: case ETDM_IN1_MONITOR: case ETDM_IN2_MONITOR: case ETDM_OUT1_MONITOR: diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c index 19adadedc88a..1001fd321380 100644 --- a/sound/soc/qcom/sm8250.c +++ b/sound/soc/qcom/sm8250.c @@ -7,6 +7,7 @@ #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/pcm.h> +#include <sound/pcm_params.h> #include <linux/soundwire/sdw.h> #include <sound/jack.h> #include <linux/input-event-codes.h> @@ -39,9 +40,11 @@ static int sm8250_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, 
SNDRV_PCM_HW_PARAM_CHANNELS); + struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
rate->min = rate->max = 48000; channels->min = channels->max = 2; + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
return 0; } diff --git a/sound/soc/sdw_utils/soc_sdw_cs42l43.c b/sound/soc/sdw_utils/soc_sdw_cs42l43.c index adb1c008e871..2dc7787234c3 100644 --- a/sound/soc/sdw_utils/soc_sdw_cs42l43.c +++ b/sound/soc/sdw_utils/soc_sdw_cs42l43.c @@ -20,6 +20,8 @@ #include <sound/soc-dapm.h> #include <sound/soc_sdw_utils.h>
+#define CS42L43_SPK_VOLUME_0DB 128 /* 0dB Max */ + static const struct snd_soc_dapm_route cs42l43_hs_map[] = { { "Headphone", NULL, "cs42l43 AMP3_OUT" }, { "Headphone", NULL, "cs42l43 AMP4_OUT" }, @@ -117,6 +119,14 @@ int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_so return -ENOMEM; }
+ ret = snd_soc_limit_volume(card, "cs42l43 Speaker Digital Volume", + CS42L43_SPK_VOLUME_0DB); + if (ret) + dev_err(card->dev, "cs42l43 speaker volume limit failed: %d\n", ret); + else + dev_info(card->dev, "Setting CS42L43 Speaker volume limit to %d\n", + CS42L43_SPK_VOLUME_0DB); + ret = snd_soc_dapm_add_routes(&card->dapm, cs42l43_spk_map, ARRAY_SIZE(cs42l43_spk_map)); if (ret) diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c index 4e08892d24c6..de09d21add45 100644 --- a/sound/soc/soc-dai.c +++ b/sound/soc/soc-dai.c @@ -275,10 +275,11 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
if (dai->driver->ops && dai->driver->ops->xlate_tdm_slot_mask) - dai->driver->ops->xlate_tdm_slot_mask(slots, - &tx_mask, &rx_mask); + ret = dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); else - snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); + ret = snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); + if (ret) + goto err;
for_each_pcm_streams(stream) snd_soc_dai_tdm_mask_set(dai, stream, *tdm_mask[stream]); @@ -287,6 +288,7 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, dai->driver->ops->set_tdm_slot) ret = dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask, slots, slot_width); +err: return soc_dai_ret(dai, ret); } EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot); diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index b0e4e4168f38..fb11003d56cf 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -639,6 +639,33 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, } EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
+static int snd_soc_clip_to_platform_max(struct snd_kcontrol *kctl) +{ + struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value; + struct snd_ctl_elem_value uctl; + int ret; + + if (!mc->platform_max) + return 0; + + ret = kctl->get(kctl, &uctl); + if (ret < 0) + return ret; + + if (uctl.value.integer.value[0] > mc->platform_max) + uctl.value.integer.value[0] = mc->platform_max; + + if (snd_soc_volsw_is_stereo(mc) && + uctl.value.integer.value[1] > mc->platform_max) + uctl.value.integer.value[1] = mc->platform_max; + + ret = kctl->put(kctl, &uctl); + if (ret < 0) + return ret; + + return 0; +} + /** * snd_soc_limit_volume - Set new limit to an existing volume control. * @@ -663,7 +690,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card, struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value; if (max <= mc->max - mc->min) { mc->platform_max = max; - ret = 0; + ret = snd_soc_clip_to_platform_max(kctl); } } return ret; diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c index 1989147aa6a4..5fe237688b91 100644 --- a/sound/soc/sof/intel/hda-bus.c +++ b/sound/soc/sof/intel/hda-bus.c @@ -76,7 +76,7 @@ void sof_hda_bus_init(struct snd_sof_dev *sdev, struct device *dev)
snd_hdac_ext_bus_init(bus, dev, &bus_core_ops, sof_hda_ext_ops);
- if (chip && chip->hw_ip_version == SOF_INTEL_ACE_2_0) + if (chip && chip->hw_ip_version >= SOF_INTEL_ACE_2_0) bus->use_pio_for_commands = true; #else snd_hdac_ext_bus_init(bus, dev, NULL, NULL); diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index c924a998d6f9..9c8f79e55ec5 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1007,7 +1007,21 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev, if (!*mach && codec_num <= 2) { bool tplg_fixup = false;
- hda_mach = snd_soc_acpi_intel_hda_machines; + /* + * make a local copy of the match array since we might + * be modifying it + */ + hda_mach = devm_kmemdup_array(sdev->dev, + snd_soc_acpi_intel_hda_machines, + 2, /* we have one entry + sentinel in the array */ + sizeof(snd_soc_acpi_intel_hda_machines[0]), + GFP_KERNEL); + if (!hda_mach) { + dev_err(bus->dev, + "%s: failed to duplicate the HDA match table\n", + __func__); + return; + }
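Editor's note: the hda.c change above duplicates the generic HDA machine table before the driver adjusts it, so the shared static array is never modified in place. A user-space sketch of the same copy-before-modify pattern, using a made-up table rather than the SOF structures:

#include <stdlib.h>
#include <string.h>

struct mach_entry { const char *drv_name; const char *tplg; };

/* Shared, read-only template: one entry plus an empty sentinel. */
static const struct mach_entry mach_template[2] = {
	{ .drv_name = "hda-generic", .tplg = "hda-generic.tplg" },
	{ 0 }
};

/* Return a private, writable copy; callers tweak the copy, never the template. */
static struct mach_entry *dup_mach_table(void)
{
	struct mach_entry *copy = malloc(sizeof(mach_template));

	if (copy)
		memcpy(copy, mach_template, sizeof(mach_template));
	return copy;
}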
dev_info(bus->dev, "using HDA machine driver %s now\n", hda_mach->drv_name); diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c index 576f407cd456..976a4794d610 100644 --- a/sound/soc/sof/ipc4-control.c +++ b/sound/soc/sof/ipc4-control.c @@ -531,6 +531,14 @@ static int sof_ipc4_bytes_ext_put(struct snd_sof_control *scontrol, return -EINVAL; }
+ /* Check header id */ + if (header.numid != SOF_CTRL_CMD_BINARY) { + dev_err_ratelimited(scomp->dev, + "Incorrect numid for bytes put %d\n", + header.numid); + return -EINVAL; + } + /* Verify the ABI header first */ if (copy_from_user(&abi_hdr, tlvd->tlv, sizeof(abi_hdr))) return -EFAULT; @@ -613,7 +621,8 @@ static int _sof_ipc4_bytes_ext_get(struct snd_sof_control *scontrol, if (data_size > size) return -ENOSPC;
- header.numid = scontrol->comp_id; + /* Set header id and length */ + header.numid = SOF_CTRL_CMD_BINARY; header.length = data_size;
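Editor's note: the ipc4-control hunks above make the bytes_ext put path reject TLV blobs whose header id is not SOF_CTRL_CMD_BINARY, and make the get path report that same id instead of the component id. A simplified sketch of the put-side check, using a made-up header struct rather than the real snd_ctl_tlv layout:

#include <stddef.h>

/* Hypothetical command id, standing in for SOF_CTRL_CMD_BINARY. */
#define EX_CTRL_CMD_BINARY 3

struct ex_tlv_header {
	unsigned int numid;	/* command id the driver expects */
	unsigned int length;	/* payload size in bytes */
};

/* Reject a mis-tagged or over-sized blob before touching the payload. */
static int ex_bytes_put(const struct ex_tlv_header *hdr, size_t max_size)
{
	if (hdr->numid != EX_CTRL_CMD_BINARY)
		return -1;
	if (hdr->length > max_size)
		return -1;
	return 0;
}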
if (copy_to_user(tlvd, &header, sizeof(struct snd_ctl_tlv))) diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c index 4df2be3d39eb..2fe4969cdc3b 100644 --- a/sound/soc/sof/ipc4-pcm.c +++ b/sound/soc/sof/ipc4-pcm.c @@ -794,7 +794,8 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm
spcm->stream[stream].private = stream_priv;
- if (!support_info) + /* Delay reporting is only supported on playback */ + if (!support_info || stream == SNDRV_PCM_STREAM_CAPTURE) continue;
time_info = kzalloc(sizeof(*time_info), GFP_KERNEL); diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index 37ca15cc5728..f9708b8fd73b 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -1059,7 +1059,7 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp, struct snd_sof_dai *dai) { struct snd_soc_card *card = scomp->card; - struct snd_soc_pcm_runtime *rtd; + struct snd_soc_pcm_runtime *rtd, *full, *partial; struct snd_soc_dai *cpu_dai; int stream; int i; @@ -1076,12 +1076,22 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp, else goto end;
+ full = NULL; + partial = NULL; list_for_each_entry(rtd, &card->rtd_list, list) { /* does stream match DAI link ? */ - if (!rtd->dai_link->stream_name || - !strstr(rtd->dai_link->stream_name, w->sname)) - continue; + if (rtd->dai_link->stream_name) { + if (!strcmp(rtd->dai_link->stream_name, w->sname)) { + full = rtd; + break; + } else if (strstr(rtd->dai_link->stream_name, w->sname)) { + partial = rtd; + } + } + }
+ rtd = full ? full : partial; + if (rtd) { for_each_rtd_cpu_dais(rtd, i, cpu_dai) { /* * Please create DAI widget in the right order diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c index 330bc0c09f56..93dd88fb805d 100644 --- a/sound/soc/sunxi/sun4i-codec.c +++ b/sound/soc/sunxi/sun4i-codec.c @@ -21,6 +21,7 @@ #include <linux/gpio/consumer.h>
#include <sound/core.h> +#include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> @@ -235,6 +236,7 @@ struct sun4i_codec { struct clk *clk_module; struct reset_control *rst; struct gpio_desc *gpio_pa; + struct gpio_desc *gpio_hp;
/* ADC_FIFOC register is at different offset on different SoCs */ struct regmap_field *reg_adc_fifoc; @@ -1263,6 +1265,49 @@ static struct snd_soc_dai_driver dummy_cpu_dai = { .ops = &dummy_dai_ops, };
+static struct snd_soc_jack sun4i_headphone_jack; + +static struct snd_soc_jack_pin sun4i_headphone_jack_pins[] = { + { .pin = "Headphone", .mask = SND_JACK_HEADPHONE }, +}; + +static struct snd_soc_jack_gpio sun4i_headphone_jack_gpio = { + .name = "hp-det", + .report = SND_JACK_HEADPHONE, + .debounce_time = 150, +}; + +static int sun4i_codec_machine_init(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_card *card = rtd->card; + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(card); + int ret; + + if (scodec->gpio_hp) { + ret = snd_soc_card_jack_new_pins(card, "Headphone Jack", + SND_JACK_HEADPHONE, + &sun4i_headphone_jack, + sun4i_headphone_jack_pins, + ARRAY_SIZE(sun4i_headphone_jack_pins)); + if (ret) { + dev_err(rtd->dev, + "Headphone jack creation failed: %d\n", ret); + return ret; + } + + sun4i_headphone_jack_gpio.desc = scodec->gpio_hp; + ret = snd_soc_jack_add_gpios(&sun4i_headphone_jack, 1, + &sun4i_headphone_jack_gpio); + + if (ret) { + dev_err(rtd->dev, "Headphone GPIO not added: %d\n", ret); + return ret; + } + } + + return 0; +} + static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev, int *num_links) { @@ -1288,6 +1333,7 @@ static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev, link->codecs->name = dev_name(dev); link->platforms->name = dev_name(dev); link->dai_fmt = SND_SOC_DAIFMT_I2S; + link->init = sun4i_codec_machine_init;
*num_links = 1;
@@ -1728,6 +1774,13 @@ static int sun4i_codec_probe(struct platform_device *pdev) return ret; }
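Editor's note: the sun4i-codec probe hunk just below requests the hp-det line with devm_gpiod_get_optional(), which returns NULL when the GPIO simply is not described (not an error; jack setup is skipped later) and an ERR_PTR() such as -EPROBE_DEFER on a real failure. A short sketch of that convention, assuming a generic driver rather than sun4i-codec itself:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int ex_get_hp_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev, "hp-det", GPIOD_IN);
	if (IS_ERR(gpio))
		return dev_err_probe(dev, PTR_ERR(gpio),
				     "Failed to get hp-det gpio\n");

	*out = gpio;	/* may be NULL: property absent, no jack to set up */
	return 0;
}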
+ scodec->gpio_hp = devm_gpiod_get_optional(&pdev->dev, "hp-det", GPIOD_IN); + if (IS_ERR(scodec->gpio_hp)) { + ret = PTR_ERR(scodec->gpio_hp); + dev_err_probe(&pdev->dev, ret, "Failed to get hp-det gpio\n"); + return ret; + } + /* reg_field setup */ scodec->reg_adc_fifoc = devm_regmap_field_alloc(&pdev->dev, scodec->regmap, diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 826ac870f246..a792ada18863 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1885,10 +1885,18 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi, }
port_info = find_port_info(umidi, number); - name_format = port_info ? port_info->name : - (jack_name != default_jack_name ? "%s %s" : "%s %s %d"); - snprintf(substream->name, sizeof(substream->name), - name_format, umidi->card->shortname, jack_name, number + 1); + if (port_info || jack_name == default_jack_name || + strncmp(umidi->card->shortname, jack_name, strlen(umidi->card->shortname)) != 0) { + name_format = port_info ? port_info->name : + (jack_name != default_jack_name ? "%s %s" : "%s %s %d"); + snprintf(substream->name, sizeof(substream->name), + name_format, umidi->card->shortname, jack_name, number + 1); + } else { + /* The manufacturer included the iProduct name in the jack + * name, do not use both + */ + strscpy(substream->name, jack_name); + }
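Editor's note: the usb midi hunk above stops prefixing the jack name with the card shortname when the firmware-provided jack name already begins with it, so the product name is not emitted twice. A user-space sketch of the same prefix test, with hypothetical helper and parameter names:

#include <stdio.h>
#include <string.h>

/* Emit "<shortname> <jack>" unless the jack name already carries the shortname. */
static void build_port_name(char *dst, size_t len,
			    const char *shortname, const char *jack)
{
	if (strncmp(shortname, jack, strlen(shortname)) == 0)
		snprintf(dst, len, "%s", jack);
	else
		snprintf(dst, len, "%s %s", shortname, jack);
}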
*rsubstream = substream; } diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 9b75639434b8..0a764426d935 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -461,10 +461,11 @@ int get_fd_type(int fd) p_err("can't read link type: %s", strerror(errno)); return -1; } - if (n == sizeof(path)) { + if (n == sizeof(buf)) { p_err("can't read link type: path too long!"); return -1; } + buf[n] = '\0';
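Editor's note: the bpftool fix above addresses two readlink() pitfalls: the truncation check compared the return value against the wrong buffer's size, and the result was passed to strstr() without being NUL-terminated. A small user-space sketch of the safe pattern:

#include <string.h>
#include <unistd.h>

/* readlink() neither NUL-terminates nor reports truncation explicitly. */
static int link_contains(const char *path, const char *needle)
{
	char buf[256];
	ssize_t n;

	n = readlink(path, buf, sizeof(buf));
	if (n < 0)
		return -1;			/* error, see errno */
	if ((size_t)n == sizeof(buf))
		return -1;			/* possibly truncated */

	buf[n] = '\0';				/* terminate before strstr() */
	return strstr(buf, needle) != NULL;
}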
if (strstr(buf, "bpf-map")) return BPF_OBJ_MAP; diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build index 5fb3fb3d97e0..ffe988867703 100644 --- a/tools/build/Makefile.build +++ b/tools/build/Makefile.build @@ -149,6 +149,10 @@ objprefix := $(subst ./,,$(OUTPUT)$(dir)/) obj-y := $(addprefix $(objprefix),$(obj-y)) subdir-obj-y := $(addprefix $(objprefix),$(subdir-obj-y))
+# Separate out test log files from real build objects. +test-y := $(filter %_log, $(obj-y)) +obj-y := $(filter-out %_log, $(obj-y)) + # Final '$(obj)-in.o' object in-target := $(objprefix)$(obj)-in.o
@@ -159,7 +163,7 @@ $(subdir-y):
$(sort $(subdir-obj-y)): $(subdir-y) ;
-$(in-target): $(obj-y) FORCE +$(in-target): $(obj-y) $(test-y) FORCE $(call rule_mkdir) $(call if_changed,$(host)ld_multi)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4a939c90dc2e..552fd633f820 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1206,6 +1206,7 @@ enum bpf_perf_event_type { #define BPF_F_BEFORE (1U << 3) #define BPF_F_AFTER (1U << 4) #define BPF_F_ID (1U << 5) +#define BPF_F_PREORDER (1U << 6) #define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5ff643e60d09..6e4d417604fa 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2074,7 +2074,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, }
len = strlen(value); - if (value[len - 1] != '"') { + if (len < 2 || value[len - 1] != '"') { pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", ext->name, value); return -EINVAL; diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c index ce32cb35007d..c4da34048ef8 100644 --- a/tools/net/ynl/lib/ynl.c +++ b/tools/net/ynl/lib/ynl.c @@ -364,7 +364,7 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr) "Invalid attribute (binary %s)", policy->name); return -1; case YNL_PT_NUL_STR: - if ((!policy->len || len <= policy->len) && !data[len - 1]) + if (len && (!policy->len || len <= policy->len) && !data[len - 1]) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (string %s)", policy->name); diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index 463f1394ab97..c78f1c1bca75 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ b/tools/net/ynl/ynl-gen-c.py @@ -2417,6 +2417,9 @@ def render_uapi(family, cw):
defines = [] for const in family['definitions']: + if const.get('header'): + continue + if const['type'] != 'const': cw.writes_defines(defines) defines = [] diff --git a/tools/objtool/check.c b/tools/objtool/check.c index bea6461ac340..4fce0074076f 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -3353,7 +3353,7 @@ static int handle_insn_ops(struct instruction *insn, if (update_cfi_state(insn, next_insn, &state->cfi, op)) return 1;
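Editor's note: the libbpf and ynl fixes above both guard a value[len - 1] style check so it is never evaluated for empty input (or, for a quoted kcfg string, a one-character value). A minimal sketch of the two guards, with illustrative function names:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* A quoted kcfg value needs at least the opening and closing '"'. */
static bool is_quoted(const char *value)
{
	size_t len = strlen(value);

	return len >= 2 && value[0] == '"' && value[len - 1] == '"';
}

/* A NUL-terminated string attribute must be non-empty before data[len - 1] is read. */
static bool is_nul_terminated(const char *data, size_t len)
{
	return len && data[len - 1] == '\0';
}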
- if (!insn->alt_group) + if (!opts.uaccess || !insn->alt_group) continue;
if (op->dest.type == OP_DEST_PUSHF) { @@ -3820,6 +3820,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, return 0;
case INSN_STAC: + if (!opts.uaccess) + break; + if (state.uaccess) { WARN_INSN(insn, "recursive UACCESS enable"); return 1; @@ -3829,6 +3832,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, break;
case INSN_CLAC: + if (!opts.uaccess) + break; + if (!state.uaccess && func) { WARN_INSN(insn, "redundant UACCESS disable"); return 1; @@ -4304,7 +4310,8 @@ static int validate_symbol(struct objtool_file *file, struct section *sec, if (!insn || insn->ignore || insn->visited) return 0;
- state->uaccess = sym->uaccess_safe; + if (opts.uaccess) + state->uaccess = sym->uaccess_safe;
ret = validate_branch(file, insn_func(insn), insn, *state); if (ret) @@ -4751,8 +4758,10 @@ int check(struct objtool_file *file) init_cfi_state(&force_undefined_cfi); force_undefined_cfi.force_undefined = true;
- if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) + if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) { + ret = -1; goto out; + }
cfi_hash_add(&init_cfi); cfi_hash_add(&func_cfi); @@ -4769,7 +4778,7 @@ int check(struct objtool_file *file) if (opts.retpoline) { ret = validate_retpoline(file); if (ret < 0) - return ret; + goto out; warnings += ret; }
@@ -4805,7 +4814,7 @@ int check(struct objtool_file *file) */ ret = validate_unrets(file); if (ret < 0) - return ret; + goto out; warnings += ret; }
@@ -4868,7 +4877,7 @@ int check(struct objtool_file *file) if (opts.prefix) { ret = add_prefix_symbols(file); if (ret < 0) - return ret; + goto out; warnings += ret; }
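Editor's note: the objtool check() hunks above turn several early returns into "goto out" so the function's single cleanup/exit path always runs, and the cfi_hash_alloc() failure now sets ret before jumping there. A generic user-space sketch of that pattern (not the objtool code itself):

#include <stdio.h>
#include <stdlib.h>

/* Every failure funnels through the cleanup tail instead of returning early. */
static int check_file(const char *path)
{
	FILE *f;
	char *buf = NULL;
	int ret = -1;

	f = fopen(path, "rb");
	if (!f)
		goto out;

	buf = malloc(4096);
	if (!buf)
		goto out_close;		/* an early "return -1" here would leak f */

	/* ... validation passes would run here ... */
	ret = 0;

	free(buf);
out_close:
	fclose(f);
out:
	return ret;
}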
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index a3cf1d17163a..e4b00e13302b 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -199,6 +199,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics \fBUncMHz\fP per-package uncore MHz, instantaneous sample. .PP \fBUMHz1.0\fP per-package uncore MHz for domain=1 and fabric_cluster=0, instantaneous sample. System summary is the average of all packages. +For the "--show" and "--hide" options, use "UncMHz" to operate on all UMHz*.* as a group. .SH TOO MUCH INFORMATION EXAMPLE By default, turbostat dumps all possible information -- a system configuration header, followed by columns for all counters. This is ideal for remote debugging, use the "--out" option to save everything to a text file, and get that file to the expert helping you debug. diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 77ef60980ee5..12424bf08551 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -6445,7 +6445,18 @@ static void probe_intel_uncore_frequency_cluster(void) sprintf(path, "%s/current_freq_khz", path_base); sprintf(name_buf, "UMHz%d.%d", domain_id, cluster_id);
- add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id); + /* + * Once add_counter() is called, that counter is always read + * and reported -- so it is effectively (enabled & present). + * Only call add_counter() here if legacy BIC_UNCORE_MHZ (UncMHz) + * is (enabled). Since we are in this routine, we + * know we will not probe and set (present) the legacy counter. + * + * This allows "--show/--hide UncMHz" to be effective for + * the clustered MHz counters, as a group. + */ + if BIC_IS_ENABLED(BIC_UNCORE_MHZ) + add_counter(0, path, name_buf, 0, SCOPE_PACKAGE, COUNTER_K2M, FORMAT_AVERAGE, 0, package_id);
if (quiet) continue; diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py index dc7949076863..4a6bf4e048f5 100644 --- a/tools/testing/kunit/qemu_configs/x86_64.py +++ b/tools/testing/kunit/qemu_configs/x86_64.py @@ -7,4 +7,6 @@ CONFIG_SERIAL_8250_CONSOLE=y''', qemu_arch='x86_64', kernel_path='arch/x86/boot/bzImage', kernel_command_line='console=ttyS0', - extra_qemu_params=[]) + # qboot is faster than SeaBIOS and doesn't mess up + # the terminal. + extra_qemu_params=['-bios', 'qboot.rom']) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index 2d0796314862..0a99fd404f6d 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -68,7 +68,6 @@ static void test_sockmap_ktls_disconnect_after_delete(int family, int map) goto close_cli;
err = disconnect(cli); - ASSERT_OK(err, "disconnect");
close_cli: close(cli); diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 4927b9add5ad..06f252733660 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -289,6 +289,10 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested) &test_hwpt_id); test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0, &test_hwpt_id); + test_err_hwpt_alloc(EOPNOTSUPP, self->device_id, self->ioas_id, + IOMMU_HWPT_ALLOC_NEST_PARENT | + IOMMU_HWPT_FAULT_ID_VALID, + &test_hwpt_id);
test_cmd_hwpt_alloc(self->device_id, self->ioas_id, IOMMU_HWPT_ALLOC_NEST_PARENT, diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh index d9d587454d20..8c1597ebc2d3 100755 --- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh +++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh @@ -149,7 +149,7 @@ cfg_test_host_common() check_err $? "Failed to add $name host entry"
bridge mdb replace dev br0 port br0 grp $grp $state vid 10 &> /dev/null - check_fail $? "Managed to replace $name host entry" + check_err $? "Failed to replace $name host entry"
bridge mdb del dev br0 port br0 grp $grp $state vid 10 bridge mdb get dev br0 grp $grp vid 10 &> /dev/null diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh index 02c21ff4ca81..aabd6e5480b8 100755 --- a/tools/testing/selftests/net/gro.sh +++ b/tools/testing/selftests/net/gro.sh @@ -100,5 +100,6 @@ trap cleanup EXIT if [[ "${test}" == "all" ]]; then run_all_tests else - run_test "${proto}" "${test}" + exit_code=$(run_test "${proto}" "${test}") + exit $exit_code fi;