I'm announcing the release of the 6.12.64 kernel.
All users of the 6.12 kernel series must upgrade.
The updated 6.12.y git tree can be found at:
    git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git linux-6.12.y
and can be browsed at the normal kernel.org git web browser:
    https://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git;a=summa...
thanks,
greg k-h
------------
Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml | 2 Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml | 5 Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml | 3 Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml | 5 Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml | 5 Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml | 5 Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml | 5 Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml | 5 Documentation/driver-api/soundwire/stream.rst | 2 Documentation/driver-api/tty/tty_port.rst | 5 Documentation/filesystems/nfs/localio.rst | 85 Makefile | 2 arch/arm/boot/dts/microchip/sama5d2.dtsi | 10 arch/arm/boot/dts/microchip/sama7g5.dtsi | 4 arch/arm64/boot/dts/ti/k3-j721e-sk.dts | 12 arch/arm64/include/asm/el2_setup.h | 57 arch/arm64/kernel/head.S | 22 arch/arm64/kvm/hyp/nvhe/hyp-init.S | 10 arch/arm64/kvm/hyp/nvhe/psci-relay.c | 3 arch/arm64/net/bpf_jit_comp.c | 2 arch/loongarch/include/asm/pgtable.h | 4 arch/loongarch/kernel/mcount_dyn.S | 14 arch/loongarch/kernel/relocate.c | 4 arch/loongarch/kernel/setup.c | 8 arch/loongarch/kernel/switch.S | 4 arch/loongarch/net/bpf_jit.c | 18 arch/loongarch/net/bpf_jit.h | 26 arch/loongarch/pci/pci.c | 2 arch/mips/kernel/ftrace.c | 25 arch/mips/sgi-ip22/ip22-gio.c | 3 arch/parisc/kernel/asm-offsets.c | 2 arch/parisc/kernel/entry.S | 16 arch/powerpc/boot/addnote.c | 7 arch/powerpc/include/asm/book3s/32/tlbflush.h | 5 arch/powerpc/include/asm/book3s/64/mmu-hash.h | 1 arch/powerpc/kernel/btext.c | 3 arch/powerpc/kernel/process.c | 5 arch/powerpc/kexec/core_64.c | 19 arch/powerpc/mm/book3s32/tlb.c | 9 arch/powerpc/mm/book3s64/internal.h | 2 arch/powerpc/mm/book3s64/mmu_context.c | 2 arch/powerpc/mm/book3s64/slb.c | 88 arch/powerpc/platforms/pseries/cmm.c | 5 arch/riscv/crypto/chacha-riscv64-zvkb.S | 5 arch/s390/include/uapi/asm/ipl.h | 1 arch/s390/kernel/ipl.c | 48 arch/x86/crypto/blake2s-core.S | 4 arch/x86/entry/common.c | 72 arch/x86/events/amd/core.c | 7 arch/x86/events/amd/uncore.c | 5 arch/x86/include/asm/irq_remapping.h | 7 arch/x86/include/asm/ptrace.h | 20 arch/x86/kernel/cpu/mce/threshold.c | 3 arch/x86/kernel/cpu/microcode/amd.c | 106 arch/x86/kernel/fpu/xstate.c | 4 arch/x86/kernel/irq.c | 23 arch/x86/kvm/lapic.c | 32 arch/x86/kvm/svm/nested.c | 6 arch/x86/kvm/svm/svm.c | 44 arch/x86/kvm/svm/svm.h | 7 arch/x86/kvm/vmx/nested.c | 3 arch/x86/kvm/x86.c | 25 arch/x86/xen/enlighten_pv.c | 69 block/blk-mq.c | 2 block/blk-zoned.c | 191 - block/blk.h | 14 block/genhd.c | 2 crypto/af_alg.c | 5 crypto/algif_hash.c | 3 crypto/algif_rng.c | 3 crypto/seqiv.c | 8 drivers/acpi/acpi_pcc.c | 2 drivers/acpi/acpica/nswalk.c | 9 drivers/acpi/cppc_acpi.c | 3 drivers/acpi/fan.h | 33 drivers/acpi/fan_hwmon.c | 10 drivers/acpi/property.c | 8 drivers/amba/tegra-ahb.c | 1 drivers/base/power/runtime.c | 22 drivers/block/floppy.c | 2 drivers/block/rnbd/rnbd-clt.c | 13 drivers/block/rnbd/rnbd-clt.h | 2 drivers/bluetooth/btusb.c | 22 drivers/bus/ti-sysc.c | 11 drivers/char/applicom.c | 5 drivers/char/ipmi/ipmi_msghandler.c | 20 drivers/char/tpm/tpm-chip.c | 1 drivers/char/tpm/tpm1-cmd.c | 5 drivers/char/tpm/tpm2-cmd.c | 11 drivers/char/tpm/tpm2-sessions.c | 85 drivers/clk/mvebu/cp110-system-controller.c | 20 drivers/clk/qcom/dispcc-sm7150.c | 2 drivers/clk/samsung/clk-exynos-clkout.c | 2 drivers/cpufreq/cpufreq-dt-platdev.c | 1 drivers/cpufreq/cpufreq-nforce2.c | 3 drivers/cpufreq/s5pv210-cpufreq.c | 6 drivers/cpuidle/governors/menu.c | 9 
drivers/cpuidle/governors/teo.c | 7 drivers/crypto/caam/caamrng.c | 4 drivers/firewire/nosy.c | 10 drivers/firmware/imx/imx-scu-irq.c | 4 drivers/firmware/stratix10-svc.c | 11 drivers/gpio/Makefile | 1 drivers/gpio/gpio-regmap.c | 2 drivers/gpio/gpiolib-acpi-core.c | 1419 ++++++++ drivers/gpio/gpiolib-acpi-quirks.c | 412 ++ drivers/gpio/gpiolib-acpi.c | 1737 ---------- drivers/gpio/gpiolib-acpi.h | 15 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 27 drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 27 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 62 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm | 37 drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 1 drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 59 drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2 drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 8 drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 8 drivers/gpu/drm/drm_buddy.c | 394 +- drivers/gpu/drm/drm_displayid.c | 58 drivers/gpu/drm/drm_displayid_internal.h | 2 drivers/gpu/drm/gma500/fbdev.c | 43 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 37 drivers/gpu/drm/i915/intel_memory_region.h | 2 drivers/gpu/drm/imagination/pvr_gem.c | 11 drivers/gpu/drm/mediatek/mtk_ddp_comp.c | 33 drivers/gpu/drm/mediatek/mtk_ddp_comp.h | 2 drivers/gpu/drm/mediatek/mtk_dp.c | 1 drivers/gpu/drm/mediatek/mtk_drm_drv.c | 4 drivers/gpu/drm/mgag200/mgag200_mode.c | 25 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 2 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 10 drivers/gpu/drm/nouveau/dispnv50/atom.h | 13 drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2 drivers/gpu/drm/panel/panel-sony-td4353-jdi.c | 2 drivers/gpu/drm/panthor/panthor_gem.c | 18 drivers/gpu/drm/ttm/ttm_bo_vm.c | 6 drivers/gpu/drm/xe/xe_bo.c | 15 drivers/gpu/drm/xe/xe_dma_buf.c | 2 drivers/gpu/drm/xe/xe_exec.c | 3 drivers/gpu/drm/xe/xe_gt.c | 7 drivers/gpu/drm/xe/xe_guc_submit.c | 20 drivers/gpu/drm/xe/xe_heci_gsc.c | 4 drivers/gpu/drm/xe/xe_oa.c | 13 drivers/gpu/drm/xe/xe_vm.c | 8 drivers/gpu/drm/xe/xe_vm_types.h | 2 drivers/hid/hid-input.c | 18 drivers/hid/hid-logitech-dj.c | 56 drivers/hwmon/dell-smm-hwmon.c | 9 drivers/hwmon/ibmpex.c | 9 drivers/hwmon/ltc4282.c | 9 drivers/hwmon/max16065.c | 7 drivers/hwmon/max6697.c | 2 drivers/hwmon/tmp401.c | 2 drivers/hwmon/w83791d.c | 17 drivers/hwmon/w83l786ng.c | 26 drivers/hwtracing/intel_th/core.c | 20 drivers/i2c/busses/i2c-amd-mp2-pci.c | 5 drivers/i2c/busses/i2c-designware-core.h | 1 drivers/i2c/busses/i2c-designware-master.c | 7 drivers/iio/adc/ti_am335x_adc.c | 2 drivers/infiniband/core/addr.c | 33 drivers/infiniband/core/cma.c | 3 drivers/infiniband/core/device.c | 4 drivers/infiniband/core/verbs.c | 2 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 7 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 2 drivers/infiniband/hw/bnxt_re/qplib_res.c | 8 drivers/infiniband/hw/efa/efa_verbs.c | 4 drivers/infiniband/hw/irdma/utils.c | 3 drivers/infiniband/ulp/rtrs/rtrs-clt.c | 1 drivers/input/keyboard/lkkbd.c | 5 drivers/input/mouse/alps.c | 1 drivers/input/serio/i8042-acpipnpio.h | 7 drivers/input/touchscreen/ti_am335x_tsc.c | 2 drivers/interconnect/qcom/sdx75.c | 26 drivers/interconnect/qcom/sdx75.h | 2 drivers/iommu/amd/init.c | 15 drivers/iommu/amd/iommu.c | 2 drivers/iommu/apple-dart.c | 2 drivers/iommu/arm/arm-smmu/qcom_iommu.c | 10 drivers/iommu/exynos-iommu.c | 9 drivers/iommu/intel/irq_remapping.c | 8 
drivers/iommu/iommu-sva.c | 3 drivers/iommu/iommufd/selftest.c | 8 drivers/iommu/ipmmu-vmsa.c | 2 drivers/iommu/mtk_iommu.c | 27 drivers/iommu/mtk_iommu_v1.c | 25 drivers/iommu/omap-iommu.c | 2 drivers/iommu/omap-iommu.h | 2 drivers/iommu/sun50i-iommu.c | 2 drivers/iommu/tegra-smmu.c | 5 drivers/isdn/capi/capi.c | 8 drivers/leds/leds-cros_ec.c | 5 drivers/leds/leds-lp50xx.c | 67 drivers/md/dm-bufio.c | 10 drivers/md/dm-ebs-target.c | 2 drivers/md/md.c | 5 drivers/md/raid10.c | 3 drivers/md/raid5.c | 10 drivers/media/cec/core/cec-core.c | 1 drivers/media/common/videobuf2/videobuf2-dma-contig.c | 1 drivers/media/i2c/adv7604.c | 4 drivers/media/i2c/adv7842.c | 11 drivers/media/i2c/imx219.c | 9 drivers/media/i2c/msp3400-kthreads.c | 2 drivers/media/i2c/tda1997x.c | 1 drivers/media/platform/amphion/vpu_malone.c | 35 drivers/media/platform/amphion/vpu_v4l2.c | 28 drivers/media/platform/amphion/vpu_v4l2.h | 18 drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c | 14 drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c | 14 drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c | 12 drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h | 2 drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c | 5 drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c | 12 drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h | 2 drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c | 5 drivers/media/platform/renesas/rcar_drif.c | 1 drivers/media/platform/samsung/exynos4-is/media-dev.c | 10 drivers/media/platform/ti/davinci/vpif_capture.c | 4 drivers/media/platform/ti/davinci/vpif_display.c | 4 drivers/media/platform/verisilicon/hantro_g2.c | 88 drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c | 17 drivers/media/platform/verisilicon/hantro_g2_regs.h | 13 drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c | 2 drivers/media/platform/verisilicon/hantro_hw.h | 1 drivers/media/platform/verisilicon/imx8m_vpu_hw.c | 2 drivers/media/rc/st_rc.c | 2 drivers/media/test-drivers/vidtv/vidtv_channel.c | 3 drivers/media/usb/dvb-usb/dtv5100.c | 5 drivers/media/usb/pvrusb2/pvrusb2-hdw.c | 2 drivers/mfd/altera-sysmgr.c | 2 drivers/mfd/max77620.c | 15 drivers/misc/mei/Kconfig | 2 drivers/misc/vmw_balloon.c | 3 drivers/mmc/host/Kconfig | 4 drivers/mmc/host/sdhci-msm.c | 27 drivers/mmc/host/sdhci-of-arasan.c | 2 drivers/mtd/mtdpart.c | 7 drivers/mtd/spi-nor/winbond.c | 24 drivers/net/can/usb/gs_usb.c | 2 drivers/net/dsa/b53/b53_common.c | 3 drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 2 drivers/net/ethernet/broadcom/b44.c | 3 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 3 drivers/net/ethernet/cadence/macb_main.c | 3 drivers/net/ethernet/freescale/enetc/enetc.c | 3 drivers/net/ethernet/freescale/fec_main.c | 7 drivers/net/ethernet/google/gve/gve_main.c | 2 drivers/net/ethernet/google/gve/gve_utils.c | 2 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 4 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 4 drivers/net/ethernet/intel/e1000/e1000_main.c | 10 drivers/net/ethernet/intel/i40e/i40e.h | 11 drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 12 drivers/net/ethernet/intel/i40e/i40e_main.c | 1 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 drivers/net/ethernet/intel/iavf/iavf_main.c | 4 drivers/net/ethernet/intel/idpf/idpf_dev.c | 3 drivers/net/ethernet/intel/idpf/idpf_lib.c | 2 drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 61 
drivers/net/ethernet/intel/idpf/idpf_txrx.c | 750 +--- drivers/net/ethernet/intel/idpf/idpf_txrx.h | 95 drivers/net/ethernet/intel/idpf/idpf_vf_dev.c | 3 drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 8 drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 5 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 97 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h | 1 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6 drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 48 drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h | 1 drivers/net/ethernet/mellanox/mlx5/core/main.c | 1 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 2 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 27 drivers/net/ethernet/realtek/r8169_main.c | 5 drivers/net/ethernet/smsc/smc91x.c | 10 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 17 drivers/net/fjes/fjes_hw.c | 12 drivers/net/ipvlan/ipvlan_core.c | 3 drivers/net/mdio/mdio-aspeed.c | 7 drivers/net/phy/marvell-88q2xxx.c | 2 drivers/net/team/team_core.c | 2 drivers/net/usb/asix_common.c | 5 drivers/net/usb/rtl8150.c | 2 drivers/net/usb/sr9700.c | 4 drivers/net/usb/usbnet.c | 2 drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c | 14 drivers/net/wireless/mediatek/mt76/eeprom.c | 37 drivers/net/wireless/mediatek/mt76/mt7615/main.c | 4 drivers/net/wireless/mediatek/mt76/mt7615/pci.c | 6 drivers/net/wireless/mediatek/mt76/mt7615/sdio.c | 4 drivers/net/wireless/mediatek/mt76/mt7615/usb.c | 4 drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 4 drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h | 3 drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 2 drivers/net/wireless/mediatek/mt76/mt7921/pci.c | 6 drivers/net/wireless/mediatek/mt76/mt7921/sdio.c | 6 drivers/net/wireless/mediatek/mt76/mt7921/usb.c | 4 drivers/net/wireless/mediatek/mt76/mt7925/init.c | 24 drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 51 drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h | 21 drivers/net/wireless/mediatek/mt76/mt7925/pci.c | 33 drivers/net/wireless/mediatek/mt76/mt7925/usb.c | 20 drivers/net/wireless/mediatek/mt76/mt792x.h | 2 drivers/net/wireless/realtek/rtl8xxxu/core.c | 7 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 3 drivers/net/wireless/realtek/rtw88/sdio.c | 4 drivers/nfc/pn533/usb.c | 2 drivers/nvme/host/fabrics.c | 2 drivers/nvme/host/fc.c | 6 drivers/of/fdt.c | 2 drivers/parisc/gsc.c | 4 drivers/pci/controller/pcie-brcmstb.c | 107 drivers/pci/pci-driver.c | 4 drivers/perf/arm_cspmu/arm_cspmu.c | 4 drivers/phy/broadcom/phy-bcm63xx-usbh.c | 6 drivers/pinctrl/renesas/pinctrl-rzg2l.c | 75 drivers/platform/chrome/cros_ec_ishtp.c | 1 drivers/platform/mellanox/mlxbf-pmc.c | 14 drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c | 4 drivers/platform/x86/hp/hp-bioscfg/int-attributes.c | 2 drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c | 5 drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c | 5 drivers/platform/x86/hp/hp-bioscfg/string-attributes.c | 2 drivers/platform/x86/ibm_rtl.c | 2 drivers/platform/x86/intel/chtwc_int33fe.c | 29 drivers/platform/x86/intel/hid.c | 12 drivers/platform/x86/msi-laptop.c | 3 drivers/pmdomain/imx/gpc.c | 5 drivers/rpmsg/qcom_glink_native.c | 8 drivers/s390/block/dasd_eckd.c | 8 drivers/scsi/aic94xx/aic94xx_init.c | 3 drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 1 drivers/scsi/mpi3mr/mpi3mr_fw.c | 2 drivers/scsi/qla2xxx/qla_def.h | 1 drivers/scsi/qla2xxx/qla_gbl.h | 2 drivers/scsi/qla2xxx/qla_isr.c | 32 drivers/scsi/qla2xxx/qla_mbx.c | 2 drivers/scsi/qla2xxx/qla_mid.c | 4 
drivers/scsi/qla2xxx/qla_os.c | 14 drivers/scsi/scsi_debug.c | 2 drivers/scsi/smartpqi/smartpqi_init.c | 4 drivers/soc/amlogic/meson-canvas.c | 5 drivers/soc/apple/mailbox.c | 15 drivers/soc/qcom/ocmem.c | 2 drivers/soc/qcom/qcom-pbs.c | 2 drivers/soc/samsung/exynos-pmu.c | 2 drivers/soc/tegra/fuse/fuse-tegra.c | 2 drivers/soundwire/stream.c | 6 drivers/spi/spi-cadence-quadspi.c | 4 drivers/spi/spi-fsl-spi.c | 2 drivers/staging/greybus/uart.c | 7 drivers/target/target_core_transport.c | 1 drivers/tty/serial/serial_base_bus.c | 8 drivers/tty/serial/serial_core.c | 7 drivers/tty/serial/sh-sci.c | 2 drivers/tty/serial/sprd_serial.c | 6 drivers/tty/serial/xilinx_uartps.c | 16 drivers/tty/tty_port.c | 17 drivers/ufs/core/ufshcd.c | 5 drivers/ufs/host/ufs-mediatek.c | 5 drivers/usb/class/cdc-acm.c | 7 drivers/usb/dwc3/dwc3-of-simple.c | 7 drivers/usb/dwc3/gadget.c | 2 drivers/usb/dwc3/host.c | 2 drivers/usb/gadget/udc/lpc32xx_udc.c | 21 drivers/usb/host/ohci-nxp.c | 2 drivers/usb/host/xhci-dbgtty.c | 2 drivers/usb/host/xhci-hub.c | 2 drivers/usb/phy/phy-fsl-usb.c | 1 drivers/usb/phy/phy-isp1301.c | 7 drivers/usb/renesas_usbhs/pipe.c | 2 drivers/usb/serial/usb-serial.c | 7 drivers/usb/storage/unusual_uas.h | 2 drivers/usb/typec/altmodes/displayport.c | 8 drivers/usb/typec/ucsi/ucsi.c | 6 drivers/usb/usbip/vhci_hcd.c | 6 drivers/vdpa/octeon_ep/octep_vdpa_main.c | 1 drivers/vfio/pci/nvgrace-gpu/main.c | 4 drivers/vfio/pci/pds/dirty.c | 7 drivers/vfio/pci/vfio_pci_rdwr.c | 24 drivers/vhost/vsock.c | 15 drivers/video/fbdev/gbefb.c | 5 drivers/video/fbdev/pxafb.c | 12 drivers/video/fbdev/tcx.c | 2 drivers/virtio/virtio_balloon.c | 4 drivers/watchdog/via_wdt.c | 1 fs/btrfs/inode.c | 1 fs/btrfs/ioctl.c | 4 fs/btrfs/scrub.c | 5 fs/btrfs/tree-log.c | 46 fs/btrfs/volumes.c | 1 fs/erofs/zdata.c | 8 fs/exfat/file.c | 5 fs/exfat/super.c | 19 fs/ext4/ialloc.c | 1 fs/ext4/inode.c | 1 fs/ext4/mballoc.c | 2 fs/ext4/orphan.c | 4 fs/ext4/super.c | 6 fs/ext4/xattr.c | 6 fs/f2fs/compress.c | 5 fs/f2fs/data.c | 17 fs/f2fs/extent_cache.c | 5 fs/f2fs/f2fs.h | 17 fs/f2fs/file.c | 20 fs/f2fs/gc.c | 2 fs/f2fs/inode.c | 2 fs/f2fs/namei.c | 6 fs/f2fs/recovery.c | 20 fs/f2fs/segment.c | 9 fs/f2fs/super.c | 160 fs/f2fs/xattr.c | 30 fs/f2fs/xattr.h | 10 fs/fuse/file.c | 37 fs/gfs2/glops.c | 3 fs/gfs2/lops.c | 2 fs/gfs2/quota.c | 2 fs/gfs2/super.c | 4 fs/hfsplus/bnode.c | 4 fs/hfsplus/dir.c | 7 fs/hfsplus/inode.c | 32 fs/iomap/buffered-io.c | 41 fs/iomap/direct-io.c | 10 fs/jbd2/journal.c | 20 fs/jbd2/transaction.c | 2 fs/libfs.c | 50 fs/lockd/svc4proc.c | 4 fs/lockd/svclock.c | 21 fs/lockd/svcproc.c | 5 fs/locks.c | 12 fs/nfs_common/nfslocalio.c | 10 fs/nfsd/blocklayout.c | 3 fs/nfsd/export.c | 2 fs/nfsd/filecache.c | 2 fs/nfsd/localio.c | 4 fs/nfsd/netns.h | 11 fs/nfsd/nfs4state.c | 4 fs/nfsd/nfs4xdr.c | 5 fs/nfsd/nfssvc.c | 45 fs/nfsd/vfs.h | 3 fs/notify/fsnotify.c | 9 fs/ntfs3/file.c | 14 fs/ntfs3/frecord.c | 35 fs/ntfs3/ntfs_fs.h | 9 fs/ntfs3/run.c | 6 fs/ntfs3/super.c | 5 fs/ocfs2/suballoc.c | 10 fs/smb/client/fs_context.c | 2 fs/smb/server/mgmt/tree_connect.c | 18 fs/smb/server/mgmt/tree_connect.h | 1 fs/smb/server/mgmt/user_session.c | 4 fs/smb/server/smb2pdu.c | 20 fs/smb/server/vfs.c | 5 fs/smb/server/vfs_cache.c | 88 fs/xfs/scrub/attr_repair.c | 2 fs/xfs/xfs_attr_item.c | 2 fs/xfs/xfs_buf_item.c | 1 fs/xfs/xfs_qm.c | 5 include/drm/drm_buddy.h | 11 include/drm/drm_edid.h | 6 include/linux/balloon_compaction.h | 43 include/linux/compiler_types.h | 13 include/linux/fs.h | 2 include/linux/genalloc.h | 1 
include/linux/hrtimer.h | 23 include/linux/jbd2.h | 6 include/linux/kasan.h | 16 include/linux/nfslocalio.h | 12 include/linux/reset.h | 1 include/linux/soundwire/sdw.h | 2 include/linux/tpm.h | 8 include/linux/tty_port.h | 21 include/linux/vfio_pci_core.h | 10 include/media/v4l2-mem2mem.h | 3 include/net/ip.h | 6 include/net/ip6_route.h | 4 include/net/route.h | 2 include/uapi/drm/xe_drm.h | 1 include/uapi/linux/mptcp.h | 1 io_uring/io_uring.c | 3 io_uring/openclose.c | 2 io_uring/poll.c | 9 kernel/kallsyms.c | 5 kernel/livepatch/core.c | 8 kernel/sched/cpudeadline.c | 34 kernel/sched/cpudeadline.h | 4 kernel/sched/deadline.c | 8 kernel/sched/debug.c | 8 kernel/sched/ext.c | 58 kernel/sched/fair.c | 103 kernel/sched/rt.c | 52 kernel/sched/sched.h | 4 kernel/scs.c | 2 kernel/trace/fgraph.c | 10 kernel/trace/trace_events.c | 2 kernel/trace/trace_events_synth.c | 1 lib/idr.c | 2 mm/balloon_compaction.c | 9 mm/damon/tests/core-kunit.h | 99 mm/damon/tests/sysfs-kunit.h | 25 mm/damon/tests/vaddr-kunit.h | 26 mm/kasan/common.c | 32 mm/kasan/hw_tags.c | 2 mm/kasan/shadow.c | 4 mm/ksm.c | 18 mm/page_owner.c | 2 mm/shmem.c | 18 mm/vmalloc.c | 8 net/bluetooth/rfcomm/tty.c | 7 net/bridge/br_private.h | 1 net/caif/cffrml.c | 9 net/ceph/osdmap.c | 116 net/core/sock.c | 16 net/dsa/dsa.c | 8 net/ethtool/ioctl.c | 30 net/handshake/request.c | 8 net/hsr/hsr_device.c | 7 net/hsr/hsr_forward.c | 2 net/ipv4/fib_trie.c | 7 net/ipv6/calipso.c | 3 net/ipv6/exthdrs.c | 2 net/ipv6/icmp.c | 4 net/ipv6/ila/ila_lwt.c | 2 net/ipv6/ioam6_iptunnel.c | 37 net/ipv6/ip6_gre.c | 17 net/ipv6/ip6_output.c | 19 net/ipv6/ip6_tunnel.c | 4 net/ipv6/ip6_udp_tunnel.c | 2 net/ipv6/ip6_vti.c | 2 net/ipv6/ndisc.c | 6 net/ipv6/netfilter/nf_dup_ipv6.c | 2 net/ipv6/output_core.c | 2 net/ipv6/route.c | 33 net/ipv6/rpl_iptunnel.c | 4 net/ipv6/seg6_iptunnel.c | 20 net/ipv6/seg6_local.c | 2 net/mac80211/cfg.c | 10 net/mptcp/pm_netlink.c | 3 net/mptcp/protocol.c | 22 net/netfilter/ipvs/ip_vs_xmit.c | 3 net/netfilter/nf_conncount.c | 25 net/netfilter/nf_nat_core.c | 14 net/netfilter/nf_tables_api.c | 11 net/netfilter/nft_ct.c | 5 net/netrom/nr_out.c | 4 net/nfc/core.c | 9 net/openvswitch/flow_netlink.c | 13 net/openvswitch/vport-netdev.c | 17 net/rose/af_rose.c | 2 net/sched/sch_ets.c | 6 net/sunrpc/auth_gss/svcauth_gss.c | 3 net/sunrpc/xprtrdma/svc_rdma_rw.c | 7 net/wireless/core.c | 1 net/wireless/core.h | 1 net/wireless/mlme.c | 19 net/wireless/sme.c | 2 net/wireless/util.c | 23 samples/ftrace/ftrace-direct-modify.c | 8 samples/ftrace/ftrace-direct-multi-modify.c | 8 samples/ftrace/ftrace-direct-multi.c | 4 samples/ftrace/ftrace-direct-too.c | 4 samples/ftrace/ftrace-direct.c | 4 scripts/Makefile.build | 26 scripts/Makefile.modinst | 2 scripts/faddr2line | 13 security/keys/trusted-keys/trusted_tpm2.c | 6 sound/isa/wavefront/wavefront_midi.c | 133 sound/isa/wavefront/wavefront_synth.c | 18 sound/pci/hda/cs35l41_hda.c | 2 sound/pcmcia/pdaudiocf/pdaudiocf.c | 8 sound/pcmcia/vx/vxpocket.c | 8 sound/soc/codecs/ak4458.c | 4 sound/soc/codecs/lpass-tx-macro.c | 3 sound/soc/codecs/wcd939x-sdw.c | 8 sound/soc/qcom/qdsp6/q6adm.c | 146 sound/soc/qcom/qdsp6/q6apm-dai.c | 2 sound/soc/qcom/qdsp6/q6asm-dai.c | 7 sound/soc/qcom/sc7280.c | 2 sound/soc/qcom/sc8280xp.c | 2 sound/soc/qcom/sdw.c | 107 sound/soc/qcom/sdw.h | 1 sound/soc/qcom/sm8250.c | 2 sound/soc/qcom/x1e80100.c | 2 sound/soc/sh/rz-ssi.c | 64 sound/soc/stm/stm32_sai.c | 14 sound/soc/stm/stm32_sai_sub.c | 51 sound/usb/mixer_us16x08.c | 20 tools/lib/perf/cpumap.c | 10 tools/mm/page_owner_sort.c | 
6 tools/testing/ktest/config-bisect.pl | 4 tools/testing/nvdimm/test/nfit.c | 7 tools/testing/radix-tree/idr-test.c | 21 tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc | 5 tools/testing/selftests/iommu/iommufd.c | 54 tools/testing/selftests/iommu/iommufd_fail_nth.c | 3 tools/testing/selftests/iommu/iommufd_utils.h | 38 tools/testing/selftests/net/mptcp/pm_netlink.sh | 4 tools/testing/selftests/net/mptcp/pm_nl_ctl.c | 11 tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c | 13 tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh | 2 tools/testing/selftests/net/netfilter/packetdrill/conntrack_syn_challenge_ack.pkt | 2 tools/testing/selftests/net/tap.c | 16 virt/kvm/kvm_main.c | 2 608 files changed, 7293 insertions(+), 5171 deletions(-)
Aboorva Devarajan (1): cpuidle: menu: Use residency threshold in polling state override decisions
Ahmed Genidi (1): KVM: arm64: Initialize SCTLR_EL1 in __kvm_hyp_init_cpu()
Akhil P Oommen (1): drm/msm/a6xx: Fix out of bound IO access in a6xx_get_gmu_registers
Al Viro (1): shmem: fix recovery on rename failures
Alessio Belle (1): drm/imagination: Disallow exporting of PM/FW protected objects
Alex Deucher (3): drm/amd/display: Use GFP_ATOMIC in dc_create_plane_state()
      drm/amdgpu/gmc12: add amdgpu_vm_handle_fault() handling
      drm/amdgpu/gmc11: add amdgpu_vm_handle_fault() handling
Alexander Stein (1): serial: core: Fix serial device initialization
Alexey Simakov (2): broadcom: b44: prevent uninitialized value usage
      hwmon: (tmp401) fix overflow caused by default conversion rate value
Alexey Velichayshiy (1): gfs2: fix freeze error handling
Alice C. Munduruca (1): selftests: net: fix "buffer overflow detected" for tap.c
Alison Schofield (1): tools/testing/nvdimm: Use per-DIMM device handle
Alok Tiwari (2): RDMA/bnxt_re: Fix incorrect BAR check in bnxt_qplib_map_creq_db()
      RDMA/bnxt_re: Fix IB_SEND_IP_CSUM handling in post_send
Aloka Dixit (1): wifi: mac80211: do not use old MBSSID elements
Amir Goldstein (1): fsnotify: do not generate ACCESS/MODIFY events on child for special files
Andreas Gruenbacher (3): gfs2: fix remote evict for read-only filesystems
      gfs2: Fix "gfs2: Switch to wait_event in gfs2_quotad"
      gfs2: Fix use of bio_chain
Andrew Jeffery (1): dt-bindings: mmc: sdhci-of-aspeed: Switch ref to sdhci-common.yaml
Andrew Morton (1): genalloc.h: fix htmldocs warning
Andrey Vatoropin (1): scsi: target: Reset t_task_cdb pointer in error case
Andrii Melnychenko (1): netfilter: nft_ct: add seqadj extension for natted connections
Andy Shevchenko (6): nfsd: Mark variable __maybe_unused to avoid W=1 build break
      serial: core: Restore sysfs fwnode information
      gpiolib: acpi: Switch to use enum in acpi_gpio_in_ignore_list()
      gpiolib: acpi: Handle deferred list via new API
      gpiolib: acpi: Add acpi_gpio_need_run_edge_events_on_boot() getter
      gpiolib: acpi: Move quirks to a separate file
Ankit Garg (1): gve: defer interrupt enabling until NAPI registration
Anshumali Gaur (1): octeontx2-pf: fix "UBSAN: shift-out-of-bounds error"
Anurag Dutta (1): spi: cadence-quadspi: Fix clock disable on probe failure path
Ard Biesheuvel (1): drm/i915: Fix format string truncation warning
Armin Wolf (1): ACPI: fan: Workaround for 64-bit firmware bug
Arunpravin Paneer Selvam (2): drm/buddy: Optimize free block management with RB tree
      drm/buddy: Separate clear and dirty free block trees
Ashutosh Dixit (1): drm/xe/oa: Disallow 0 OA property values
Askar Safin (1): gpiolib: acpi: Add quirk for Dell Precision 7780
Avadhut Naik (1): x86/mce: Do not clear bank's poll bit in mce_poll_banks on AMD SMCA systems
Bagas Sanjaya (1): net: bridge: Describe @tunnel_hash member in net_bridge_vlan_group struct
Baokun Li (1): ext4: align max orphan file size with e2fsprogs limit
Bartosz Golaszewski (1): platform/x86: intel: chtwc_int33fe: don't dereference swnode args
Ben Collins (1): powerpc/addnote: Fix overflow on 32-bit builds
Bernd Schubert (2): fuse: Always flush the page cache before FOPEN_DIRECT_IO write
      fuse: Invalidate the page cache after FOPEN_DIRECT_IO write
Biju Das (2): ASoC: renesas: rz-ssi: Fix channel swap issue in full duplex mode
      ASoC: renesas: rz-ssi: Fix rz_ssi_priv::hw_params_cache::sample_width
Bitterblue Smith (1): wifi: rtl8xxxu: Fix HT40 channel config for RTL8192CU, RTL8723AU
Boris Brezillon (1): drm/panthor: Flush shmem writes before mapping buffers CPU-uncached
Borislav Petkov (AMD) (1): x86/microcode/AMD: Select which microcode patch to load
Brian Gerst (1): x86/xen: Move Xen upcall handler
Brian Vazquez (1): idpf: reduce mbx_task schedule delay to 300us
Byungchul Park (1): jbd2: use a weaker annotation in journal handling
Chandrakanth Patil (1): scsi: mpi3mr: Read missing IOCFacts flag for reply queue full overflow
Chao Yu (9): f2fs: fix to avoid potential deadlock
      f2fs: fix to avoid updating zero-sized extent in extent cache
      f2fs: fix return value of f2fs_recover_fsync_data()
      f2fs: fix to detect recoverable inode during dryrun of find_fsync_dnodes()
      f2fs: use global inline_xattr_slab instead of per-sb slab cache
      f2fs: fix to avoid updating compression context during writeback
      f2fs: add timeout in f2fs_enable_checkpoint()
      f2fs: dump more information for f2fs_{enable,disable}_checkpoint()
      f2fs: fix to propagate error from f2fs_enable_checkpoint()
Chen Changcheng (2): usb: usb-storage: No additional quirks need to be added to the EL-R12 optical drive.
      usb: usb-storage: Maintain minimal modifications to the bcdDevice range.
Chen-Yu Tsai (1): media: mediatek: vcodec: Use spinlock for context list protection lock
ChenXiaoSong (1): smb/server: fix return value of smb2_ioctl()
Chenghao Duan (2): samples/ftrace: Adjust LoongArch register restore order in direct calls
      LoongArch: Refactor register restoration in ftrace_common_return
Chia-Lin Kao (AceLan) (1): platform/x86/intel/hid: Add Dell Pro Rugged 10/12 tablet to VGBS DMI quirks
Chingbin Li (1): Bluetooth: btusb: Add new VID/PID 2b89/6275 for RTL8761BUV
Chris Lu (2): Bluetooth: btusb: MT7922: Add VID/PID 0489/e170
      Bluetooth: btusb: MT7920: Add VID/PID 0489/e135
Christian Hitz (3): leds: leds-lp50xx: Allow LED 0 to be added to module bank
      leds: leds-lp50xx: LP5009 supports 3 modules for a total of 9 LEDs
      leds: leds-lp50xx: Enable chip before any communication
Christian Marangi (1): mtd: mtdpart: ignore error -ENOENT from parsers on subpartitions
Christoffer Sandberg (1): Input: i8042 - add TUXEDO InfinityBook Max Gen10 AMD to i8042 quirk table
Christoph Hellwig (2): xfs: don't leak a locked dquot when xfs_dquot_attach_buf fails
      iomap: allocate s_dio_done_wq for async reads as well
Christophe Leroy (1): spi: fsl-cpm: Check length parity before switching to 16 bit mode
Chuck Lever (2): NFSD: Clear SECLABEL in the suppattr_exclcreat bitmap
      NFSD: NFSv4 file creation neglects setting ACL
Claudiu Beznea (2): serial: sh-sci: Check that the DMA cookie is valid
      pinctrl: renesas: rzg2l: Fix ISEL restore on resume
Colin Ian King (1): media: pvrusb2: Fix incorrect variable used in trace message
Cong Zhang (1): blk-mq: skip CPU offline notify on unmapped hctx
Cryolitia PukNgae (1): ACPICA: Avoid walking the Namespace if start_node is NULL
Dai Ngo (1): NFSD: use correct reservation type in nfsd4_scsi_fence_client
Damien Le Moal (4): block: Clear BLK_ZONE_WPLUG_PLUGGED when aborting plugged BIOs
      block: freeze queue when updating zone resources
      block: handle zone management operations completions
      block: fix NULL pointer dereference in blk_zone_reset_all_bio_endio()
Dan Carpenter (3): nfc: pn533: Fix error code in pn533_acr122_poweron_rdr()
      block: rnbd-clt: Fix signedness bug in init_dev()
      wifi: cfg80211: sme: store capped length in __cfg80211_connect_result()
Daniel Wagner (1): nvme-fc: don't hold rport lock when putting ctrl
Darrick J. Wong (2): xfs: fix stupid compiler warning
      xfs: fix a UAF problem in xattr repair
Dave Stevenson (1): media: i2c: imx219: Fix 1920x1080 mode to use 1:1 pixel aspect ratio
Dave Vasilevsky (1): powerpc, mm: Fix mprotect on book3s 32-bit
David Hildenbrand (4): powerpc/pseries/cmm: call balloon_devinfo_init() also without CONFIG_BALLOON_COMPACTION
      mm/balloon_compaction: we cannot have isolated pages in the balloon list
      mm/balloon_compaction: convert balloon_page_delete() to balloon_page_finalize()
      powerpc/pseries/cmm: adjust BALLOON_MIGRATE when migrating pages
David Strahan (1): scsi: smartpqi: Add support for Hurray Data new controller PCI device
Deepakkumar Karn (1): net: usb: rtl8150: fix memory leak on usb_submit_urb() failure
Deepanshu Kartikey (4): btrfs: fix memory leak of fs_devices in degraded seed device path
      f2fs: invalidate dentry cache on failed whiteout creation
      net: usb: asix: validate PHY address before use
      net: nfc: fix deadlock between nfc_unregister_device and rfkill_fop_write
Denis Arefev (1): ALSA: hda: cs35l41: Fix NULL pointer dereference in cs35l41_hda_read_acpi()
Denis Sergeev (1): hwmon: (dell-smm) Limit fan multiplier to avoid overflow
Dmitry Skorodumov (1): ipvlan: Ignore PACKET_LOOPBACK in handle_mode_l2()
Donet Tom (1): powerpc/64s/slb: Fix SLB multihit issue during SLB preload
Dongli Zhang (1): KVM: nVMX: Immediately refresh APICv controls as needed on nested VM-Exit
Doug Berger (1): sched/deadline: only set free_cpus for online runqueues
Duoming Zhou (5): Input: alps - fix use-after-free bugs caused by dev3_register_work
      usb: phy: fsl-usb: Fix use-after-free in delayed work during device removal
      media: TDA1997x: Remove redundant cancel_delayed_work in probe
      media: i2c: ADV7604: Remove redundant cancel_delayed_work in probe
      media: i2c: adv7842: Remove redundant cancel_delayed_work in probe
Encrow Thorne (1): reset: fix BIT macro reference
Eric Biggers (1): lib/crypto: x86/blake2s: Fix 32-bit arg treated as 64-bit
Eric Dumazet (3): ip6_gre: make ip6gre_header() robust
      ipv6: adopt dst_dev() helper
      net: use dst_dev_rcu() in sk_setup_caps()
Ethan Nelson-Moore (1): net: usb: sr9700: fix incorrect command used to write single register
Fedor Pchelkin (1): ext4: fix string copying in parse_apply_sb_mount_options()
Fernando Fernandez Mancera (1): netfilter: nf_conncount: fix leaked ct in error paths
Filipe Manana (2): btrfs: do not skip logging new dentries when logging a new name
      btrfs: don't log conflicting inode if it's a dir moved in the current transaction
Finn Thain (1): powerpc: Add reloc_offset() to font bitmap pointer used for bootx_printf()
Florian Westphal (2): netfilter: nf_nat: remove bogus direction check
      selftests: netfilter: packetdrill: avoid failure on HZ=100 kernel
Gal Pressman (1): ethtool: Avoid overflowing userspace buffer on stats query
George Kennedy (1): perf/x86/amd: Check event before enable to avoid GPF
Gongwei Li (1): Bluetooth: btusb: Add new VID/PID 13d3/3533 for RTL8821CE
Greg Kroah-Hartman (1): Linux 6.12.64
Gregory CLEMENT (1): MIPS: ftrace: Fix memory corruption when kernel is located beyond 32 bits
Gregory Herrero (1): i40e: validate ring_len parameter against hardware-specific values
Guangshuo Li (2): crypto: caam - Add check for kcalloc() in test_len()
      e1000: fix OOB in e1000_tbi_should_accept()
Gui-Dong Han (3): hwmon: (max16065) Use local variable to avoid TOCTOU
      hwmon: (w83791d) Convert macros to functions to avoid TOCTOU
      hwmon: (w83l786ng) Convert macros to functions to avoid TOCTOU
H. Peter Anvin (1): compiler_types.h: add "auto" as a macro for "__auto_type"
Haibo Chen (1): ext4: clear i_state_flags when alloc inode
Hal Feng (1): cpufreq: dt-platdev: Add JH7110S SOC to the allowlist
Hangbin Liu (1): hsr: hold rcu and dev lock for hsr_get_port_ndev
Hans de Goede (2): wifi: brcmfmac: Add DMI nvram filename quirk for Acer A1 840 tablet
      HID: logitech-dj: Remove duplicate error logging
Haotian Zhang (5): ALSA: vxpocket: Fix resource leak in vxpocket_probe error path
      ALSA: pcmcia: Fix resource leak in snd_pdacf_probe error path
      media: rc: st_rc: Fix reset control resource leak
      media: cec: Fix debugfs leak on bus_register() failure
      media: videobuf2: Fix device reference leak in vb2_dc_alloc error path
Haoxiang Li (7): MIPS: Fix a reference leak bug in ip22_check_gio()
      usb: typec: altmodes/displayport: Drop the device reference in dp_altmode_probe()
      usb: renesas_usbhs: Fix a resource leak in usbhs_pipe_malloc()
      xfs: fix a memory leak in xfs_buf_item_init()
      media: mediatek: vcodec: Fix a reference leak in mtk_vcodec_fw_vpu_init()
      fjes: Add missing iounmap in fjes_hw_init()
      nfsd: Drop the client reference in client_states_open()
Harshit Agarwal (1): sched/rt: Fix race in push_rt_task
Helge Deller (1): parisc: Do not reprogram affinitiy on ASP chip
Hengqi Chen (2): LoongArch: BPF: Zero-extend bpf_tail_call() index
      LoongArch: BPF: Sign extend kfunc call arguments
Herbert Xu (1): crypto: seqiv - Do not use req->iv after crypto_aead_encrypt
Honggang LI (1): RDMA/rtrs: Fix clt_path::max_pages_per_mr calculation
Hongyu Xie (1): usb: xhci: limit run_graceperiod for only usb 3.0 devices
Huacai Chen (2): LoongArch: Add new PCI ID for pci_fixup_vgadev()
      LoongArch: Fix build errors for CONFIG_RANDSTRUCT
Ian Rogers (1): libperf cpumap: Fix perf_cpu_map__max for an empty/NULL map
Ido Schimmel (4): mlxsw: spectrum_router: Fix possible neighbour reference count leak
      mlxsw: spectrum_router: Fix neighbour use-after-free
      mlxsw: spectrum_mr: Fix use-after-free when updating multicast route stats
      ipv4: Fix reference count leak when using error routes with nexthop objects
Ilya Dryomov (1): libceph: make decode_pool() more resilient against corrupted osdmaps
Ilya Maximets (1): net: openvswitch: fix middle attribute validation in push_nsh() action
Ivan Abramov (2): media: adv7842: Avoid possible out-of-bounds array accesses in adv7842_cp_log_status()
      media: msp3400: Avoid possible out-of-bounds array accesses in msp3400c_thread()
Jacky Chou (1): net: mdio: aspeed: add dummy read to avoid read-after-write issue
Jaegeuk Kim (1): f2fs: drop inode from the donation list when the last file is closed
Jamal Hadi Salim (1): net/sched: ets: Always remove class from active list before deleting in ets_qdisc_change
Jan Maslak (1): drm/xe: Restore engine registers before restarting schedulers after GT reset
Jan Prusakowski (1): f2fs: ensure node page reads complete before f2fs_put_super() finishes
Jang Ingyu (1): RDMA/core: Fix logic error in ib_get_gids_from_rdma_hdr()
Jani Nikula (3): drm/displayid: pass iter to drm_find_displayid_extension()
      drm/edid: add DRM_EDID_IDENT_INIT() to initialize struct drm_edid_ident
      drm/displayid: add quirk to ignore DisplayID checksum errors
Jared Kangas (1): mmc: sdhci-esdhc-imx: add alternate ARCH_S32 dependency to Kconfig
Jarkko Sakkinen (3): KEYS: trusted: Fix a memory leak in tpm2_load_cmd
      tpm: Cap the number of PCR banks
      tpm2-sessions: Fix tpm2_read_public range checks
Jason Gunthorpe (4): iommufd/selftest: Make it clearer to gcc that the access is not out of bounds
      iommufd/selftest: Check for overflow in IOMMU_TEST_OP_ADD_RESERVED
      RDMA/core: Check for the presence of LS_NLA_TYPE_DGID correctly
      RDMA/cm: Fix leaking the multicast GID table reference
Jay Cornwall (1): drm/amdkfd: Trap handler support for expert scheduling mode
Jens Axboe (2): io_uring/poll: correctly handle io_poll_add() return value on update
      io_uring: fix min_wait wakeups for SQPOLL
Jens Reidel (1): clk: qcom: dispcc-sm7150: Fix dispcc_mdss_pclk0_clk_src
Jeongjun Park (2): media: dvb-usb: dtv5100: fix out-of-bounds in dtv5100_i2c_msg()
      media: vidtv: initialize local pointers upon transfer of memory ownership
Jian Shen (3): net: hns3: using the num_tqps in the vf driver to apply for resources
      net: hns3: using the num_tqps to check whether tqp_index is out of range when vf get ring info from mbx
      net: hns3: add VLAN id validation before using
Jianpeng Chang (1): arm64: kdump: Fix elfcorehdr overlap caused by reserved memory processing reorder
Jiayuan Chen (2): ipv6: fix a BUG in rt6_get_pcpu_route() under PREEMPT_RT
      mm/kasan: fix incorrect unpoisoning in vrealloc for KASAN
Jim Mattson (2): KVM: SVM: Mark VMCB_NPT as dirty on nested VMRUN
      KVM: SVM: Mark VMCB_PERM_MAP as dirty on nested VMRUN
Jim Quinlan (2): PCI: brcmstb: Set MLW based on "num-lanes" DT property if present
      PCI: brcmstb: Fix disabling L0s capability
Jinhui Guo (5): ipmi: Fix the race between __scan_channels() and deliver_response()
      ipmi: Fix __scan_channels() failing to rescan channels
      i2c: designware: Disable SMBus interrupts to prevent storms from mis-configured firmware
      iommu/amd: Fix pci_segment memleak in alloc_pci_segment()
      iommu/amd: Propagate the error code returned by __modify_irte_ga() in modify_irte_ga()
Jiri Pirko (1): team: fix check for port enabled in team_queue_override_port_prio_changed()
Jiri Slaby (SUSE) (2): tty: introduce and use tty_port_tty_vhangup() helper
      tty: fix tty_port_tty_*hangup() kernel-doc
Joanne Koong (3): iomap: adjust read range correctly for non-block-aligned positions
      iomap: account for unaligned end offsets when truncating read range
      fuse: fix readahead reclaim deadlock
Johan Hovold (34): phy: broadcom: bcm63xx-usbh: fix section mismatches
      usb: ohci-nxp: fix device leak on probe failure
      usb: phy: isp1301: fix non-OF device reference imbalance
      usb: gadget: lpc32xx_udc: fix clock imbalance in error path
      amba: tegra-ahb: Fix device leak on SMMU enable
      soc: samsung: exynos-pmu: fix device leak on regmap lookup
      soc: qcom: pbs: fix device leak on lookup
      soc: qcom: ocmem: fix device leak on lookup
      soc: apple: mailbox: fix device leak on lookup
      soc: amlogic: canvas: fix device leak on lookup
      hwmon: (max6697) fix regmap leak on probe failure
      iommu/mediatek: fix use-after-free on probe deferral
      ASoC: codecs: wcd939x: fix regmap leak on probe failure
      ASoC: stm32: sai: fix device leak on probe
      ASoC: stm32: sai: fix clk prepare imbalance on probe failure
      ASoC: stm32: sai: fix OF node leak on probe
      iommu/apple-dart: fix device leak on of_xlate()
      iommu/exynos: fix device leak on of_xlate()
      iommu/ipmmu-vmsa: fix device leak on of_xlate()
      iommu/mediatek-v1: fix device leak on probe_device()
      iommu/mediatek-v1: fix device leaks on probe()
      iommu/mediatek: fix device leak on of_xlate()
      iommu/omap: fix device leaks on probe_device()
      iommu/qcom: fix device leak on of_xlate()
      iommu/sun50i: fix device leak on of_xlate()
      iommu/tegra: fix device leak on probe_device()
      mfd: altera-sysmgr: Fix device leak on sysmgr regmap lookup
      media: platform: mtk-mdp3: fix device leaks at probe
      media: vpif_capture: fix section mismatch
      media: vpif_display: fix section mismatch
      drm/mediatek: Fix probe resource leaks
      drm/mediatek: Fix probe memory leak
      drm/mediatek: Fix probe device leaks
      serial: core: fix OF node leak
Johannes Berg (2): wifi: cfg80211: stop radar detection in cfg80211_leave()
      wifi: cfg80211: use cfg80211_leave() in iftype change
John Garry (1): scsi: scsi_debug: Fix atomic write enable module param description
Jonas Gorski (1): net: dsa: b53: skip multicast entries for fdb_dump()
Jonathan Kim (1): drm/amdkfd: bump minimum vgpr size for gfx1151
Josef Bacik (1): btrfs: don't rewrite ret from inode_permission
Joshua Hay (8): idpf: add support for SW triggered interrupts
      idpf: trigger SW interrupt when exiting wb_on_itr mode
      idpf: add support for Tx refillqs in flow scheduling mode
      idpf: improve when to set RE bit logic
      idpf: simplify and fix splitq Tx packet rollback error path
      idpf: replace flow scheduling buffer ring with buffer pool
      idpf: stop Tx if there are insufficient buffer resources
      idpf: remove obsolete stashing code
Joshua Rogers (4): svcrdma: return 0 on success from svc_rdma_copy_inline_range
      svcrdma: use rc_pageoff for memcpy byte offset
      SUNRPC: svcauth_gss: avoid NULL deref on zero length gss_token in gss_read_proxy_verf
      svcrdma: bound check rq_pages index in inline path
Josua Mayer (1): clk: mvebu: cp110 add CLK_IGNORE_UNUSED to pcie_x10, pcie_x11 & pcie_x4
Juergen Gross (1): x86/xen: Fix sparse warning in enlighten_pv.c
Junbeom Yeom (1): erofs: fix unexpected EIO under memory pressure
Junjie Cao (1): Input: ti_am335x_tsc - fix off-by-one error in wire_order validation
Junrui Luo (6): caif: fix integer underflow in cffrml_receive()
      hwmon: (ibmpex) fix use-after-free in high/low store
      scsi: aic94xx: fix use-after-free in device removal path
      ALSA: wavefront: Clear substream pointers on close
      platform/x86: ibm_rtl: fix EBDA signature search pointer arithmetic
      platform/x86: hp-bioscfg: Fix out-of-bounds array access in ACPI package parsing
Junxiao Chang (2): drm/me/gsc: mei interrupt top half should be in irq disabled context
      mei: gsc: add dependency on Xe driver
Justin Iurman (1): net: ipv6: ioam6: use consistent dst names
Justin Tee (1): nvme-fabrics: add ENOKEY to no retry criteria for authentication failures
Kalesh AP (1): RDMA/bnxt_re: Fix to use correct page size for PDE table
Karina Yankevich (1): ext4: xattr: fix null pointer deref in ext4_raw_inode()
Kartik Rajput (1): soc/tegra: fuse: Do not register SoC device on ACPI boot
Kaushlendra Kumar (1): tools/mm/page_owner_sort: fix timestamp comparison for stable sorting
Kevin Tian (1): vfio/pci: Disable qword access to the PCI ROM bar
Kohei Enju (1): iavf: fix off-by-one issues in iavf_config_rss_reg()
Konstantin Komarov (3): fs/ntfs3: Support timestamps prior to epoch
      fs/ntfs3: check for shutdown in fsync
      fs/ntfs3: fix mount failure for sparse runs in run_unpack()
Krzysztof Kozlowski (8): dt-bindings: PCI: qcom,pcie-sc7280: Add missing required power-domains and resets
      dt-bindings: PCI: qcom,pcie-sc8280xp: Add missing required power-domains and resets
      dt-bindings: PCI: qcom,pcie-sm8150: Add missing required power-domains and resets
      dt-bindings: PCI: qcom,pcie-sm8250: Add missing required power-domains and resets
      dt-bindings: PCI: qcom,pcie-sm8350: Add missing required power-domains and resets
      dt-bindings: PCI: qcom,pcie-sm8450: Add missing required power-domains and resets
      dt-bindings: PCI: qcom,pcie-sm8550: Add missing required power-domains and resets
      mfd: max77620: Fix potential IRQ chip conflict when probing two devices
Krzysztof Niemiec (1): drm/i915/gem: Zero-initialize the eb.vma array in i915_gem_do_execbuffer
Laurent Pinchart (2): media: v4l2-mem2mem: Fix outdated documentation
      media: amphion: Make some vpu_v4l2 functions static
Li Chen (1): block: rate-limit capacity change info log
Li Nan (1): md: Fix static checker warning in analyze_sbs
Li Qiang (1): via_wdt: fix critical boot hang due to unnamed resource allocation
Lizhi Xu (1): usbip: Fix locking bug in RT-enabled kernels
Lu Baolu (1): iommu: disable SVA when CONFIG_X86 is set
Lukas Wunner (1): PCI/PM: Reinstate clearing state_saved in legacy and !PM codepaths
Lyude Paul (1): drm/nouveau/dispnv50: Don't call drm_atomic_get_crtc_state() in prepare_fb
Ma Ke (4): perf: arm_cspmu: fix error handling in arm_cspmu_impl_unregister()
      USB: lpc32xx_udc: Fix error handling in probe
      intel_th: Fix error handling in intel_th_output_open
      i2c: amd-mp2: fix reference leak in MP2 PCI device
Maciej Wieczor-Retman (2): kasan: refactor pcpu kasan vmalloc unpoison
      kasan: unpoison vms[area] addresses with a common tag
Mahesh Rao (1): firmware: stratix10-svc: Add mutex in stratix10 memory management
Marc Kleine-Budde (1): can: gs_usb: gs_can_open(): fix error handling
Marc Zyngier (1): arm64: Revamp HCR_EL2.E2H RES1 detection
Marek Szyprowski (1): media: samsung: exynos4-is: fix potential ABBA deadlock on init
Marijn Suijten (1): drm/panel: sony-td4353-jdi: Enable prepare_prev_first
Mario Limonciello (3): Revert "drm/amd/display: Fix pbn to kbps Conversion"
      drm/amdkfd: Export the cwsr_size and ctl_stack_size to userspace
      gpiolib: acpi: Add a quirk for Acer Nitro V15
Mario Limonciello (AMD) (2): Revert "drm/amd: Skip power ungate during suspend for VPE"
      gpiolib: acpi: Add quirk for ASUS ProArt PX13
Mark Pearson (1): usb: typec: ucsi: Handle incorrect num_connectors capability
Mark Rutland (1): KVM: arm64: Initialize HCR_EL2.E2H early
Matthew Brost (2): drm/xe: Adjust long-running workload timeslices to reasonable values
      drm/xe: Use usleep_range for accurate long-running workload timeslicing
Matthew Wilcox (Oracle) (2): ntfs: Do not overwrite uptodate pages
      idr: fix idr_alloc() returning an ID out of range
Matthias Schiffer (1): ti-sysc: allow OMAP2 and OMAP4 timers to be reserved on AM33xx
Matthieu Baerts (NGI0) (2): selftests: mptcp: pm: ensure unknown flags are ignored
      mptcp: pm: ignore unknown endpoint flags
Max Chou (1): Bluetooth: btusb: Add new VID/PID 0x0489/0xE12F for RTL8852BE-VT
Maxim Levitsky (1): KVM: x86: Don't clear async #PF queue when CR0.PG is disabled (e.g. on #SMI)
Miaoqian Lin (5): usb: dwc3: of-simple: fix clock resource leak in dwc3_of_simple_probe
      cpufreq: nforce2: fix reference count leak in nforce2
      virtio: vdpa: Fix reference count leak in octep_sriov_enable()
      media: renesas: rcar_drif: fix device node reference leak in rcar_drif_bond_enabled
      drm/mediatek: Fix device node reference leak in mtk_dp_dt_parse()
Michael Chan (1): bnxt_en: Fix XDP_TX path
Michael Margolin (1): RDMA/efa: Remove possible negative shift
Michal Schmidt (1): RDMA/irdma: avoid invalid read in irdma_net_event
Mike Snitzer (2): nfsd: update percpu_ref to manage references on nfsd_net
      nfsd: rename nfsd_serv_ prefixed methods and variables with nfsd_net_
Mikhail Malyshev (1): kbuild: Use objtree for module signing key path
Mikulas Patocka (1): dm-bufio: align write boundary on physical block size
Ming Qian (3): media: amphion: Cancel message work before releasing the VPU core
      media: amphion: Add a frame flush mode for decoder
      media: amphion: Remove vpu_vb_is_codecconfig
Minseong Kim (1): Input: lkkbd - disable pending work before freeing device
Miquel Raynal (6): mtd: spi-nor: winbond: Add support for W25Q01NWxxIQ chips
      mtd: spi-nor: winbond: Add support for W25Q01NWxxIM chips
      mtd: spi-nor: winbond: Add support for W25Q02NWxxIM chips
      mtd: spi-nor: winbond: Add support for W25H512NWxxAM chips
      mtd: spi-nor: winbond: Add support for W25H01NWxxAM chips
      mtd: spi-nor: winbond: Add support for W25H02NWxxAM chips
Morning Star (1): wifi: rtlwifi: 8192cu: fix tid out of range in rtl92cu_tx_fill_desc()
Moshe Shemesh (2): net/mlx5: fw reset, clear reset requested on drain_fw_reset
      net/mlx5: Drain firmware reset in shutdown callback
Nam Cao (2): hrtimers: Introduce hrtimer_update_function()
      serial: xilinx_uartps: Use helper function hrtimer_update_function()
Namjae Jeon (3): ksmbd: fix use-after-free in ksmbd_tree_connect_put under concurrency
      ksmbd: Fix refcount leak when invalid session is found on session lookup
      ksmbd: fix buffer validation by including null terminator size in EA length
Nathan Chancellor (1): clk: samsung: exynos-clkout: Assign .num before accessing .hws
NeilBrown (1): lockd: fix vfs_test_lock() calls
Nicolas Dufresne (2): media: verisilicon: Fix CPU stalls on G2 bus error
      media: verisilicon: Protect G2 HEVC decoder against invalid DPB index
Nicolas Ferre (2): ARM: dts: microchip: sama5d2: fix spi flexcom fifo size to 32
      ARM: dts: microchip: sama7g5: fix uart fifo size to 32
Nicolin Chen (1): iommufd/selftest: Update hw_info coverage for an input data_type
Nikolay Kuratov (1): drm/msm/dpu: Add missing NULL pointer check for pingpong interface
Nuno Sá (1): hwmon: (ltc4282): Fix reset_history file permissions
Nysal Jan K.A. (1): powerpc/kexec: Enable SMT before waking offline CPUs
Ondrej Mosnacek (1): bpf, arm64: Do not audit capability check in do_jit()
Pablo Neira Ayuso (1): netfilter: nf_tables: remove redundant chain validation on register store
Pankaj Raghav (1): scripts/faddr2line: Fix "Argument list too long" error
Paolo Abeni (2): mptcp: schedule rtx timer only after pushing data
      mptcp: avoid deadlock on fallback while reinjecting
Pedro Demarchi Gomes (1): ntfs: set dummy blocksize to read boot_block when mounting
Pei Xiao (1): iio: adc: ti_am335x_adc: Limit step_avg to valid range for gcc complains
Peng Fan (1): firmware: imx: scu-irq: Init workqueue before request mbox channel
Pengjie Zhang (2): ACPI: PCC: Fix race condition by removing static qualifier
      ACPI: CPPC: Fix missing PCC check for guaranteed_perf
Peter Wang (1): scsi: ufs: host: mediatek: Fix shutdown/suspend race condition
Peter Zijlstra (3): sched/fair: Revert max_newidle_lb_cost bump
      x86/ptrace: Always inline trivial accessors
      sched/eevdf: Fix min_vruntime vs avg_vruntime
Pierre-Eric Pelloux-Prayer (1): drm/amdgpu: add missing lock to amdgpu_ttm_access_memory_sdma
Pierre-Louis Bossart (1): soundwire: stream: extend sdw_alloc_stream() to take 'type' parameter
Ping Cheng (1): HID: input: map HID_GD_Z to ABS_DISTANCE for stylus/pen
Ping-Ke Shih (1): wifi: rtw88: limit indirect IO under powered off for RTL8822CS
Prithvi Tambewagh (2): io_uring: fix filename leak in __io_openat_prep()
      ocfs2: fix kernel BUG in ocfs2_find_victim_chain
Przemyslaw Korba (1): i40e: fix scheduling in set_rx_mode
Pwnverse (1): net: rose: fix invalid array index in rose_kill_by_device()
Qianchang Zhao (2): ksmbd: vfs: fix race on m_flags in vfs_cache
      ksmbd: skip lock-range check on equal size to avoid size==0 underflow
Qiang Ma (1): LoongArch: Correct the calculation logic of thread_count
Qu Wenruo (2): btrfs: fix a potential path leak in print_data_reloc_error()
      btrfs: scrub: always update btrfs_scrub_progress::last_physical
Quan Zhou (4): wifi: mt76: mt792x: fix wifi init fail by setting MCU_RUNNING after CLC load
      wifi: mt76: mt7925: fix the unfinished command of regd_notifier before suspend
      wifi: mt76: mt7925: fix CLC command timeout when suspend/resume
      wifi: mt76: mt7925: add handler to hif suspend/resume event
Rafael J. Wysocki (2): cpuidle: governors: teo: Drop misguided target residency check
      PM: runtime: Do not clear needs_force_resume with enabled runtime PM
Raju Rangoju (1): amd-xgbe: reset retries and mode on RX adapt failures
Ran Xiaokai (1): mm/page_owner: fix memory leak in page_owner_stack_fops->release()
Raphael Pinsonneault-Thibeault (1): Bluetooth: btusb: revert use of devm_kzalloc in btusb
Raviteja Laggyshetty (1): interconnect: qcom: sdx75: Drop QPIC interconnect and BCM nodes
Ray Wu (2): drm/amd/display: Fix scratch registers offsets for DCN35
      drm/amd/display: Fix scratch registers offsets for DCN351
Rene Rebe (2): floppy: fix for PAGE_SIZE != 4KB
      fbdev: gbefb: fix to use physical address instead of dma address
René Rebe (3): r8169: fix RTL8117 Wake-on-Lan in DASH mode
      fbdev: tcx.c fix mem_map to correct smem_start offset
      drm/mgag200: Fix big-endian support
Rong Zhang (1): x86/microcode/AMD: Fix Entrysign revision check for Zen5/Strix Halo
Sai Krishna Potthuri (1): mmc: sdhci-of-arasan: Increase CD stable timeout to 2 seconds
Sakari Ailus (1): ACPI: property: Use ACPI functions in acpi_graph_get_next_endpoint() only
Sandipan Das (1): perf/x86/amd/uncore: Fix the return value of amd_uncore_df_event_init() on error
Sanjay Yadav (1): drm/xe/oa: Fix potential UAF in xe_oa_add_config_ioctl()
Sarthak Garg (1): mmc: sdhci-msm: Avoid early clock doubling during HS400 transition
Scott Mayhew (1): net/handshake: duplicate handshake cancellations leak socket
Sean Christopherson (4): KVM: Disallow toggling KVM_MEM_GUEST_MEMFD on an existing memslot
      KVM: x86: WARN if hrtimer callback for periodic APIC timer fires with period=0
      KVM: nSVM: Set exit_code_hi to -1 when synthesizing SVM_EXIT_ERR (failed VMRUN)
      KVM: nSVM: Clear exit_code_hi in VMCB when synthesizing nested VM-Exits
SeongJae Park (16): mm/damon/tests/sysfs-kunit: handle alloc failures on damon_sysfs_test_add_targets()
      mm/damon/tests/vaddr-kunit: handle alloc failures in damon_test_split_evenly_fail()
      mm/damon/tests/vaddr-kunit: handle alloc failures on damon_test_split_evenly_succ()
      mm/damon/tests/core-kunit: handle alloc failures on damon_test_split_at()
      mm/damon/tests/core-kunit: handle allocation failures in damon_test_regions()
      mm/damon/tests/core-kunit: handle memory failure from damon_test_target()
      mm/damon/tests/core-kunit: handle memory alloc failure from damon_test_aggregate()
      mm/damon/tests/core-kunit: handle alloc failures on dasmon_test_merge_regions_of()
      mm/damon/tests/core-kunit: handle alloc failures on damon_test_merge_two()
      mm/damon/tests/core-kunit: handle alloc failures in damon_test_set_regions()
      mm/damon/tests/core-kunit: handle alloc failures in damon_test_update_monitoring_result()
      mm/damon/tests/core-kunit: handle alloc failures in damon_test_ops_registration()
      mm/damon/tests/core-kunit: handle alloc failure on damon_test_set_attrs()
      mm/damon/tests/core-kunit: handle alloc failures on damon_test_split_regions_of()
      mm/damon/tests/core-kunit: handle alloc failres in damon_test_new_filter()
      mm/damon/tests/vaddr-kunit: handle alloc failures on damon_do_test_apply_three_regions()
Seunghwan Baek (1): scsi: ufs: core: Add ufshcd_update_evt_hist() for UFS suspend error
Shardul Bankar (1): nfsd: fix memory leak in nfsd_create_serv error paths
Shaurya Rane (1): net/hsr: fix NULL pointer dereference in prp_get_untagged_frame()
Shay Drory (3): net/mlx5: fw_tracer, Validate format string parameters
      net/mlx5: fw_tracer, Handle escaped percent properly
      net/mlx5: Serialize firmware reset with devlink
Sheng Yong (1): f2fs: clear SBI_POR_DOING before initing inmem curseg
Shengjiu Wang (1): ASoC: ak4458: remove the reset operation in probe and remove
Shengming Hu (2): fgraph: Initialize ftrace_ops->private for function graph ops
      fgraph: Check ftrace_pids_enabled on registration for early filtering
Shipei Qu (1): ALSA: usb-mixer: us16x08: validate meter packet indices
Shivani Agarwal (1): crypto: af_alg - zero initialize memory allocated via sock_kmalloc
Shravan Kumar Ramani (1): platform/mellanox: mlxbf-pmc: Remove trailing whitespaces from event names
Shuhao Fu (1): cpufreq: s5pv210: fix refcount leak
Shuicheng Lin (2): drm/xe: Limit num_syncs to prevent oversized allocations
      drm/xe/oa: Limit num_syncs to prevent oversized allocations
Siddharth Vadapalli (1): arm64: dts: ti: k3-j721e-sk: Fix pinmux for pin Y1 used by power regulator
Simon Richter (1): drm/ttm: Avoid NULL pointer deref for evicted BOs
Slavin Liu (1): ipvs: fix ipv4 null-ptr-deref in route error path
Song Liu (1): livepatch: Match old_sympos 0 and 1 in klp_find_func()
Srinivas Kandagatla (7): rpmsg: glink: fix rpmsg device leak
      ASoC: codecs: lpass-tx-macro: fix SM6115 support
      ASoC: qcom: q6apm-dai: set flags to reflect correct operation of appl_ptr
      ASoC: qcom: q6asm-dai: perform correct state check before closing
      ASoC: qcom: q6adm: the the copp device only during last instance
      ASoC: qcom: qdsp6: q6asm-dai: set 10 ms period and buffer alignment.
      ASoC: qcom: sdw: fix memory leak for sdw_stream_runtime
Stanimir Varbanov (1): PCI: brcmstb: Reuse pcie_cfg_data structure
Stefan Haberland (1): s390/dasd: Fix gendisk parent after copy pair swap
Stefano Garzarella (1): vhost/vsock: improve RCU read sections around vhost_vsock_get()
Steven Rostedt (3): ktest.pl: Fix uninitialized var in config-bisect.pl
      tracing: Do not register unsupported perf events
      tracing: Fix fixed array of synthetic event
Sven Eckelmann (Plasma Cloud) (1): wifi: mt76: Fix DTS power-limits on little endian systems
Sven Schnelle (3): s390/ipl: Clear SBP flag when bootprog is set
      parisc: entry.S: fix space adjustment on interruption for 64-bit userspace
      parisc: entry: set W bit for !compat tasks in syscall_restore_rfi()
Takashi Iwai (1): ALSA: wavefront: Use guard() for spin locks
Tejun Heo (2): sched_ext: Factor out local_dsq_post_enq() from dispatch_enqueue() sched_ext: Fix missing post-enqueue handling in move_local_task_to_local_dsq()
Tetsuo Handa (3):
      hfsplus: Verify inode mode when loading from disk
      jbd2: use a per-journal lock_class_key for jbd2_trans_commit_key
      RDMA/core: always drop device refcount in ib_del_sub_device_and_put()
Thomas De Schampheleire (1): kbuild: fix compilation of dtb specified on command-line without make rule
Thomas Fourier (4):
      block: rnbd-clt: Fix leaked ID in init_dev()
      platform/x86: msi-laptop: add missing sysfs_remove_group()
      firewire: nosy: Fix dma_free_coherent() size
      RDMA/bnxt_re: fix dma_free_coherent() pointer
Thomas Gleixner (2):
      x86/msi: Make irq_retrigger() functional for posted MSI
      hrtimers: Make hrtimer_update_function() less expensive
Thomas Hellström (2):
      drm/xe/bo: Don't include the CCS metadata in the dma-buf sg-table
      drm/xe: Drop preempt-fences when destroying imported dma-bufs.
Thomas Weißschuh (1): leds: leds-cros_ec: Skip LEDs without color components
Thomas Zimmermann (1): drm/gma500: Remove unused helper psb_fbdev_fb_setcolreg()
Thorsten Blum (2):
      net: phy: marvell-88q2xxx: Fix clamped value in mv88q2xxx_hwmon_write
      fbdev: pxafb: Fix multiple clamped values in pxafb_adjust_timing
Tianchu Chen (1): char: applicom: fix NULL pointer dereference in ac_ioctl
Tiezhu Yang (1): LoongArch: Use unsigned long for _end and _text
Toke Høiland-Jørgensen (1): net: openvswitch: Avoid needlessly taking the RTNL on vport destroy
Tony Battersby (4):
      scsi: qla2xxx: Fix lost interrupts with qlini_mode=disabled
      scsi: qla2xxx: Fix initiator mode with qlini_mode=exclusive
      scsi: qla2xxx: Use reinit_completion on mbx_intr_comp
      scsi: Revert "scsi: qla2xxx: Perform lockless command completion in abort path"
Tuo Li (1): md/raid5: fix possible null-pointer dereferences in raid5_store_group_thread_cnt()
Tzung-Bi Shih (1): platform/chrome: cros_ec_ishtp: Fix UAF after unbinding driver
Udipto Goswami (1): usb: dwc3: keep susphy enabled during exit to avoid controller faults
Uladzislau Rezki (Sony) (1): dm-ebs: Mark full buffer dirty even on partial write
Viacheslav Dubeyko (2):
      hfsplus: fix volume corruption issue for generic/070
      hfsplus: fix volume corruption issue for generic/073
Victor Nogueira (1): net/sched: ets: Remove drr class from the active list if it changes to strict
Vivian Wang (1): lib/crypto: riscv/chacha: Avoid s0/fp register
Vladimir Oltean (1): net: dsa: fix missing put_device() in dsa_tree_find_first_conduit()
Wang Liang (1): netrom: Fix memory leak in nr_sendmsg()
WangYuli (1): LoongArch: Use __pmd()/__pte() for swap entry conversions
Wei Fang (3):
      net: fec: ERR007885 Workaround for XDP TX path
      net: enetc: do not transmit redirected XDP frames when the link is down
      net: stmmac: fix the crash issue for zero copy XDP_TX action
Wenhua Lin (1): serial: sprd: Return -EPROBE_DEFER when uart clock is not ready
Wentao Guan (1): gpio: regmap: Fix memleak in error path in gpio_regmap_register()
Wentao Liang (1): pmdomain: imx: Fix reference count leak in imx_gpc_probe()
Will Rosenberg (1): ipv6: BUG() in pskb_expand_head() as part of calipso_skbuff_setattr()
Xiao Ni (1): md/raid10: wait barrier before returning discard request with REQ_NOWAIT
Xiaole He (2):
      f2fs: fix age extent cache insertion skip on counter overflow
      f2fs: fix uninitialized one_time_gc in victim_sel_policy
Xiaolei Wang (1): net: macb: Relocate mog_init_rings() callback from macb_mac_link_up() to macb_open()
Yang Chenzhi (1): hfsplus: fix missing hfs_bnode_get() in __hfs_bnode_create
Ye Bin (1): jbd2: fix the inconsistency between checksum and data in memory for journal sb
Yeoreum Yun (1): smc91x: fix broken irq-context in PREEMPT_RT
Yi Liu (1): iommufd/selftest: Add coverage for reporting max_pasid_log2 via IOMMU_HW_INFO
Yipeng Zou (1): selftests/ftrace: traceonoff_triggers: strip off names
Yongjian Sun (1): ext4: fix incorrect group number assertion in mb_check_buddy
Yongxin Liu (1): x86/fpu: Fix FPU state core dump truncation on CPUs with no extended xfeatures
Yosry Ahmed (2):
      KVM: nSVM: Avoid incorrect injection of SVM_EXIT_CR0_SEL_WRITE
      KVM: nSVM: Propagate SVM_EXIT_CR0_SEL_WRITE correctly for LMSW emulation
Yuezhang Mo (2):
      exfat: fix remount failure in different process environments
      exfat: zero out post-EOF page cache on file extension
Zheng Yejian (1): kallsyms: Fix wrong "big" kernel symbol type read from procfs
Zhichi Lin (1): scs: fix a wrong parameter in __scs_magic
Zilin Guan (3):
      cifs: Fix memory and information leak in smb3_reconfigure()
      vfio/pds: Fix memory leak in pds_vfio_dirty_enable()
      ksmbd: Fix memory leak in get_file_all_info()
Zqiang (2):
      sched_ext: Fix incorrect sched_class settings for per-cpu migration tasks
      usbnet: Fix using smp_processor_id() in preemptible code warnings
caoping (1): net/handshake: restore destructor on submit failure
fuqiang wang (2):
      KVM: x86: Explicitly set new periodic hrtimer expiration in apic_timer_fn()
      KVM: x86: Fix VM hard lockup after prolonged inactivity with periodic HV timer
j.turek (1): serial: xilinx_uartps: fix rs485 delay_rts_after_send
xu xin (1): mm/ksm: fix exec/fork inheritance support for prctl
Łukasz Bartosik (1): xhci: dbgtty: fix device unregister: fixup
diff --git a/Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml b/Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml index 9fce8cd7b0b6..d24950ccea95 100644 --- a/Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml +++ b/Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml @@ -41,7 +41,7 @@ properties: patternProperties: "^sdhci@[0-9a-f]+$": type: object - $ref: mmc-controller.yaml + $ref: sdhci-common.yaml unevaluatedProperties: false
properties: diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml index 76cb9fbfd476..d5286e7cc573 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc7280.yaml @@ -74,6 +74,11 @@ properties: items: - const: pci
+required: + - power-domains + - resets + - reset-names + allOf: - $ref: qcom,pcie-common.yaml#
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml index 15ba2385eb73..b6fb0e37cec5 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sc8280xp.yaml @@ -61,6 +61,9 @@ properties: required: - interconnects - interconnect-names + - power-domains + - resets + - reset-names
allOf: - $ref: qcom,pcie-common.yaml# diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml index 9d569644fda9..e5fa110dce80 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8150.yaml @@ -69,6 +69,11 @@ properties: items: - const: pci
+required: + - power-domains + - resets + - reset-names + allOf: - $ref: qcom,pcie-common.yaml#
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml index 4d060bce6f9d..ddcaddc7602f 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8250.yaml @@ -81,6 +81,11 @@ properties: items: - const: pci
+required: + - power-domains + - resets + - reset-names + allOf: - $ref: qcom,pcie-common.yaml#
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml index 2a4cc41fc710..0069919b9424 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8350.yaml @@ -71,6 +71,11 @@ properties: items: - const: pci
+required: + - power-domains + - resets + - reset-names + allOf: - $ref: qcom,pcie-common.yaml#
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml index 46bd59eefadb..790b11e6763a 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8450.yaml @@ -81,6 +81,11 @@ properties: items: - const: pci
+required: + - power-domains + - resets + - reset-names + allOf: - $ref: qcom,pcie-common.yaml#
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml index 24cb38673581..ff031929525b 100644 --- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml @@ -78,6 +78,11 @@ properties: - const: pci # PCIe core reset - const: link_down # PCIe link down reset
+required: + - power-domains + - resets + - reset-names + allOf: - $ref: qcom,pcie-common.yaml#
diff --git a/Documentation/driver-api/soundwire/stream.rst b/Documentation/driver-api/soundwire/stream.rst index 2a794484f62c..d66201299633 100644 --- a/Documentation/driver-api/soundwire/stream.rst +++ b/Documentation/driver-api/soundwire/stream.rst @@ -291,7 +291,7 @@ per stream. From ASoC DPCM framework, this stream state maybe linked to
.. code-block:: c
- int sdw_alloc_stream(char * stream_name); + int sdw_alloc_stream(char * stream_name, enum sdw_stream_type type);
The SoundWire core provides a sdw_startup_stream() helper function, typically called during a dailink .startup() callback, which performs diff --git a/Documentation/driver-api/tty/tty_port.rst b/Documentation/driver-api/tty/tty_port.rst index 5cb90e954fcf..504a353f2682 100644 --- a/Documentation/driver-api/tty/tty_port.rst +++ b/Documentation/driver-api/tty/tty_port.rst @@ -42,9 +42,10 @@ TTY Refcounting TTY Helpers -----------
+.. kernel-doc:: include/linux/tty_port.h + :identifiers: tty_port_tty_hangup tty_port_tty_vhangup .. kernel-doc:: drivers/tty/tty_port.c - :identifiers: tty_port_tty_hangup tty_port_tty_wakeup - + :identifiers: tty_port_tty_wakeup
Modem Signals ------------- diff --git a/Documentation/filesystems/nfs/localio.rst b/Documentation/filesystems/nfs/localio.rst index 20fc901a08f4..7d2dbf75e96d 100644 --- a/Documentation/filesystems/nfs/localio.rst +++ b/Documentation/filesystems/nfs/localio.rst @@ -218,64 +218,30 @@ NFS Client and Server Interlock ===============================
LOCALIO provides the nfs_uuid_t object and associated interfaces to -allow proper network namespace (net-ns) and NFSD object refcounting: - - We don't want to keep a long-term counted reference on each NFSD's - net-ns in the client because that prevents a server container from - completely shutting down. - - So we avoid taking a reference at all and rely on the per-cpu - reference to the server (detailed below) being sufficient to keep - the net-ns active. This involves allowing the NFSD's net-ns exit - code to iterate all active clients and clear their ->net pointers - (which are needed to find the per-cpu-refcount for the nfsd_serv). - - Details: - - - Embed nfs_uuid_t in nfs_client. nfs_uuid_t provides a list_head - that can be used to find the client. It does add the 16-byte - uuid_t to nfs_client so it is bigger than needed (given that - uuid_t is only used during the initial NFS client and server - LOCALIO handshake to determine if they are local to each other). - If that is really a problem we can find a fix. - - - When the nfs server confirms that the uuid_t is local, it moves - the nfs_uuid_t onto a per-net-ns list in NFSD's nfsd_net. - - - When each server's net-ns is shutting down - in a "pre_exit" - handler, all these nfs_uuid_t have their ->net cleared. There is - an rcu_synchronize() call between pre_exit() handlers and exit() - handlers so any caller that sees nfs_uuid_t ->net as not NULL can - safely manage the per-cpu-refcount for nfsd_serv. - - - The client's nfs_uuid_t is passed to nfsd_open_local_fh() so it - can safely dereference ->net in a private rcu_read_lock() section - to allow safe access to the associated nfsd_net and nfsd_serv. - -So LOCALIO required the introduction and use of NFSD's percpu_ref to -interlock nfsd_destroy_serv() and nfsd_open_local_fh(), to ensure each -nn->nfsd_serv is not destroyed while in use by nfsd_open_local_fh(), and +allow proper network namespace (net-ns) and NFSD object refcounting. + +LOCALIO required the introduction and use of NFSD's percpu nfsd_net_ref +to interlock nfsd_shutdown_net() and nfsd_open_local_fh(), to ensure +each net-ns is not destroyed while in use by nfsd_open_local_fh(), and warrants a more detailed explanation:
- nfsd_open_local_fh() uses nfsd_serv_try_get() before opening its + nfsd_open_local_fh() uses nfsd_net_try_get() before opening its nfsd_file handle and then the caller (NFS client) must drop the - reference for the nfsd_file and associated nn->nfsd_serv using - nfs_file_put_local() once it has completed its IO. + reference for the nfsd_file and associated net-ns using + nfsd_file_put_local() once it has completed its IO.
This interlock working relies heavily on nfsd_open_local_fh() being afforded the ability to safely deal with the possibility that the NFSD's net-ns (and nfsd_net by association) may have been destroyed - by nfsd_destroy_serv() via nfsd_shutdown_net() -- which is only - possible given the nfs_uuid_t ->net pointer managemenet detailed - above. - -All told, this elaborate interlock of the NFS client and server has been -verified to fix an easy to hit crash that would occur if an NFSD -instance running in a container, with a LOCALIO client mounted, is -shutdown. Upon restart of the container and associated NFSD the client -would go on to crash due to NULL pointer dereference that occurred due -to the LOCALIO client's attempting to nfsd_open_local_fh(), using -nn->nfsd_serv, without having a proper reference on nn->nfsd_serv. + by nfsd_destroy_serv() via nfsd_shutdown_net(). + +This interlock of the NFS client and server has been verified to fix an +easy to hit crash that would occur if an NFSD instance running in a +container, with a LOCALIO client mounted, is shutdown. Upon restart of +the container and associated NFSD, the client would go on to crash due +to NULL pointer dereference that occurred due to the LOCALIO client's +attempting to nfsd_open_local_fh() without having a proper reference on +NFSD's net-ns.
NFS Client issues IO instead of Server ====================================== @@ -308,16 +274,19 @@ fs/nfs/localio.c:nfs_local_commit().
With normal NFS that makes use of RPC to issue IO to the server, if an application uses O_DIRECT the NFS client will bypass the pagecache but -the NFS server will not. Because the NFS server's use of buffered IO -affords applications to be less precise with their alignment when -issuing IO to the NFS client. LOCALIO can be configured to use O_DIRECT -semantics by setting the 'localio_O_DIRECT_semantics' nfs module +the NFS server will not. The NFS server's use of buffered IO affords +applications to be less precise with their alignment when issuing IO to +the NFS client. But if all applications properly align their IO, LOCALIO +can be configured to use end-to-end O_DIRECT semantics from the NFS +client to the underlying local filesystem, that it is sharing with +the NFS server, by setting the 'localio_O_DIRECT_semantics' nfs module parameter to Y, e.g.:
- echo Y > /sys/module/nfs/parameters/localio_O_DIRECT_semantics + echo Y > /sys/module/nfs/parameters/localio_O_DIRECT_semantics
-Once enabled, it will cause LOCALIO to use O_DIRECT semantics (this may -cause IO to fail if applications do not properly align their IO). +Once enabled, it will cause LOCALIO to use end-to-end O_DIRECT semantics +(but again, this may cause IO to fail if applications do not properly +align their IO).
Security ======== diff --git a/Makefile b/Makefile index 5e640890e980..e8e272e1187b 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 12 -SUBLEVEL = 63 +SUBLEVEL = 64 EXTRAVERSION = NAME = Baby Opossum Posse
diff --git a/arch/arm/boot/dts/microchip/sama5d2.dtsi b/arch/arm/boot/dts/microchip/sama5d2.dtsi index 5f8e297e19ed..92ee26fec8b4 100644 --- a/arch/arm/boot/dts/microchip/sama5d2.dtsi +++ b/arch/arm/boot/dts/microchip/sama5d2.dtsi @@ -568,7 +568,7 @@ AT91_XDMAC_DT_PERID(11))>, AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(12))>; dma-names = "tx", "rx"; - atmel,fifo-size = <16>; + atmel,fifo-size = <32>; status = "disabled"; };
@@ -639,7 +639,7 @@ AT91_XDMAC_DT_PERID(13))>, AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(14))>; dma-names = "tx", "rx"; - atmel,fifo-size = <16>; + atmel,fifo-size = <32>; status = "disabled"; };
@@ -851,7 +851,7 @@ AT91_XDMAC_DT_PERID(15))>, AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(16))>; dma-names = "tx", "rx"; - atmel,fifo-size = <16>; + atmel,fifo-size = <32>; status = "disabled"; };
@@ -922,7 +922,7 @@ AT91_XDMAC_DT_PERID(17))>, AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(18))>; dma-names = "tx", "rx"; - atmel,fifo-size = <16>; + atmel,fifo-size = <32>; status = "disabled"; };
@@ -994,7 +994,7 @@ AT91_XDMAC_DT_PERID(19))>, AT91_XDMAC_DT_PER_IF(1) | AT91_XDMAC_DT_PERID(20))>; dma-names = "tx", "rx"; - atmel,fifo-size = <16>; + atmel,fifo-size = <32>; status = "disabled"; };
diff --git a/arch/arm/boot/dts/microchip/sama7g5.dtsi b/arch/arm/boot/dts/microchip/sama7g5.dtsi index 17bcdcf0cf4a..086e1d402421 100644 --- a/arch/arm/boot/dts/microchip/sama7g5.dtsi +++ b/arch/arm/boot/dts/microchip/sama7g5.dtsi @@ -811,7 +811,7 @@ uart4: serial@200 { dma-names = "tx", "rx"; atmel,use-dma-rx; atmel,use-dma-tx; - atmel,fifo-size = <16>; + atmel,fifo-size = <32>; status = "disabled"; }; }; @@ -837,7 +837,7 @@ uart7: serial@200 { dma-names = "tx", "rx"; atmel,use-dma-rx; atmel,use-dma-tx; - atmel,fifo-size = <16>; + atmel,fifo-size = <32>; status = "disabled"; }; }; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts index c8d7eb1814f0..6ec1ef48c2e5 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts +++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts @@ -572,6 +572,12 @@ rpi_header_gpio1_pins_default: rpi-header-gpio1-default-pins { J721E_IOPAD(0x234, PIN_INPUT, 7) /* (U3) EXT_REFCLK1.GPIO1_12 */ >; }; + + vdd_sd_dv_pins_default: vdd-sd-dv-default-pins { + pinctrl-single,pins = < + J721E_IOPAD(0x1dc, PIN_OUTPUT, 7) /* (Y1) SPI1_CLK.GPIO0_118 */ + >; + }; };
&wkup_pmx0 { @@ -633,12 +639,6 @@ J721E_WKUP_IOPAD(0xd4, PIN_OUTPUT, 7) /* (G26) WKUP_GPIO0_9 */ >; };
- vdd_sd_dv_pins_default: vdd-sd-dv-default-pins { - pinctrl-single,pins = < - J721E_IOPAD(0x1dc, PIN_OUTPUT, 7) /* (Y1) SPI1_CLK.GPIO0_118 */ - >; - }; - wkup_uart0_pins_default: wkup-uart0-default-pins { pinctrl-single,pins = < J721E_WKUP_IOPAD(0xa0, PIN_INPUT, 0) /* (J29) WKUP_UART0_RXD */ diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index bdbe9e08664a..9d1c88536ee4 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -16,6 +16,58 @@ #include <asm/sysreg.h> #include <linux/irqchip/arm-gic-v3.h>
+.macro init_el2_hcr val + mov_q x0, \val + + /* + * Compliant CPUs advertise their VHE-onlyness with + * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it + * can reset into an UNKNOWN state and might not read as 1 until it has + * been initialized explicitly. + * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H + * indicating whether the CPU is running in E2H mode. + */ + mrs_s x1, SYS_ID_AA64MMFR4_EL1 + sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH + cmp x1, #0 + b.lt .LnE2H0_@ + + /* + * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised + * as such via ID_AA64MMFR4_EL1.E2H0: + * + * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to + * have HCR_EL2.E2H implemented as RAO/WI. + * + * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest + * reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV + * guests on these hosts can write to HCR_EL2.E2H without + * trapping to the hypervisor, but these writes have no + * functional effect. + * + * Handle both cases by checking for an essential VHE property + * (system register remapping) to decide whether we're + * effectively VHE-only or not. + */ + msr hcr_el2, x0 // Setup HCR_EL2 as nVHE + isb + mov x1, #1 // Write something to FAR_EL1 + msr far_el1, x1 + isb + mov x1, #2 // Try to overwrite it via FAR_EL2 + msr far_el2, x1 + isb + mrs x1, far_el1 // If we see the latest write in FAR_EL1, + cmp x1, #2 // we can safely assume we are VHE only. + b.ne .LnVHE_@ // Otherwise, we know that nVHE works. + +.LnE2H0_@: + orr x0, x0, #HCR_E2H + msr hcr_el2, x0 + isb +.LnVHE_@: +.endm + .macro __init_el2_sctlr mov_q x0, INIT_SCTLR_EL2_MMU_OFF msr sctlr_el2, x0 @@ -239,11 +291,6 @@ .Lskip_fgt2_@: .endm
-.macro __init_el2_nvhe_prepare_eret - mov x0, #INIT_PSTATE_EL1 - msr spsr_el2, x0 -.endm - /** * Initialize EL2 registers to sane values. This should be called early on all * cores that were booted in EL2. Note that everything gets initialised as diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index cb68adcabe07..25c08a0d228a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -295,25 +295,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) msr sctlr_el2, x0 isb 0: - mov_q x0, HCR_HOST_NVHE_FLAGS - - /* - * Compliant CPUs advertise their VHE-onlyness with - * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be - * RES1 in that case. Publish the E2H bit early so that - * it can be picked up by the init_el2_state macro. - * - * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but - * don't advertise it (they predate this relaxation). - */ - mrs_s x1, SYS_ID_AA64MMFR4_EL1 - tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f - - orr x0, x0, #HCR_E2H -1: - msr hcr_el2, x0 - isb
+ init_el2_hcr HCR_HOST_NVHE_FLAGS init_el2_state
/* Hypervisor stub */ @@ -336,7 +319,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) msr sctlr_el1, x1 mov x2, xzr 3: - __init_el2_nvhe_prepare_eret + mov x0, #INIT_PSTATE_EL1 + msr spsr_el2, x0
mov w0, #BOOT_CPU_MODE_EL2 orr x0, x0, x2 diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index fc1866226067..f8af11189572 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -73,8 +73,12 @@ __do_hyp_init: eret SYM_CODE_END(__kvm_hyp_init)
+/* + * Initialize EL2 CPU state to sane values. + * + * HCR_EL2.E2H must have been initialized already. + */ SYM_CODE_START_LOCAL(__kvm_init_el2_state) - /* Initialize EL2 CPU state to sane values. */ init_el2_state // Clobbers x0..x2 finalise_el2_state ret @@ -206,9 +210,9 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
2: msr SPsel, #1 // We want to use SP_EL{1,2}
- bl __kvm_init_el2_state + init_el2_hcr 0
- __init_el2_nvhe_prepare_eret + bl __kvm_init_el2_state
/* Enable MMU, set vectors and stack. */ mov x0, x28 diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index dfe8fe0f7eaf..bd0f308b694c 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -218,6 +218,9 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on) if (is_cpu_on) release_boot_args(boot_args);
+ write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR); + write_sysreg(INIT_PSTATE_EL1, SPSR_EL2); + __host_enter(host_ctxt); }
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ca6d002a6f13..82b57436f2f1 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -871,7 +871,7 @@ static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx) arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) return;
- if (capable(CAP_SYS_ADMIN)) + if (ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) return;
if (supports_clearbhb(SCOPE_SYSTEM)) { diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 7dc5bdd352a2..32899160086d 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -295,9 +295,9 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define __swp_offset(x) ((x).val >> 24) #define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) }) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) +#define __swp_entry_to_pte(x) __pte((x).val) #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) -#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE }) +#define __swp_entry_to_pmd(x) __pmd((x).val | _PAGE_HUGE)
static inline int pte_swp_exclusive(pte_t pte) { diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S index 0c65cf09110c..4e05adb40504 100644 --- a/arch/loongarch/kernel/mcount_dyn.S +++ b/arch/loongarch/kernel/mcount_dyn.S @@ -94,7 +94,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) * at the callsite, so there is no need to restore the T series regs. */ ftrace_common_return: - PTR_L ra, sp, PT_R1 PTR_L a0, sp, PT_R4 PTR_L a1, sp, PT_R5 PTR_L a2, sp, PT_R6 @@ -104,12 +103,17 @@ ftrace_common_return: PTR_L a6, sp, PT_R10 PTR_L a7, sp, PT_R11 PTR_L fp, sp, PT_R22 - PTR_L t0, sp, PT_ERA PTR_L t1, sp, PT_R13 - PTR_ADDI sp, sp, PT_SIZE bnez t1, .Ldirect + + PTR_L ra, sp, PT_R1 + PTR_L t0, sp, PT_ERA + PTR_ADDI sp, sp, PT_SIZE jr t0 .Ldirect: + PTR_L t0, sp, PT_R1 + PTR_L ra, sp, PT_ERA + PTR_ADDI sp, sp, PT_SIZE jr t1 SYM_CODE_END(ftrace_common)
@@ -161,6 +165,8 @@ SYM_CODE_END(return_to_handler) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS SYM_CODE_START(ftrace_stub_direct_tramp) UNWIND_HINT_UNDEFINED - jr t0 + move t1, ra + move ra, t0 + jr t1 SYM_CODE_END(ftrace_stub_direct_tramp) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index b5e2312a2fca..76abbb8d2931 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -183,7 +183,7 @@ static inline void __init *determine_relocation_address(void) if (kaslr_disabled()) return destination;
- kernel_length = (long)_end - (long)_text; + kernel_length = (unsigned long)_end - (unsigned long)_text;
random_offset = get_random_boot() << 16; random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1); @@ -232,7 +232,7 @@ unsigned long __init relocate_kernel(void) early_memunmap(cmdline, COMMAND_LINE_SIZE);
if (random_offset) { - kernel_length = (long)(_end) - (long)(_text); + kernel_length = (unsigned long)(_end) - (unsigned long)(_text);
/* Copy the kernel to it's new location */ memcpy(location_new, _text, kernel_length); diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 2ceb198ae8c8..3cfe591b66a4 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -56,6 +56,7 @@ #define SMBIOS_FREQLOW_MASK 0xFF #define SMBIOS_CORE_PACKAGE_OFFSET 0x23 #define SMBIOS_THREAD_PACKAGE_OFFSET 0x25 +#define SMBIOS_THREAD_PACKAGE_2_OFFSET 0x2E #define LOONGSON_EFI_ENABLE (1 << 3)
unsigned long fw_arg0, fw_arg1, fw_arg2; @@ -126,7 +127,12 @@ static void __init parse_cpu_table(const struct dmi_header *dm) cpu_clock_freq = freq_temp * 1000000;
loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]); - loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET); + loongson_sysconf.cores_per_package = *(u8 *)(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET); + if (dm->length >= 0x30 && loongson_sysconf.cores_per_package == 0xff) { + /* SMBIOS 3.0+ has ThreadCount2 for more than 255 threads */ + loongson_sysconf.cores_per_package = + *(u16 *)(dmi_data + SMBIOS_THREAD_PACKAGE_2_OFFSET); + }
pr_info("CpuClock = %llu\n", cpu_clock_freq); } diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S index 31dd8199b245..b42c0dcf6263 100644 --- a/arch/loongarch/kernel/switch.S +++ b/arch/loongarch/kernel/switch.S @@ -25,8 +25,8 @@ SYM_FUNC_START(__switch_to) stptr.d a4, a0, THREAD_SCHED_CFA #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) la t7, __stack_chk_guard - LONG_L t8, a1, TASK_STACK_CANARY - LONG_S t8, t7, 0 + ldptr.d t8, a1, TASK_STACK_CANARY + stptr.d t8, t7, 0 #endif move tp, a2 cpu_restore_nonscratch a1 diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index 5ba3249cea98..29a26b5e7416 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -231,6 +231,8 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn) * goto out; */ tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0]; + emit_zext_32(ctx, a2, true); + off = offsetof(struct bpf_array, map.max_entries); emit_insn(ctx, ldwu, t1, a1, off); /* bgeu $a2, $t1, jmp_offset */ @@ -895,6 +897,22 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext if (ret < 0) return ret;
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { + const struct btf_func_model *m; + int i; + + m = bpf_jit_find_kfunc_model(ctx->prog, insn); + if (!m) + return -EINVAL; + + for (i = 0; i < m->nr_args; i++) { + u8 reg = regmap[BPF_REG_1 + i]; + bool sign = m->arg_flags[i] & BTF_FMODEL_SIGNED_ARG; + + emit_abi_ext(ctx, reg, m->arg_size[i], sign); + } + } + move_addr(ctx, t1, func_addr); emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h index f9c569f53949..9352ea554530 100644 --- a/arch/loongarch/net/bpf_jit.h +++ b/arch/loongarch/net/bpf_jit.h @@ -87,6 +87,32 @@ static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, boo emit_insn(ctx, addiw, reg, reg, 0); }
+/* Emit proper extension according to ABI requirements. + * Note that it requires a value of size `size` already resides in register `reg`. + */ +static inline void emit_abi_ext(struct jit_ctx *ctx, int reg, u8 size, bool sign) +{ + /* ABI requires unsigned char/short to be zero-extended */ + if (!sign && (size == 1 || size == 2)) + return; + + switch (size) { + case 1: + emit_insn(ctx, extwb, reg, reg); + break; + case 2: + emit_insn(ctx, extwh, reg, reg); + break; + case 4: + emit_insn(ctx, addiw, reg, reg, 0); + break; + case 8: + break; + default: + pr_warn("bpf_jit: invalid size %d for extension\n", size); + } +} + static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr) { u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52; diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c index 927dd31f82b9..ea4dbac0b47b 100644 --- a/arch/loongarch/pci/pci.c +++ b/arch/loongarch/pci/pci.c @@ -15,6 +15,7 @@ #define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00 #define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06 #define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36 +#define PCI_DEVICE_ID_LOONGSON_DC3 0x7a46
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val) @@ -98,3 +99,4 @@ static void pci_fixup_vgadev(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC3, pci_fixup_vgadev); diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index f39e85fd58fa..b15615b28569 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -54,10 +54,20 @@ static inline void ftrace_dyn_arch_init_insns(void) u32 *buf; unsigned int v1;
- /* la v1, _mcount */ - v1 = 3; - buf = (u32 *)&insn_la_mcount[0]; - UASM_i_LA(&buf, v1, MCOUNT_ADDR); + /* If we are not in compat space, the number of generated + * instructions will exceed the maximum expected limit of 2. + * To prevent buffer overflow, we avoid generating them. + * insn_la_mcount will not be used later in ftrace_make_call. + */ + if (uasm_in_compat_space_p(MCOUNT_ADDR)) { + /* la v1, _mcount */ + v1 = 3; + buf = (u32 *)&insn_la_mcount[0]; + UASM_i_LA(&buf, v1, MCOUNT_ADDR); + } else { + pr_warn("ftrace: mcount address beyond 32 bits is not supported (%lX)\n", + MCOUNT_ADDR); + }
/* jal (ftrace_caller + 8), jump over the first two instruction */ buf = (u32 *)&insn_jal_ftrace_caller; @@ -189,6 +199,13 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned int new; unsigned long ip = rec->ip;
+ /* When the code to patch does not belong to the kernel code + * space, we must use insn_la_mcount. However, if MCOUNT_ADDR + * is not in compat space, insn_la_mcount is not usable. + */ + if (!core_kernel_text(ip) && !uasm_in_compat_space_p(MCOUNT_ADDR)) + return -EFAULT; + new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
#ifdef CONFIG_64BIT diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c index d20eec742bfa..f6e66c858e69 100644 --- a/arch/mips/sgi-ip22/ip22-gio.c +++ b/arch/mips/sgi-ip22/ip22-gio.c @@ -373,7 +373,8 @@ static void ip22_check_gio(int slotno, unsigned long addr, int irq) gio_dev->resource.flags = IORESOURCE_MEM; gio_dev->irq = irq; dev_set_name(&gio_dev->dev, "%d", slotno); - gio_device_register(gio_dev); + if (gio_device_register(gio_dev)) + gio_dev_put(gio_dev); } else printk(KERN_INFO "GIO: slot %d : Empty\n", slotno); } diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 9abfe65492c6..3de4b5933b10 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -258,6 +258,8 @@ int main(void) BLANK(); DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); + DEFINE(TIF_32BIT_PA_BIT, 31-TIF_32BIT); + BLANK(); DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index f4bf61a34701..e04c5d806c10 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -1059,8 +1059,6 @@ ENTRY_CFI(intr_save) /* for os_hpmc */ STREG %r17, PT_IOR(%r29)
#if defined(CONFIG_64BIT) - b,n intr_save2 - skip_save_ior: /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we * need to adjust iasq/iaoq here in the same way we adjusted isr/ior @@ -1069,10 +1067,17 @@ skip_save_ior: bb,COND(>=),n %r8,PSW_W_BIT,intr_save2 LDREG PT_IASQ0(%r29), %r16 LDREG PT_IAOQ0(%r29), %r17 - /* adjust iasq/iaoq */ + /* adjust iasq0/iaoq0 */ space_adjust %r16,%r17,%r1 STREG %r16, PT_IASQ0(%r29) STREG %r17, PT_IAOQ0(%r29) + + LDREG PT_IASQ1(%r29), %r16 + LDREG PT_IAOQ1(%r29), %r17 + /* adjust iasq1/iaoq1 */ + space_adjust %r16,%r17,%r1 + STREG %r16, PT_IASQ1(%r29) + STREG %r17, PT_IAOQ1(%r29) #else skip_save_ior: #endif @@ -1841,6 +1846,10 @@ syscall_restore_rfi: extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 depi -1,7,1,%r20 /* T bit */
+#ifdef CONFIG_64BIT + extru,<> %r19,TIF_32BIT_PA_BIT,1,%r0 + depi -1,4,1,%r20 /* W bit */ +#endif STREG %r20,TASK_PT_PSW(%r1)
/* Always store space registers, since sr3 can be changed (e.g. fork) */ @@ -1854,7 +1863,6 @@ syscall_restore_rfi: STREG %r25,TASK_PT_IASQ0(%r1) STREG %r25,TASK_PT_IASQ1(%r1)
- /* XXX W bit??? */ /* Now if old D bit is clear, it means we didn't save all registers * on syscall entry, so do that now. This only happens on TRACEME * calls, or if someone attached to us while we were on a syscall. diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c index 53b3b2621457..78704927453a 100644 --- a/arch/powerpc/boot/addnote.c +++ b/arch/powerpc/boot/addnote.c @@ -68,8 +68,8 @@ static int e_class = ELFCLASS32; #define PUT_16BE(off, v)(buf[off] = ((v) >> 8) & 0xff, \ buf[(off) + 1] = (v) & 0xff) #define PUT_32BE(off, v)(PUT_16BE((off), (v) >> 16L), PUT_16BE((off) + 2, (v))) -#define PUT_64BE(off, v)((PUT_32BE((off), (v) >> 32L), \ - PUT_32BE((off) + 4, (v)))) +#define PUT_64BE(off, v)((PUT_32BE((off), (unsigned long long)(v) >> 32L), \ + PUT_32BE((off) + 4, (unsigned long long)(v))))
#define GET_16LE(off) ((buf[off]) + (buf[(off)+1] << 8)) #define GET_32LE(off) (GET_16LE(off) + (GET_16LE((off)+2U) << 16U)) @@ -78,7 +78,8 @@ static int e_class = ELFCLASS32; #define PUT_16LE(off, v) (buf[off] = (v) & 0xff, \ buf[(off) + 1] = ((v) >> 8) & 0xff) #define PUT_32LE(off, v) (PUT_16LE((off), (v)), PUT_16LE((off) + 2, (v) >> 16L)) -#define PUT_64LE(off, v) (PUT_32LE((off), (v)), PUT_32LE((off) + 4, (v) >> 32L)) +#define PUT_64LE(off, v) (PUT_32LE((off), (unsigned long long)(v)), \ + PUT_32LE((off) + 4, (unsigned long long)(v) >> 32L))
#define GET_16(off) (e_data == ELFDATA2MSB ? GET_16BE(off) : GET_16LE(off)) #define GET_32(off) (e_data == ELFDATA2MSB ? GET_32BE(off) : GET_32LE(off)) diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h index e43534da5207..4be2200a3c7e 100644 --- a/arch/powerpc/include/asm/book3s/32/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h @@ -11,6 +11,7 @@ void hash__flush_tlb_mm(struct mm_struct *mm); void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end); +void hash__flush_gather(struct mmu_gather *tlb);
#ifdef CONFIG_SMP void _tlbie(unsigned long address); @@ -29,7 +30,9 @@ void _tlbia(void); static inline void tlb_flush(struct mmu_gather *tlb) { /* 603 needs to flush the whole TLB here since it doesn't use a hash table. */ - if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + hash__flush_gather(tlb); + else _tlbia(); }
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 1c4eebbc69c9..e1f77e2eead4 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -524,7 +524,6 @@ void slb_save_contents(struct slb_entry *slb_ptr); void slb_dump_contents(struct slb_entry *slb_ptr);
extern void slb_vmalloc_update(void); -void preload_new_slb_context(unsigned long start, unsigned long sp);
#ifdef CONFIG_PPC_64S_HASH_MMU void slb_set_size(u16 size); diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 7f63f1cdc6c3..ca00c4824e31 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -20,6 +20,7 @@ #include <asm/io.h> #include <asm/processor.h> #include <asm/udbg.h> +#include <asm/setup.h>
#define NO_SCROLL
@@ -463,7 +464,7 @@ static noinline void draw_byte(unsigned char c, long locX, long locY) { unsigned char *base = calc_base(locX << 3, locY << 4); unsigned int font_index = c * 16; - const unsigned char *font = font_sun_8x16.data + font_index; + const unsigned char *font = PTRRELOC(font_sun_8x16.data) + font_index; int rb = dispDeviceRowBytes;
rmci_maybe_on(); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ff61a3e7984c..40b2cb5f2db2 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1897,8 +1897,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) return 0; }
-void preload_new_slb_context(unsigned long start, unsigned long sp); - /* * Set up a thread for executing a new program */ @@ -1906,9 +1904,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) { #ifdef CONFIG_PPC64 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ - - if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled()) - preload_new_slb_context(start, sp); #endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c index 222aa326dace..825ab8a88f18 100644 --- a/arch/powerpc/kexec/core_64.c +++ b/arch/powerpc/kexec/core_64.c @@ -202,6 +202,23 @@ static void kexec_prepare_cpus_wait(int wait_state) mb(); }
+ +/* + * The add_cpu() call in wake_offline_cpus() can fail as cpu_bootable() + * returns false for CPUs that fail the cpu_smt_thread_allowed() check + * or non primary threads if SMT is disabled. Re-enable SMT and set the + * number of SMT threads to threads per core. + */ +static void kexec_smt_reenable(void) +{ +#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) + lock_device_hotplug(); + cpu_smt_num_threads = threads_per_core; + cpu_smt_control = CPU_SMT_ENABLED; + unlock_device_hotplug(); +#endif +} + /* * We need to make sure each present CPU is online. The next kernel will scan * the device tree and assume primary threads are online and query secondary @@ -216,6 +233,8 @@ static void wake_offline_cpus(void) { int cpu = 0;
+ kexec_smt_reenable(); + for_each_present_cpu(cpu) { if (!cpu_online(cpu)) { printk(KERN_INFO "kexec: Waking offline cpu %d.\n", diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c index 9ad6b56bfec9..e54a7b011232 100644 --- a/arch/powerpc/mm/book3s32/tlb.c +++ b/arch/powerpc/mm/book3s32/tlb.c @@ -105,3 +105,12 @@ void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); } EXPORT_SYMBOL(hash__flush_tlb_page); + +void hash__flush_gather(struct mmu_gather *tlb) +{ + if (tlb->fullmm || tlb->need_flush_all) + hash__flush_tlb_mm(tlb->mm); + else + hash__flush_range(tlb->mm, tlb->start, tlb->end); +} +EXPORT_SYMBOL(hash__flush_gather); diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h index a57a25f06a21..c26a6f0c90fc 100644 --- a/arch/powerpc/mm/book3s64/internal.h +++ b/arch/powerpc/mm/book3s64/internal.h @@ -24,8 +24,6 @@ static inline bool stress_hpt(void)
void hpt_do_stress(unsigned long ea, unsigned long hpte_group);
-void slb_setup_new_exec(void); - void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);
#endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */ diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c index 1715b07c630c..997bc13a690e 100644 --- a/arch/powerpc/mm/book3s64/mmu_context.c +++ b/arch/powerpc/mm/book3s64/mmu_context.c @@ -150,8 +150,6 @@ static int hash__init_new_context(struct mm_struct *mm) void hash__setup_new_exec(void) { slice_setup_new_exec(); - - slb_setup_new_exec(); } #else static inline int hash__init_new_context(struct mm_struct *mm) diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c index f2708c8629a5..8e8d5da7b482 100644 --- a/arch/powerpc/mm/book3s64/slb.c +++ b/arch/powerpc/mm/book3s64/slb.c @@ -328,94 +328,6 @@ static void preload_age(struct thread_info *ti) ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; }
-void slb_setup_new_exec(void) -{ - struct thread_info *ti = current_thread_info(); - struct mm_struct *mm = current->mm; - unsigned long exec = 0x10000000; - - WARN_ON(irqs_disabled()); - - /* - * preload cache can only be used to determine whether a SLB - * entry exists if it does not start to overflow. - */ - if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR) - return; - - hard_irq_disable(); - - /* - * We have no good place to clear the slb preload cache on exec, - * flush_thread is about the earliest arch hook but that happens - * after we switch to the mm and have already preloaded the SLBEs. - * - * For the most part that's probably okay to use entries from the - * previous exec, they will age out if unused. It may turn out to - * be an advantage to clear the cache before switching to it, - * however. - */ - - /* - * preload some userspace segments into the SLB. - * Almost all 32 and 64bit PowerPC executables are linked at - * 0x10000000 so it makes sense to preload this segment. - */ - if (!is_kernel_addr(exec)) { - if (preload_add(ti, exec)) - slb_allocate_user(mm, exec); - } - - /* Libraries and mmaps. */ - if (!is_kernel_addr(mm->mmap_base)) { - if (preload_add(ti, mm->mmap_base)) - slb_allocate_user(mm, mm->mmap_base); - } - - /* see switch_slb */ - asm volatile("isync" : : : "memory"); - - local_irq_enable(); -} - -void preload_new_slb_context(unsigned long start, unsigned long sp) -{ - struct thread_info *ti = current_thread_info(); - struct mm_struct *mm = current->mm; - unsigned long heap = mm->start_brk; - - WARN_ON(irqs_disabled()); - - /* see above */ - if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR) - return; - - hard_irq_disable(); - - /* Userspace entry address. */ - if (!is_kernel_addr(start)) { - if (preload_add(ti, start)) - slb_allocate_user(mm, start); - } - - /* Top of stack, grows down. */ - if (!is_kernel_addr(sp)) { - if (preload_add(ti, sp)) - slb_allocate_user(mm, sp); - } - - /* Bottom of heap, grows up. */ - if (heap && !is_kernel_addr(heap)) { - if (preload_add(ti, heap)) - slb_allocate_user(mm, heap); - } - - /* see switch_slb */ - asm volatile("isync" : : : "memory"); - - local_irq_enable(); -} - static void slb_cache_slbie_kernel(unsigned int index) { unsigned long slbie_data = get_paca()->slb_cache[index]; diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 5f4037c1d7fe..851670ad515a 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -532,7 +532,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_insert(b_dev_info, newpage); - balloon_page_delete(page); + __count_vm_event(BALLOON_MIGRATE); b_dev_info->isolated_pages--; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
@@ -542,6 +542,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info, */ plpar_page_set_active(page);
+ balloon_page_finalize(page); /* balloon page list reference */ put_page(page);
@@ -550,7 +551,6 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
static void cmm_balloon_compaction_init(void) { - balloon_devinfo_init(&b_dev_info); b_dev_info.migratepage = cmm_migratepage; } #else /* CONFIG_BALLOON_COMPACTION */ @@ -572,6 +572,7 @@ static int cmm_init(void) if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate) return -EOPNOTSUPP;
+ balloon_devinfo_init(&b_dev_info); cmm_balloon_compaction_init();
rc = register_oom_notifier(&cmm_oom_nb); diff --git a/arch/riscv/crypto/chacha-riscv64-zvkb.S b/arch/riscv/crypto/chacha-riscv64-zvkb.S index bf057737ac69..fbef93503571 100644 --- a/arch/riscv/crypto/chacha-riscv64-zvkb.S +++ b/arch/riscv/crypto/chacha-riscv64-zvkb.S @@ -60,7 +60,8 @@ #define VL t2 #define STRIDE t3 #define NROUNDS t4 -#define KEY0 s0 +#define KEY0 t5 +// Avoid s0/fp to allow for unwinding #define KEY1 s1 #define KEY2 s2 #define KEY3 s3 @@ -141,7 +142,6 @@ SYM_FUNC_START(chacha20_zvkb) srli LEN, LEN, 6 // Bytes to blocks
addi sp, sp, -96 - sd s0, 0(sp) sd s1, 8(sp) sd s2, 16(sp) sd s3, 24(sp) @@ -277,7 +277,6 @@ SYM_FUNC_START(chacha20_zvkb) add INP, INP, TMP bnez LEN, .Lblock_loop
- ld s0, 0(sp) ld s1, 8(sp) ld s2, 16(sp) ld s3, 24(sp) diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h index 2cd28af50dd4..3d64a2251699 100644 --- a/arch/s390/include/uapi/asm/ipl.h +++ b/arch/s390/include/uapi/asm/ipl.h @@ -15,6 +15,7 @@ struct ipl_pl_hdr { #define IPL_PL_FLAG_IPLPS 0x80 #define IPL_PL_FLAG_SIPL 0x40 #define IPL_PL_FLAG_IPLSR 0x20 +#define IPL_PL_FLAG_SBP 0x10
/* IPL Parameter Block header */ struct ipl_pb_hdr { diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 5fa203f4bc6b..f5a56c341501 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -261,6 +261,24 @@ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ sys_##_prefix##_##_name##_show, \ sys_##_prefix##_##_name##_store)
+#define DEFINE_IPL_ATTR_BOOTPROG_RW(_prefix, _name, _fmt_out, _fmt_in, _hdr, _value) \ + IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \ +static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + unsigned long long value; \ + if (sscanf(buf, _fmt_in, &value) != 1) \ + return -EINVAL; \ + (_value) = value; \ + (_hdr).flags &= ~IPL_PL_FLAG_SBP; \ + return len; \ +} \ +static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ + __ATTR(_name, 0644, \ + sys_##_prefix##_##_name##_show, \ + sys_##_prefix##_##_name##_store) + #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ @@ -817,12 +835,13 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n", reipl_block_fcp->fcp.wwpn); DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n", reipl_block_fcp->fcp.lun); -DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n", - reipl_block_fcp->fcp.bootprog); DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n", reipl_block_fcp->fcp.br_lba); DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", reipl_block_fcp->fcp.devno); +DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n", + reipl_block_fcp->hdr, + reipl_block_fcp->fcp.bootprog);
static void reipl_get_ascii_loadparm(char *loadparm, struct ipl_parameter_block *ibp) @@ -941,10 +960,11 @@ DEFINE_IPL_ATTR_RW(reipl_nvme, fid, "0x%08llx\n", "%llx\n", reipl_block_nvme->nvme.fid); DEFINE_IPL_ATTR_RW(reipl_nvme, nsid, "0x%08llx\n", "%llx\n", reipl_block_nvme->nvme.nsid); -DEFINE_IPL_ATTR_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n", - reipl_block_nvme->nvme.bootprog); DEFINE_IPL_ATTR_RW(reipl_nvme, br_lba, "%lld\n", "%lld\n", reipl_block_nvme->nvme.br_lba); +DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n", + reipl_block_nvme->hdr, + reipl_block_nvme->nvme.bootprog);
static struct attribute *reipl_nvme_attrs[] = { &sys_reipl_nvme_fid_attr.attr, @@ -1037,8 +1057,9 @@ static struct bin_attribute *reipl_eckd_bin_attrs[] = { };
DEFINE_IPL_CCW_ATTR_RW(reipl_eckd, device, reipl_block_eckd->eckd); -DEFINE_IPL_ATTR_RW(reipl_eckd, bootprog, "%lld\n", "%lld\n", - reipl_block_eckd->eckd.bootprog); +DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_eckd, bootprog, "%lld\n", "%lld\n", + reipl_block_eckd->hdr, + reipl_block_eckd->eckd.bootprog);
static struct attribute *reipl_eckd_attrs[] = { &sys_reipl_eckd_device_attr.attr, @@ -1566,12 +1587,13 @@ DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n", dump_block_fcp->fcp.wwpn); DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n", dump_block_fcp->fcp.lun); -DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", - dump_block_fcp->fcp.bootprog); DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n", dump_block_fcp->fcp.br_lba); DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", dump_block_fcp->fcp.devno); +DEFINE_IPL_ATTR_BOOTPROG_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", + dump_block_fcp->hdr, + dump_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_SCP_DATA_RW(dump_fcp, dump_block_fcp->hdr, dump_block_fcp->fcp, @@ -1603,10 +1625,11 @@ DEFINE_IPL_ATTR_RW(dump_nvme, fid, "0x%08llx\n", "%llx\n", dump_block_nvme->nvme.fid); DEFINE_IPL_ATTR_RW(dump_nvme, nsid, "0x%08llx\n", "%llx\n", dump_block_nvme->nvme.nsid); -DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n", - dump_block_nvme->nvme.bootprog); DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n", dump_block_nvme->nvme.br_lba); +DEFINE_IPL_ATTR_BOOTPROG_RW(dump_nvme, bootprog, "%lld\n", "%llx\n", + dump_block_nvme->hdr, + dump_block_nvme->nvme.bootprog);
DEFINE_IPL_ATTR_SCP_DATA_RW(dump_nvme, dump_block_nvme->hdr, dump_block_nvme->nvme, @@ -1634,8 +1657,9 @@ static struct attribute_group dump_nvme_attr_group = {
/* ECKD dump device attributes */ DEFINE_IPL_CCW_ATTR_RW(dump_eckd, device, dump_block_eckd->eckd); -DEFINE_IPL_ATTR_RW(dump_eckd, bootprog, "%lld\n", "%llx\n", - dump_block_eckd->eckd.bootprog); +DEFINE_IPL_ATTR_BOOTPROG_RW(dump_eckd, bootprog, "%lld\n", "%llx\n", + dump_block_eckd->hdr, + dump_block_eckd->eckd.bootprog);
IPL_ATTR_BR_CHR_SHOW_FN(dump, dump_block_eckd->eckd); IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd); diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S index b50b35ff1fdb..ca2644820dfc 100644 --- a/arch/x86/crypto/blake2s-core.S +++ b/arch/x86/crypto/blake2s-core.S @@ -54,7 +54,7 @@ SYM_FUNC_START(blake2s_compress_ssse3) movdqa ROT16(%rip),%xmm12 movdqa ROR328(%rip),%xmm13 movdqu 0x20(%rdi),%xmm14 - movq %rcx,%xmm15 + movd %ecx,%xmm15 leaq SIGMA+0xa0(%rip),%r8 jmp .Lbeginofloop .align 32 @@ -179,7 +179,7 @@ SYM_FUNC_START(blake2s_compress_avx512) vmovdqu (%rdi),%xmm0 vmovdqu 0x10(%rdi),%xmm1 vmovdqu 0x20(%rdi),%xmm4 - vmovq %rcx,%xmm5 + vmovd %ecx,%xmm5 vmovdqa IV(%rip),%xmm14 vmovdqa IV+16(%rip),%xmm15 jmp .Lblake2s_compress_avx512_mainloop diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 51efd2da4d7f..7b9321c48a90 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -21,11 +21,6 @@ #include <linux/uaccess.h> #include <linux/init.h>
-#ifdef CONFIG_XEN_PV -#include <xen/xen-ops.h> -#include <xen/events.h> -#endif - #include <asm/apic.h> #include <asm/desc.h> #include <asm/traps.h> @@ -454,70 +449,3 @@ SYSCALL_DEFINE0(ni_syscall) { return -ENOSYS; } - -#ifdef CONFIG_XEN_PV -#ifndef CONFIG_PREEMPTION -/* - * Some hypercalls issued by the toolstack can take many 10s of - * seconds. Allow tasks running hypercalls via the privcmd driver to - * be voluntarily preempted even if full kernel preemption is - * disabled. - * - * Such preemptible hypercalls are bracketed by - * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end() - * calls. - */ -DEFINE_PER_CPU(bool, xen_in_preemptible_hcall); -EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); - -/* - * In case of scheduling the flag must be cleared and restored after - * returning from schedule as the task might move to a different CPU. - */ -static __always_inline bool get_and_clear_inhcall(void) -{ - bool inhcall = __this_cpu_read(xen_in_preemptible_hcall); - - __this_cpu_write(xen_in_preemptible_hcall, false); - return inhcall; -} - -static __always_inline void restore_inhcall(bool inhcall) -{ - __this_cpu_write(xen_in_preemptible_hcall, inhcall); -} -#else -static __always_inline bool get_and_clear_inhcall(void) { return false; } -static __always_inline void restore_inhcall(bool inhcall) { } -#endif - -static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs) -{ - struct pt_regs *old_regs = set_irq_regs(regs); - - inc_irq_stat(irq_hv_callback_count); - - xen_evtchn_do_upcall(); - - set_irq_regs(old_regs); -} - -__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs) -{ - irqentry_state_t state = irqentry_enter(regs); - bool inhcall; - - instrumentation_begin(); - run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs); - - inhcall = get_and_clear_inhcall(); - if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) { - irqentry_exit_cond_resched(); - instrumentation_end(); - restore_inhcall(inhcall); - } else { - instrumentation_end(); - irqentry_exit(regs, state); - } -} -#endif /* CONFIG_XEN_PV */ diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index b4a1a2576510..36d28edf7a53 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -762,7 +762,12 @@ static void amd_pmu_enable_all(int added) if (!test_bit(idx, cpuc->active_mask)) continue;
- amd_pmu_enable_event(cpuc->events[idx]); + /* + * FIXME: cpuc->events[idx] can become NULL in a subtle race + * condition with NMI->throttle->x86_pmu_stop(). + */ + if (cpuc->events[idx]) + amd_pmu_enable_event(cpuc->events[idx]); } }
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index cdf7bf029836..0bb60fde1f4f 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -595,14 +595,11 @@ static int amd_uncore_df_event_init(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int ret = amd_uncore_event_init(event);
- if (ret || pmu_version < 2) - return ret; - hwc->config = event->attr.config & (pmu_version >= 2 ? AMD64_PERFMON_V2_RAW_EVENT_MASK_NB : AMD64_RAW_EVENT_MASK_NB);
- return 0; + return ret; }
static int amd_uncore_df_add(struct perf_event *event, int flags) diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 5036f13ab69f..e8415958bcb2 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -72,4 +72,11 @@ static inline void panic_if_irq_remap(const char *msg) }
#endif /* CONFIG_IRQ_REMAP */ + +#ifdef CONFIG_X86_POSTED_MSI +void intel_ack_posted_msi_irq(struct irq_data *irqd); +#else +#define intel_ack_posted_msi_irq NULL +#endif + #endif /* __X86_IRQ_REMAPPING_H */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 5a83fbd9bc0b..eb5b1e2aa700 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -187,12 +187,12 @@ convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); extern void send_sigtrap(struct pt_regs *regs, int error_code, int si_code);
-static inline unsigned long regs_return_value(struct pt_regs *regs) +static __always_inline unsigned long regs_return_value(struct pt_regs *regs) { return regs->ax; }
-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) +static __always_inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) { regs->ax = rc; } @@ -277,34 +277,34 @@ static __always_inline bool ip_within_syscall_gap(struct pt_regs *regs) } #endif
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +static __always_inline unsigned long kernel_stack_pointer(struct pt_regs *regs) { return regs->sp; }
-static inline unsigned long instruction_pointer(struct pt_regs *regs) +static __always_inline unsigned long instruction_pointer(struct pt_regs *regs) { return regs->ip; }
-static inline void instruction_pointer_set(struct pt_regs *regs, - unsigned long val) +static __always_inline +void instruction_pointer_set(struct pt_regs *regs, unsigned long val) { regs->ip = val; }
-static inline unsigned long frame_pointer(struct pt_regs *regs) +static __always_inline unsigned long frame_pointer(struct pt_regs *regs) { return regs->bp; }
-static inline unsigned long user_stack_pointer(struct pt_regs *regs) +static __always_inline unsigned long user_stack_pointer(struct pt_regs *regs) { return regs->sp; }
-static inline void user_stack_pointer_set(struct pt_regs *regs, - unsigned long val) +static __always_inline +void user_stack_pointer_set(struct pt_regs *regs, unsigned long val) { regs->sp = val; } diff --git a/arch/x86/kernel/cpu/mce/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c index 89e31e1e5c9c..c32691324baa 100644 --- a/arch/x86/kernel/cpu/mce/threshold.c +++ b/arch/x86/kernel/cpu/mce/threshold.c @@ -85,7 +85,8 @@ void cmci_storm_end(unsigned int bank) { struct mca_storm_desc *storm = this_cpu_ptr(&storm_desc);
- __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); + if (!mce_flags.amd_threshold) + __clear_bit(bank, this_cpu_ptr(mce_poll_banks)); storm->banks[bank].history = 0; storm->banks[bank].in_storm_mode = false;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 7e997360223b..24709a558963 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -174,50 +174,61 @@ static u32 cpuid_to_ucode_rev(unsigned int val) return p.ucode_rev; }
+static u32 get_cutoff_revision(u32 rev) +{ + switch (rev >> 8) { + case 0x80012: return 0x8001277; break; + case 0x80082: return 0x800820f; break; + case 0x83010: return 0x830107c; break; + case 0x86001: return 0x860010e; break; + case 0x86081: return 0x8608108; break; + case 0x87010: return 0x8701034; break; + case 0x8a000: return 0x8a0000a; break; + case 0xa0010: return 0xa00107a; break; + case 0xa0011: return 0xa0011da; break; + case 0xa0012: return 0xa001243; break; + case 0xa0082: return 0xa00820e; break; + case 0xa1011: return 0xa101153; break; + case 0xa1012: return 0xa10124e; break; + case 0xa1081: return 0xa108109; break; + case 0xa2010: return 0xa20102f; break; + case 0xa2012: return 0xa201212; break; + case 0xa4041: return 0xa404109; break; + case 0xa5000: return 0xa500013; break; + case 0xa6012: return 0xa60120a; break; + case 0xa7041: return 0xa704109; break; + case 0xa7052: return 0xa705208; break; + case 0xa7080: return 0xa708009; break; + case 0xa70c0: return 0xa70C009; break; + case 0xaa001: return 0xaa00116; break; + case 0xaa002: return 0xaa00218; break; + case 0xb0021: return 0xb002146; break; + case 0xb0081: return 0xb008111; break; + case 0xb1010: return 0xb101046; break; + case 0xb2040: return 0xb204031; break; + case 0xb4040: return 0xb404031; break; + case 0xb4041: return 0xb404101; break; + case 0xb6000: return 0xb600031; break; + case 0xb6080: return 0xb608031; break; + case 0xb7000: return 0xb700031; break; + default: break; + + } + return 0; +} + static bool need_sha_check(u32 cur_rev) { + u32 cutoff; + if (!cur_rev) { cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax); pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev); }
- switch (cur_rev >> 8) { - case 0x80012: return cur_rev <= 0x8001277; break; - case 0x80082: return cur_rev <= 0x800820f; break; - case 0x83010: return cur_rev <= 0x830107c; break; - case 0x86001: return cur_rev <= 0x860010e; break; - case 0x86081: return cur_rev <= 0x8608108; break; - case 0x87010: return cur_rev <= 0x8701034; break; - case 0x8a000: return cur_rev <= 0x8a0000a; break; - case 0xa0010: return cur_rev <= 0xa00107a; break; - case 0xa0011: return cur_rev <= 0xa0011da; break; - case 0xa0012: return cur_rev <= 0xa001243; break; - case 0xa0082: return cur_rev <= 0xa00820e; break; - case 0xa1011: return cur_rev <= 0xa101153; break; - case 0xa1012: return cur_rev <= 0xa10124e; break; - case 0xa1081: return cur_rev <= 0xa108109; break; - case 0xa2010: return cur_rev <= 0xa20102f; break; - case 0xa2012: return cur_rev <= 0xa201212; break; - case 0xa4041: return cur_rev <= 0xa404109; break; - case 0xa5000: return cur_rev <= 0xa500013; break; - case 0xa6012: return cur_rev <= 0xa60120a; break; - case 0xa7041: return cur_rev <= 0xa704109; break; - case 0xa7052: return cur_rev <= 0xa705208; break; - case 0xa7080: return cur_rev <= 0xa708009; break; - case 0xa70c0: return cur_rev <= 0xa70C009; break; - case 0xaa001: return cur_rev <= 0xaa00116; break; - case 0xaa002: return cur_rev <= 0xaa00218; break; - case 0xb0021: return cur_rev <= 0xb002146; break; - case 0xb0081: return cur_rev <= 0xb008111; break; - case 0xb1010: return cur_rev <= 0xb101046; break; - case 0xb2040: return cur_rev <= 0xb204031; break; - case 0xb4040: return cur_rev <= 0xb404031; break; - case 0xb4041: return cur_rev <= 0xb404101; break; - case 0xb6000: return cur_rev <= 0xb600031; break; - case 0xb6080: return cur_rev <= 0xb608031; break; - case 0xb7000: return cur_rev <= 0xb700031; break; - default: break; - } + cutoff = get_cutoff_revision(cur_rev); + if (cutoff) + return cur_rev <= cutoff;
pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n"); pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev); @@ -235,7 +246,7 @@ static bool cpu_has_entrysign(void) if (fam == 0x1a) { if (model <= 0x2f || (0x40 <= model && model <= 0x4f) || - (0x60 <= model && model <= 0x6f)) + (0x60 <= model && model <= 0x7f)) return true; }
@@ -468,6 +479,7 @@ static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size) { u8 family = x86_family(bsp_cpuid_1_eax); struct microcode_header_amd *mc_hdr; + u32 cur_rev, cutoff, patch_rev; unsigned int ret; u32 sh_psize; u16 proc_id; @@ -511,6 +523,24 @@ static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size) if (patch_fam != family) return 1;
+ cur_rev = get_patch_level(); + + /* No cutoff revision means old/unaffected by signing algorithm weakness => matches */ + cutoff = get_cutoff_revision(cur_rev); + if (!cutoff) + goto ok; + + patch_rev = mc_hdr->patch_id; + + if (cur_rev <= cutoff && patch_rev <= cutoff) + goto ok; + + if (cur_rev > cutoff && patch_rev > cutoff) + goto ok; + + return 1; +ok: + return 0; }
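The get_cutoff_revision() split above exists so that verify_patch() can apply the same per-family cutoff twice: once to the revision currently loaded and once to the candidate patch, accepting the patch only when both sit on the same side of the cutoff. A small stand-alone illustration of that rule with made-up revision values (the real table is the switch above):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for get_cutoff_revision(). */
    static uint32_t cutoff_for(uint32_t rev)
    {
            return (rev >> 8) == 0xa0011 ? 0xa0011da : 0;
    }

    /* Accept only if current and patch revisions are both old or both new. */
    static bool patch_level_matches(uint32_t cur_rev, uint32_t patch_rev)
    {
            uint32_t cutoff = cutoff_for(cur_rev);

            if (!cutoff)            /* family not in the table => no constraint */
                    return true;
            return (cur_rev <= cutoff) == (patch_rev <= cutoff);
    }

    /*
     * Example: with cutoff 0xa0011da, current 0xa0011d1 (old) plus patch
     * 0xa0011dc (new) is rejected, while current 0xa0011db plus patch
     * 0xa0011dc (both new) is accepted.
     */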
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 22abb5ee0cf2..aacb59c4a35c 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1879,7 +1879,7 @@ static int dump_xsave_layout_desc(struct coredump_params *cprm) };
if (!dump_emit(cprm, &xc, sizeof(xc))) - return 0; + return -1;
num_records++; } @@ -1917,7 +1917,7 @@ int elf_coredump_extra_notes_write(struct coredump_params *cprm) return 1;
num_records = dump_xsave_layout_desc(cprm); - if (!num_records) + if (num_records < 0) return 1;
/* Total size should be equal to the number of records */ diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 9400730e538e..16dd6b425101 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -391,6 +391,7 @@ DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
/* Posted Interrupt Descriptors for coalesced MSIs to be posted */ DEFINE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc); +static DEFINE_PER_CPU(bool, posted_msi_handler_active);
void intel_posted_msi_init(void) { @@ -408,6 +409,25 @@ void intel_posted_msi_init(void) this_cpu_write(posted_msi_pi_desc.ndst, destination); }
+void intel_ack_posted_msi_irq(struct irq_data *irqd) +{ + irq_move_irq(irqd); + + /* + * Handle the rare case that irq_retrigger() raised the actual + * assigned vector on the target CPU, which means that it was not + * invoked via the posted MSI handler below. In that case APIC EOI + * is required as otherwise the ISR entry becomes stale and lower + * priority interrupts are never going to be delivered after that. + * + * If the posted handler invoked the device interrupt handler then + * the EOI would be premature because it would acknowledge the + * posted vector. + */ + if (unlikely(!__this_cpu_read(posted_msi_handler_active))) + apic_eoi(); +} + /* * De-multiplexing posted interrupts is on the performance path, the code * below is written to optimize the cache performance based on the following @@ -483,6 +503,8 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
pid = this_cpu_ptr(&posted_msi_pi_desc);
+ /* Mark the handler active for intel_ack_posted_msi_irq() */ + __this_cpu_write(posted_msi_handler_active, true); inc_irq_stat(posted_msi_notification_count); irq_enter();
@@ -511,6 +533,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
apic_eoi(); irq_exit(); + __this_cpu_write(posted_msi_handler_active, false); set_irq_regs(old_regs); } #endif /* X86_POSTED_MSI */ diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 33a6cb1ac603..7bc38fed2072 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2070,15 +2070,33 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic) ktime_t delta;
/* - * Synchronize both deadlines to the same time source or - * differences in the periods (caused by differences in the - * underlying clocks or numerical approximation errors) will - * cause the two to drift apart over time as the errors - * accumulate. + * Use kernel time as the time source for both the hrtimer deadline and + * TSC-based deadline so that they stay synchronized. Computing each + * deadline independently will cause the two deadlines to drift apart + * over time as differences in the periods accumulate, e.g. due to + * differences in the underlying clocks or numerical approximation errors. */ apic->lapic_timer.target_expiration = ktime_add_ns(apic->lapic_timer.target_expiration, apic->lapic_timer.period); + + /* + * If the new expiration is in the past, e.g. because userspace stopped + * running the VM for an extended duration, then force the expiration + * to "now" and don't try to play catch-up with the missed events. KVM + * will only deliver a single interrupt regardless of how many events + * are pending, i.e. restarting the timer with an expiration in the + * past will do nothing more than waste host cycles, and can even lead + * to a hard lockup in extreme cases. + */ + if (ktime_before(apic->lapic_timer.target_expiration, now)) + apic->lapic_timer.target_expiration = now; + + /* + * Note, ensuring the expiration isn't in the past also prevents delta + * from going negative, which could cause the TSC deadline to become + * excessively large due to it an unsigned value. + */ delta = ktime_sub(apic->lapic_timer.target_expiration, now); apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + nsec_to_cycles(apic->vcpu, delta); @@ -2869,9 +2887,9 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
apic_timer_expired(apic, true);
- if (lapic_is_periodic(apic)) { + if (lapic_is_periodic(apic) && !WARN_ON_ONCE(!apic->lapic_timer.period)) { advance_periodic_target_expiration(apic); - hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); + hrtimer_set_expires(&ktimer->timer, ktimer->target_expiration); return HRTIMER_RESTART; } else return HRTIMER_NORESTART; diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index ff6379fdf222..c6556599376e 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -546,6 +546,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12 struct kvm_vcpu *vcpu = &svm->vcpu;
nested_vmcb02_compute_g_pat(svm); + vmcb_mark_dirty(vmcb02, VMCB_NPT);
/* Load the nested guest state */ if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) { @@ -676,6 +677,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, vmcb02->control.nested_ctl = vmcb01->control.nested_ctl; vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa; vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa; + vmcb_mark_dirty(vmcb02, VMCB_PERM_MAP);
/* Done at vmrun: asid. */
@@ -881,7 +883,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) if (!nested_vmcb_check_save(vcpu) || !nested_vmcb_check_controls(vcpu)) { vmcb12->control.exit_code = SVM_EXIT_ERR; - vmcb12->control.exit_code_hi = 0; + vmcb12->control.exit_code_hi = -1u; vmcb12->control.exit_info_1 = 0; vmcb12->control.exit_info_2 = 0; goto out; @@ -914,7 +916,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu) svm->soft_int_injected = false;
svm->vmcb->control.exit_code = SVM_EXIT_ERR; - svm->vmcb->control.exit_code_hi = 0; + svm->vmcb->control.exit_code_hi = -1u; svm->vmcb->control.exit_info_1 = 0; svm->vmcb->control.exit_info_2 = 0;
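The two vmcb_mark_dirty() calls added above concern the VMCB clean-bits optimization: a set clean bit tells the hardware it may keep using its cached copy of that field group, so when vmcb02 inherits the NPT controls and the I/O/MSR permission-map addresses from vmcb01, the corresponding bits have to be cleared or stale cached values can be used on the next VMRUN. A minimal sketch of the mechanism (the field-group indices here are illustrative; KVM's enum has more entries):

    #include <stdint.h>

    enum clean_bit { CLEAN_PERM_MAP = 1, CLEAN_NPT = 4 };    /* illustrative */

    struct vmcb_ctl {
            uint32_t clean;      /* bit set => CPU may reuse its cached copy */
    };

    /* Clearing the bit forces the CPU to re-read the field group on VMRUN. */
    static void mark_dirty(struct vmcb_ctl *c, enum clean_bit bit)
    {
            c->clean &= ~(1u << bit);
    }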
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 63c578e03f29..85816c4186b9 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2702,6 +2702,7 @@ static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
if (cr0 ^ val) { svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; + svm->vmcb->control.exit_code_hi = 0; ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); }
@@ -4650,31 +4651,45 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, case SVM_EXIT_WRITE_CR0: { unsigned long cr0, val;
- if (info->intercept == x86_intercept_cr_write) + /* + * Adjust the exit code accordingly if a CR other than CR0 is + * being written, and skip straight to the common handling as + * only CR0 has an additional selective intercept. + */ + if (info->intercept == x86_intercept_cr_write && info->modrm_reg) { icpt_info.exit_code += info->modrm_reg; - - if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || - info->intercept == x86_intercept_clts) break; + }
- if (!(vmcb12_is_intercept(&svm->nested.ctl, - INTERCEPT_SELECTIVE_CR0))) + /* + * Convert the exit_code to SVM_EXIT_CR0_SEL_WRITE if a + * selective CR0 intercept is triggered (the common logic will + * treat the selective intercept as being enabled). Note, the + * unconditional intercept has higher priority, i.e. this is + * only relevant if *only* the selective intercept is enabled. + */ + if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_CR0_WRITE) || + !(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))) break;
- cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; - val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; + /* CLTS never triggers INTERCEPT_SELECTIVE_CR0 */ + if (info->intercept == x86_intercept_clts) + break;
+ /* LMSW always triggers INTERCEPT_SELECTIVE_CR0 */ if (info->intercept == x86_intercept_lmsw) { - cr0 &= 0xfUL; - val &= 0xfUL; - /* lmsw can't clear PE - catch this here */ - if (cr0 & X86_CR0_PE) - val |= X86_CR0_PE; + icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; + break; }
+ /* + * MOV-to-CR0 only triggers INTERCEPT_SELECTIVE_CR0 if any bit + * other than SVM_CR0_SELECTIVE_MASK is changed. + */ + cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; + val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; if (cr0 ^ val) icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; - break; } case SVM_EXIT_READ_DR0: @@ -4735,6 +4750,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, if (static_cpu_has(X86_FEATURE_NRIPS)) vmcb->control.next_rip = info->next_rip; vmcb->control.exit_code = icpt_info.exit_code; + vmcb->control.exit_code_hi = 0; vmexit = nested_svm_exit_handled(svm);
ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index cada5db654b8..cfb43f8b0c75 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -662,9 +662,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code) { - svm->vmcb->control.exit_code = exit_code; - svm->vmcb->control.exit_info_1 = 0; - svm->vmcb->control.exit_info_2 = 0; + svm->vmcb->control.exit_code = exit_code; + svm->vmcb->control.exit_code_hi = 0; + svm->vmcb->control.exit_info_1 = 0; + svm->vmcb->control.exit_info_2 = 0; return nested_svm_vmexit(svm); }
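The exit_code_hi assignments in the three SVM files above all follow from the VMCB exit code being a 64-bit field that KVM stores as two 32-bit halves. SVM_EXIT_ERR is -1, so synthesizing an error exit has to set the high half to -1u as well, and every other synthetic exit has to zero it explicitly so a stale high half is never reported to L1. A tiny sketch of how the halves combine (hypothetical struct, same layout idea):

    #include <stdint.h>

    struct exit_code_halves {
            uint32_t exit_code;
            uint32_t exit_code_hi;
    };

    static uint64_t full_exit_code(const struct exit_code_halves *c)
    {
            return ((uint64_t)c->exit_code_hi << 32) | c->exit_code;
    }

    /*
     * exit_code = 0xffffffff, exit_code_hi = 0          => 0x00000000ffffffff
     * exit_code = 0xffffffff, exit_code_hi = 0xffffffff => -1 (SVM_EXIT_ERR)
     */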
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 60bd2791d933..76f9624d4938 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -18,6 +18,7 @@ #include "trace.h" #include "vmx.h" #include "smm.h" +#include "x86_ops.h"
static bool __read_mostly enable_shadow_vmcs = 1; module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); @@ -5051,7 +5052,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (vmx->nested.update_vmcs01_apicv_status) { vmx->nested.update_vmcs01_apicv_status = false; - kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); + vmx_refresh_apicv_exec_ctrl(vcpu); }
if (vmx->nested.update_vmcs01_hwapic_isr) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c12d7e28243d..e84f85f2cb64 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1035,6 +1035,13 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) } EXPORT_SYMBOL_GPL(kvm_require_dr);
+static bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) +{ + u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; + + return (vcpu->arch.apf.msr_en_val & mask) == mask; +} + static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) { return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); @@ -1127,15 +1134,20 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon }
if ((cr0 ^ old_cr0) & X86_CR0_PG) { - kvm_clear_async_pf_completion_queue(vcpu); - kvm_async_pf_hash_reset(vcpu); - /* * Clearing CR0.PG is defined to flush the TLB from the guest's * perspective. */ if (!(cr0 & X86_CR0_PG)) kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); + /* + * Check for async #PF completion events when enabling paging, + * as the vCPU may have previously encountered async #PFs (it's + * entirely legal for the guest to toggle paging on/off without + * waiting for the async #PF queue to drain). + */ + else if (kvm_pv_async_pf_enabled(vcpu)) + kvm_make_request(KVM_REQ_APF_READY, vcpu); }
if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS) @@ -3539,13 +3551,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 0; }
-static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) -{ - u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT; - - return (vcpu->arch.apf.msr_en_val & mask) == mask; -} - static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index e033d5594265..bf750cd599b2 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -72,6 +72,7 @@ #include <asm/mwait.h> #include <asm/pci_x86.h> #include <asm/cpu.h> +#include <asm/irq_stack.h> #ifdef CONFIG_X86_IOPL_IOPERM #include <asm/io_bitmap.h> #endif @@ -93,6 +94,44 @@ void *xen_initial_gdt; static int xen_cpu_up_prepare_pv(unsigned int cpu); static int xen_cpu_dead_pv(unsigned int cpu);
+#ifndef CONFIG_PREEMPTION +/* + * Some hypercalls issued by the toolstack can take many 10s of + * seconds. Allow tasks running hypercalls via the privcmd driver to + * be voluntarily preempted even if full kernel preemption is + * disabled. + * + * Such preemptible hypercalls are bracketed by + * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end() + * calls. + */ +DEFINE_PER_CPU(bool, xen_in_preemptible_hcall); +EXPORT_PER_CPU_SYMBOL_GPL(xen_in_preemptible_hcall); + +/* + * In case of scheduling the flag must be cleared and restored after + * returning from schedule as the task might move to a different CPU. + */ +static __always_inline bool get_and_clear_inhcall(void) +{ + bool inhcall = __this_cpu_read(xen_in_preemptible_hcall); + + __this_cpu_write(xen_in_preemptible_hcall, false); + return inhcall; +} + +static __always_inline void restore_inhcall(bool inhcall) +{ + __this_cpu_write(xen_in_preemptible_hcall, inhcall); +} + +#else + +static __always_inline bool get_and_clear_inhcall(void) { return false; } +static __always_inline void restore_inhcall(bool inhcall) { } + +#endif + struct tls_descs { struct desc_struct desc[3]; }; @@ -686,6 +725,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check) } #endif
+static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + inc_irq_stat(irq_hv_callback_count); + + xen_evtchn_do_upcall(); + + set_irq_regs(old_regs); +} + +__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs) +{ + irqentry_state_t state = irqentry_enter(regs); + bool inhcall; + + instrumentation_begin(); + run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs); + + inhcall = get_and_clear_inhcall(); + if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) { + irqentry_exit_cond_resched(); + instrumentation_end(); + restore_inhcall(inhcall); + } else { + instrumentation_end(); + irqentry_exit(regs, state); + } +} + struct trap_array_entry { void (*orig)(void); void (*xen)(void); diff --git a/block/blk-mq.c b/block/blk-mq.c index db72779760d5..1891863dcba1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3658,7 +3658,7 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) struct blk_mq_hw_ctx, cpuhp_online); int ret = 0;
- if (blk_mq_hctx_has_online_cpu(hctx, cpu)) + if (!hctx->nr_ctx || blk_mq_hctx_has_online_cpu(hctx, cpu)) return 0;
/* diff --git a/block/blk-zoned.c b/block/blk-zoned.c index f1160cc2cf85..7e04ed9b2c0b 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -73,6 +73,11 @@ struct blk_zone_wplug { struct gendisk *disk; };
+static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk) +{ + return 1U << disk->zone_wplugs_hash_bits; +} + /* * Zone write plug flags bits: * - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged, @@ -621,6 +626,8 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug) { struct bio *bio;
+ lockdep_assert_held(&zwplug->lock); + if (bio_list_empty(&zwplug->bio_list)) return;
@@ -628,6 +635,8 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug) zwplug->disk->disk_name, zwplug->zone_no); while ((bio = bio_list_pop(&zwplug->bio_list))) blk_zone_wplug_bio_io_error(zwplug, bio); + + zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED; }
/* @@ -708,71 +717,94 @@ static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector) disk_report_zones_cb, &args); }
-static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio, - unsigned int wp_offset) +static void blk_zone_reset_bio_endio(struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; - sector_t sector = bio->bi_iter.bi_sector; struct blk_zone_wplug *zwplug; - unsigned long flags; - - /* Conventional zones cannot be reset nor finished. */ - if (disk_zone_is_conv(disk, sector)) { - bio_io_error(bio); - return true; - }
/* - * No-wait reset or finish BIOs do not make much sense as the callers - * issue these as blocking operations in most cases. To avoid issues - * the BIO execution potentially failing with BLK_STS_AGAIN, warn about - * REQ_NOWAIT being set and ignore that flag. - */ - if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) - bio->bi_opf &= ~REQ_NOWAIT; - - /* - * If we have a zone write plug, set its write pointer offset to 0 - * (reset case) or to the zone size (finish case). This will abort all - * BIOs plugged for the target zone. It is fine as resetting or - * finishing zones while writes are still in-flight will result in the + * If we have a zone write plug, set its write pointer offset to 0. + * This will abort all BIOs plugged for the target zone. It is fine as + * resetting zones while writes are still in-flight will result in the * writes failing anyway. */ - zwplug = disk_get_zone_wplug(disk, sector); + zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector); if (zwplug) { + unsigned long flags; + spin_lock_irqsave(&zwplug->lock, flags); - disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset); + disk_zone_wplug_set_wp_offset(disk, zwplug, 0); spin_unlock_irqrestore(&zwplug->lock, flags); disk_put_zone_wplug(zwplug); } - - return false; }
-static bool blk_zone_wplug_handle_reset_all(struct bio *bio) +static void blk_zone_reset_all_bio_endio(struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; struct blk_zone_wplug *zwplug; unsigned long flags; - sector_t sector; + unsigned int i; + + if (atomic_read(&disk->nr_zone_wplugs)) { + /* Update the condition of all zone write plugs. */ + rcu_read_lock(); + for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) { + hlist_for_each_entry_rcu(zwplug, + &disk->zone_wplugs_hash[i], + node) { + spin_lock_irqsave(&zwplug->lock, flags); + disk_zone_wplug_set_wp_offset(disk, zwplug, 0); + spin_unlock_irqrestore(&zwplug->lock, flags); + } + } + rcu_read_unlock(); + } +} + +static void blk_zone_finish_bio_endio(struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + struct gendisk *disk = bdev->bd_disk; + struct blk_zone_wplug *zwplug;
/* - * Set the write pointer offset of all zone write plugs to 0. This will - * abort all plugged BIOs. It is fine as resetting zones while writes - * are still in-flight will result in the writes failing anyway. + * If we have a zone write plug, set its write pointer offset to the + * zone size. This will abort all BIOs plugged for the target zone. It + * is fine as resetting zones while writes are still in-flight will + * result in the writes failing anyway. */ - for (sector = 0; sector < get_capacity(disk); - sector += disk->queue->limits.chunk_sectors) { - zwplug = disk_get_zone_wplug(disk, sector); - if (zwplug) { - spin_lock_irqsave(&zwplug->lock, flags); - disk_zone_wplug_set_wp_offset(disk, zwplug, 0); - spin_unlock_irqrestore(&zwplug->lock, flags); - disk_put_zone_wplug(zwplug); - } + zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector); + if (zwplug) { + unsigned long flags; + + spin_lock_irqsave(&zwplug->lock, flags); + disk_zone_wplug_set_wp_offset(disk, zwplug, + bdev_zone_sectors(bdev)); + spin_unlock_irqrestore(&zwplug->lock, flags); + disk_put_zone_wplug(zwplug); } +}
- return false; +void blk_zone_mgmt_bio_endio(struct bio *bio) +{ + /* If the BIO failed, we have nothing to do. */ + if (bio->bi_status != BLK_STS_OK) + return; + + switch (bio_op(bio)) { + case REQ_OP_ZONE_RESET: + blk_zone_reset_bio_endio(bio); + return; + case REQ_OP_ZONE_RESET_ALL: + blk_zone_reset_all_bio_endio(bio); + return; + case REQ_OP_ZONE_FINISH: + blk_zone_finish_bio_endio(bio); + return; + default: + return; + } }
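blk_zone_mgmt_bio_endio() above moves the write-pointer bookkeeping for zone reset, reset-all and finish from BIO submission time to BIO completion time, and only when the BIO completed without error, so the cached zone write plug offsets follow what the device actually did. A small user-space style sketch of that completion-only update shape (invented names):

    #include <stdbool.h>

    enum zone_op { ZONE_RESET, ZONE_FINISH };

    struct zone_state {
            unsigned long long wp_offset;      /* sectors written in the zone */
            unsigned long long zone_sectors;
    };

    /* Mirrors the endio hook: failed operations leave the state untouched. */
    static void zone_mgmt_complete(struct zone_state *z, enum zone_op op, bool ok)
    {
            if (!ok)
                    return;

            switch (op) {
            case ZONE_RESET:
                    z->wp_offset = 0;
                    break;
            case ZONE_FINISH:
                    z->wp_offset = z->zone_sectors;
                    break;
            }
    }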
static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk, @@ -1115,6 +1147,32 @@ static void blk_zone_wplug_handle_native_zone_append(struct bio *bio) disk_put_zone_wplug(zwplug); }
+static bool blk_zone_wplug_handle_zone_mgmt(struct bio *bio) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + + if (bio_op(bio) != REQ_OP_ZONE_RESET_ALL && + disk_zone_is_conv(disk, bio->bi_iter.bi_sector)) { + /* + * Zone reset and zone finish operations do not apply to + * conventional zones. + */ + bio_io_error(bio); + return true; + } + + /* + * No-wait zone management BIOs do not make much sense as the callers + * issue these as blocking operations in most cases. To avoid issues + * with the BIO execution potentially failing with BLK_STS_AGAIN, warn + * about REQ_NOWAIT being set and ignore that flag. + */ + if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) + bio->bi_opf &= ~REQ_NOWAIT; + + return false; +} + /** * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging * @bio: The BIO being submitted @@ -1162,12 +1220,9 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) case REQ_OP_WRITE_ZEROES: return blk_zone_wplug_handle_write(bio, nr_segs); case REQ_OP_ZONE_RESET: - return blk_zone_wplug_handle_reset_or_finish(bio, 0); case REQ_OP_ZONE_FINISH: - return blk_zone_wplug_handle_reset_or_finish(bio, - bdev_zone_sectors(bdev)); case REQ_OP_ZONE_RESET_ALL: - return blk_zone_wplug_handle_reset_all(bio); + return blk_zone_wplug_handle_zone_mgmt(bio); default: return false; } @@ -1324,11 +1379,6 @@ static void blk_zone_wplug_bio_work(struct work_struct *work) disk_put_zone_wplug(zwplug); }
-static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk) -{ - return 1U << disk->zone_wplugs_hash_bits; -} - void disk_init_zone_resources(struct gendisk *disk) { spin_lock_init(&disk->zone_wplugs_lock); @@ -1510,6 +1560,11 @@ static int disk_update_zone_resources(struct gendisk *disk, unsigned int nr_seq_zones, nr_conv_zones; unsigned int pool_size; struct queue_limits lim; + int ret = 0; + + lim = queue_limits_start_update(q); + + blk_mq_freeze_queue(q);
disk->nr_zones = args->nr_zones; disk->zone_capacity = args->zone_capacity; @@ -1519,11 +1574,10 @@ static int disk_update_zone_resources(struct gendisk *disk, if (nr_conv_zones >= disk->nr_zones) { pr_warn("%s: Invalid number of conventional zones %u / %u\n", disk->disk_name, nr_conv_zones, disk->nr_zones); - return -ENODEV; + ret = -ENODEV; + goto unfreeze; }
- lim = queue_limits_start_update(q); - /* * Some devices can advertize zone resource limits that are larger than * the number of sequential zones of the zoned block device, e.g. a @@ -1560,7 +1614,15 @@ static int disk_update_zone_resources(struct gendisk *disk, }
commit: - return queue_limits_commit_update_frozen(q, &lim); + ret = queue_limits_commit_update(q, &lim); + +unfreeze: + if (ret) + disk_free_zone_resources(disk); + + blk_mq_unfreeze_queue(q); + + return ret; }
static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx, @@ -1781,19 +1843,14 @@ int blk_revalidate_disk_zones(struct gendisk *disk) ret = -ENODEV; }
- /* - * Set the new disk zone parameters only once the queue is frozen and - * all I/Os are completed. - */ if (ret > 0) - ret = disk_update_zone_resources(disk, &args); - else - pr_warn("%s: failed to revalidate zones\n", disk->disk_name); - if (ret) { - blk_mq_freeze_queue(q); - disk_free_zone_resources(disk); - blk_mq_unfreeze_queue(q); - } + return disk_update_zone_resources(disk, &args); + + pr_warn("%s: failed to revalidate zones\n", disk->disk_name); + + blk_mq_freeze_queue(q); + disk_free_zone_resources(disk); + blk_mq_unfreeze_queue(q);
return ret; } diff --git a/block/blk.h b/block/blk.h index e91012247ff2..63c92db0a507 100644 --- a/block/blk.h +++ b/block/blk.h @@ -486,9 +486,23 @@ static inline void blk_zone_update_request_bio(struct request *rq, bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) bio->bi_iter.bi_sector = rq->__sector; } +void blk_zone_mgmt_bio_endio(struct bio *bio); void blk_zone_write_plug_bio_endio(struct bio *bio); static inline void blk_zone_bio_endio(struct bio *bio) { + /* + * Zone management BIOs may impact zone write plugs (e.g. a zone reset + * changes a zone write plug zone write pointer offset), but these + * operation do not go through zone write plugging as they may operate + * on zones that do not have a zone write + * plug. blk_zone_mgmt_bio_endio() handles the potential changes to zone + * write plugs that are present. + */ + if (op_is_zone_mgmt(bio_op(bio))) { + blk_zone_mgmt_bio_endio(bio); + return; + } + /* * For write BIOs to zoned devices, signal the completion of the BIO so * that the next write BIO can be submitted by zone write plugging. diff --git a/block/genhd.c b/block/genhd.c index 99344f53c789..22fea4eb4241 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -83,7 +83,7 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size) (disk->flags & GENHD_FL_HIDDEN)) return false;
- pr_info("%s: detected capacity change from %lld to %lld\n", + pr_info_ratelimited("%s: detected capacity change from %lld to %lld\n", disk->disk_name, capacity, size);
/* diff --git a/crypto/af_alg.c b/crypto/af_alg.c index ca6fdcc6c54a..6c271e55f44d 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1212,15 +1212,14 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, if (unlikely(!areq)) return ERR_PTR(-ENOMEM);
+ memset(areq, 0, areqlen); + ctx->inflight = true;
areq->areqlen = areqlen; areq->sk = sk; areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl; - areq->last_rsgl = NULL; INIT_LIST_HEAD(&areq->rsgl_list); - areq->tsgl = NULL; - areq->tsgl_entries = 0;
return areq; } diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index e3f1a4852737..4d3dfc60a16a 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -416,9 +416,8 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) if (!ctx) return -ENOMEM;
- ctx->result = NULL; + memset(ctx, 0, len); ctx->len = len; - ctx->more = false; crypto_init_wait(&ctx->wait);
ask->private = ctx; diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 10c41adac3b1..1a86e40c8372 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -248,9 +248,8 @@ static int rng_accept_parent(void *private, struct sock *sk) if (!ctx) return -ENOMEM;
+ memset(ctx, 0, len); ctx->len = len; - ctx->addtl = NULL; - ctx->addtl_len = 0;
/* * No seeding done at that point -- if multiple accepts are diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 17e11d51ddc3..04928df0095b 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -50,6 +50,7 @@ static int seqiv_aead_encrypt(struct aead_request *req) struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); struct aead_request *subreq = aead_request_ctx(req); crypto_completion_t compl; + bool unaligned_info; void *data; u8 *info; unsigned int ivsize = 8; @@ -79,8 +80,9 @@ static int seqiv_aead_encrypt(struct aead_request *req) return err; }
- if (unlikely(!IS_ALIGNED((unsigned long)info, - crypto_aead_alignmask(geniv) + 1))) { + unaligned_info = !IS_ALIGNED((unsigned long)info, + crypto_aead_alignmask(geniv) + 1); + if (unlikely(unaligned_info)) { info = kmemdup(req->iv, ivsize, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC); @@ -100,7 +102,7 @@ static int seqiv_aead_encrypt(struct aead_request *req) scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
err = crypto_aead_encrypt(subreq); - if (unlikely(info != req->iv)) + if (unlikely(unaligned_info)) seqiv_aead_encrypt_complete2(req, err); return err; } diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c index 07a034a53aca..59d2308259cb 100644 --- a/drivers/acpi/acpi_pcc.c +++ b/drivers/acpi/acpi_pcc.c @@ -53,7 +53,7 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function, struct pcc_data *data; struct acpi_pcc_info *ctx = handler_context; struct pcc_mbox_chan *pcc_chan; - static acpi_status ret; + acpi_status ret;
data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index eee396a77bae..1b000ccbf8e1 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -169,9 +169,12 @@ acpi_ns_walk_namespace(acpi_object_type type,
if (start_node == ACPI_ROOT_OBJECT) { start_node = acpi_gbl_root_node; - if (!start_node) { - return_ACPI_STATUS(AE_NO_NAMESPACE); - } + } + + /* Avoid walking the namespace if the StartNode is NULL */ + + if (!start_node) { + return_ACPI_STATUS(AE_NO_NAMESPACE); }
/* Null child means "get first node" */ diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 62b723f6c48d..1e8e2002f81a 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -1297,7 +1297,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) /* Are any of the regs PCC ?*/ if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) || - CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) { + CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg) || + CPC_IN_PCC(guaranteed_reg)) { if (pcc_ss_id < 0) { pr_debug("Invalid pcc_ss_id\n"); return -ENODEV; diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h index 612ccc4c2827..eb48ac000e3d 100644 --- a/drivers/acpi/fan.h +++ b/drivers/acpi/fan.h @@ -11,6 +11,7 @@ #define _ACPI_FAN_H_
#include <linux/kconfig.h> +#include <linux/limits.h>
#define ACPI_FAN_DEVICE_IDS \ {"INT3404", }, /* Fan */ \ @@ -58,6 +59,38 @@ struct acpi_fan { struct device_attribute fine_grain_control; };
+/** + * acpi_fan_speed_valid - Check if fan speed value is valid + * @speeed: Speed value returned by the ACPI firmware + * + * Check if the fan speed value returned by the ACPI firmware is valid. This function is + * necessary as ACPI firmware implementations can return 0xFFFFFFFF to signal that the + * ACPI fan does not support speed reporting. Additionally, some buggy ACPI firmware + * implementations return a value larger than the 32-bit integer value defined by + * the ACPI specification when using placeholder values. Such invalid values are also + * detected by this function. + * + * Returns: True if the fan speed value is valid, false otherwise. + */ +static inline bool acpi_fan_speed_valid(u64 speed) +{ + return speed < U32_MAX; +} + +/** + * acpi_fan_power_valid - Check if fan power value is valid + * @power: Power value returned by the ACPI firmware + * + * Check if the fan power value returned by the ACPI firmware is valid. + * See acpi_fan_speed_valid() for details. + * + * Returns: True if the fan power value is valid, false otherwise. + */ +static inline bool acpi_fan_power_valid(u64 power) +{ + return power < U32_MAX; +} + int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst); int acpi_fan_create_attributes(struct acpi_device *device); void acpi_fan_delete_attributes(struct acpi_device *device); diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c index 4b2c2007f2d7..47a02ef5a606 100644 --- a/drivers/acpi/fan_hwmon.c +++ b/drivers/acpi/fan_hwmon.c @@ -15,10 +15,6 @@
#include "fan.h"
-/* Returned when the ACPI fan does not support speed reporting */ -#define FAN_SPEED_UNAVAILABLE U32_MAX -#define FAN_POWER_UNAVAILABLE U32_MAX - static struct acpi_fan_fps *acpi_fan_get_current_fps(struct acpi_fan *fan, u64 control) { unsigned int i; @@ -77,7 +73,7 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_ * when the associated attribute should not be created. */ for (i = 0; i < fan->fps_count; i++) { - if (fan->fps[i].power != FAN_POWER_UNAVAILABLE) + if (acpi_fan_power_valid(fan->fps[i].power)) return 0444; }
@@ -106,7 +102,7 @@ static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, case hwmon_fan: switch (attr) { case hwmon_fan_input: - if (fst.speed == FAN_SPEED_UNAVAILABLE) + if (!acpi_fan_speed_valid(fst.speed)) return -ENODEV;
if (fst.speed > LONG_MAX) @@ -134,7 +130,7 @@ static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, if (!fps) return -EIO;
- if (fps->power == FAN_POWER_UNAVAILABLE) + if (!acpi_fan_power_valid(fps->power)) return -ENODEV;
if (fps->power > LONG_MAX / MICROWATT_PER_MILLIWATT) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index b7ee463e757d..8a37de04b69b 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1441,7 +1441,7 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!prev) { do { - port = fwnode_get_next_child_node(fwnode, port); + port = acpi_get_next_subnode(fwnode, port); /* * The names of the port nodes begin with "port@" * followed by the number of the port node and they also @@ -1459,13 +1459,13 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint( if (!port) return NULL;
- endpoint = fwnode_get_next_child_node(port, prev); + endpoint = acpi_get_next_subnode(port, prev); while (!endpoint) { - port = fwnode_get_next_child_node(fwnode, port); + port = acpi_get_next_subnode(fwnode, port); if (!port) break; if (is_acpi_graph_node(port, "port")) - endpoint = fwnode_get_next_child_node(port, NULL); + endpoint = acpi_get_next_subnode(port, NULL); }
/* diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index c0e8b765522d..f23c3ed01810 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c @@ -144,6 +144,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn) if (!dev) return -EPROBE_DEFER; ahb = dev_get_drvdata(dev); + put_device(dev); val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL); val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE; gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index d8aaa5b61628..425c44f1e4d3 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1829,16 +1829,18 @@ void pm_runtime_init(struct device *dev) */ void pm_runtime_reinit(struct device *dev) { - if (!pm_runtime_enabled(dev)) { - if (dev->power.runtime_status == RPM_ACTIVE) - pm_runtime_set_suspended(dev); - if (dev->power.irq_safe) { - spin_lock_irq(&dev->power.lock); - dev->power.irq_safe = 0; - spin_unlock_irq(&dev->power.lock); - if (dev->parent) - pm_runtime_put(dev->parent); - } + if (pm_runtime_enabled(dev)) + return; + + if (dev->power.runtime_status == RPM_ACTIVE) + pm_runtime_set_suspended(dev); + + if (dev->power.irq_safe) { + spin_lock_irq(&dev->power.lock); + dev->power.irq_safe = 0; + spin_unlock_irq(&dev->power.lock); + if (dev->parent) + pm_runtime_put(dev->parent); } /* * Clear power.needs_force_resume in case it has been set by diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 3affb538b989..7f1880284a7b 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -331,7 +331,7 @@ static bool initialized; * This default is used whenever the current disk size is unknown. * [Now it is rather a minimum] */ -#define MAX_DISK_SIZE 4 /* 3984 */ +#define MAX_DISK_SIZE (PAGE_SIZE / 1024)
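For the floppy change just above, PAGE_SIZE is in bytes, so the new expression is simply the page size expressed in KiB; the old hard-coded 4 is preserved on the common 4 KiB-page configurations, and presumably the point is that the fallback size keeps up with kernels built for larger pages:

    PAGE_SIZE =  4096  ->  MAX_DISK_SIZE =  4096 / 1024 =  4   (unchanged)
    PAGE_SIZE = 16384  ->  MAX_DISK_SIZE = 16384 / 1024 = 16
    PAGE_SIZE = 65536  ->  MAX_DISK_SIZE = 65536 / 1024 = 64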
/* * globals used by 'result()' diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index c34695d2eea7..5be0581c3334 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1424,9 +1424,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess, goto out_alloc; }
- ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1, - GFP_KERNEL); - if (ret < 0) { + dev->clt_device_id = ida_alloc_max(&index_ida, + (1 << (MINORBITS - RNBD_PART_BITS)) - 1, + GFP_KERNEL); + if (dev->clt_device_id < 0) { + ret = dev->clt_device_id; pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n", pathname, sess->sessname, ret); goto out_queues; @@ -1435,10 +1437,9 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess, dev->pathname = kstrdup(pathname, GFP_KERNEL); if (!dev->pathname) { ret = -ENOMEM; - goto out_queues; + goto out_ida; }
- dev->clt_device_id = ret; dev->sess = sess; dev->access_mode = access_mode; dev->nr_poll_queues = nr_poll_queues; @@ -1454,6 +1455,8 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
return dev;
+out_ida: + ida_free(&index_ida, dev->clt_device_id); out_queues: kfree(dev->hw_queues); out_alloc: diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h index a48e040abe63..fbc1ed766025 100644 --- a/drivers/block/rnbd/rnbd-clt.h +++ b/drivers/block/rnbd/rnbd-clt.h @@ -112,7 +112,7 @@ struct rnbd_clt_dev { struct rnbd_queue *hw_queues; u32 device_id; /* local Idr index - used to track minor number allocations. */ - u32 clt_device_id; + int clt_device_id; struct mutex lock; enum rnbd_clt_dev_state dev_state; refcount_t refcount; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index dc0c45756c44..603ff13d9f7c 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -501,6 +501,8 @@ static const struct usb_device_id quirks_table[] = { /* Realtek 8821CE Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3533), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH },
/* Realtek 8822CE Bluetooth devices */ { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | @@ -575,6 +577,8 @@ static const struct usb_device_id quirks_table[] = { /* Realtek 8852BT/8852BE-VT Bluetooth devices */ { USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe12f), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH },
/* Realtek 8922AE Bluetooth devices */ { USB_DEVICE(0x0bda, 0x8922), .driver_info = BTUSB_REALTEK | @@ -611,6 +615,8 @@ static const struct usb_device_id quirks_table[] = { /* Additional MediaTek MT7920 Bluetooth devices */ { USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe135), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK | @@ -673,6 +679,8 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe170), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK | @@ -763,6 +771,8 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x2b89, 0x8761), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2b89, 0x6275), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, @@ -3825,7 +3835,7 @@ static int btusb_probe(struct usb_interface *intf, return -ENODEV; }
- data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM;
@@ -3848,8 +3858,10 @@ static int btusb_probe(struct usb_interface *intf, } }
- if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) + if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) { + kfree(data); return -ENODEV; + }
if (id->driver_info & BTUSB_AMP) { data->cmdreq_type = USB_TYPE_CLASS | 0x01; @@ -3904,8 +3916,10 @@ static int btusb_probe(struct usb_interface *intf, data->recv_acl = hci_recv_frame;
hdev = hci_alloc_dev_priv(priv_size); - if (!hdev) + if (!hdev) { + kfree(data); return -ENOMEM; + }
hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); @@ -4177,6 +4191,7 @@ static int btusb_probe(struct usb_interface *intf, if (data->reset_gpio) gpiod_put(data->reset_gpio); hci_free_dev(hdev); + kfree(data); return err; }
@@ -4225,6 +4240,7 @@ static void btusb_disconnect(struct usb_interface *intf) }
hci_free_dev(hdev); + kfree(data); }
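The btusb changes above replace devm_kzalloc() with kzalloc() plus an explicit kfree() on every exit path, i.e. the probe data is now owned manually: each early return after the allocation either frees it or hands it to the hci device, and disconnect frees it at the end. A generic sketch of that unwinding shape (invented names, not the btusb code):

    #include <stdlib.h>

    struct priv { int dummy; };                /* stand-in for the probe data */

    static int probe_like(struct priv **out, int endpoints_ok)
    {
            struct priv *data = calloc(1, sizeof(*data));

            if (!data)
                    return -1;
            if (!endpoints_ok) {               /* early failure: free here */
                    free(data);
                    return -1;
            }
            *out = data;                       /* ownership moves to the caller */
            return 0;
    }

    static void disconnect_like(struct priv *data)
    {
            free(data);                        /* mirrored by kfree() above */
    }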
#ifdef CONFIG_PM diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f715c8d28129..27149eb29afb 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -48,6 +48,7 @@ enum sysc_soc { SOC_UNKNOWN, SOC_2420, SOC_2430, + SOC_AM33, SOC_3430, SOC_AM35, SOC_3630, @@ -2896,6 +2897,7 @@ static void ti_sysc_idle(struct work_struct *work) static const struct soc_device_attribute sysc_soc_match[] = { SOC_FLAG("OMAP242*", SOC_2420), SOC_FLAG("OMAP243*", SOC_2430), + SOC_FLAG("AM33*", SOC_AM33), SOC_FLAG("AM35*", SOC_AM35), SOC_FLAG("OMAP3[45]*", SOC_3430), SOC_FLAG("OMAP3[67]*", SOC_3630), @@ -3101,10 +3103,15 @@ static int sysc_check_active_timer(struct sysc *ddata) * can be dropped if we stop supporting old beagleboard revisions * A to B4 at some point. */ - if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) + switch (sysc_soc->soc) { + case SOC_AM33: + case SOC_3430: + case SOC_AM35: error = -ENXIO; - else + break; + default: error = -EBUSY; + }
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 9fed9706d9cd..c138c468f3a4 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -835,7 +835,10 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ret = -ENOTTY; break; } - Dummy = readb(apbs[IndexCard].RamIO + VERS); + + if (cmd != 6) + Dummy = readb(apbs[IndexCard].RamIO + VERS); + kfree(adgl); mutex_unlock(&ac_mutex); return ret; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 99fe01321971..188722ec0337 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -613,7 +613,8 @@ static void __ipmi_bmc_unregister(struct ipmi_smi *intf); static int __ipmi_bmc_register(struct ipmi_smi *intf, struct ipmi_device_id *id, bool guid_set, guid_t *guid, int intf_num); -static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id); +static int __scan_channels(struct ipmi_smi *intf, + struct ipmi_device_id *id, bool rescan);
/* @@ -2665,7 +2666,7 @@ static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) need_waiter(intf); /* Retry later on an error. */ else - __scan_channels(intf, &id); + __scan_channels(intf, &id, false);
if (!intf_set) { @@ -2685,7 +2686,7 @@ static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, goto out_noprocessing; } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) /* Version info changes, scan the channels again. */ - __scan_channels(intf, &bmc->fetch_id); + __scan_channels(intf, &bmc->fetch_id, true);
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
@@ -3414,8 +3415,6 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) intf->channels_ready = true; wake_up(&intf->waitq); } else { - intf->channel_list = intf->wchannels + set; - intf->channels_ready = true; rv = send_channel_info_cmd(intf, intf->curr_channel); }
@@ -3437,10 +3436,17 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) /* * Must be holding intf->bmc_reg_mutex to call this. */ -static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) +static int __scan_channels(struct ipmi_smi *intf, + struct ipmi_device_id *id, + bool rescan) { int rv;
+ if (rescan) { + /* Clear channels_ready to force channels rescan. */ + intf->channels_ready = false; + } + if (ipmi_version_major(id) > 1 || (ipmi_version_major(id) == 1 && ipmi_version_minor(id) >= 5)) { @@ -3642,7 +3648,7 @@ int ipmi_add_smi(struct module *owner, }
mutex_lock(&intf->bmc_reg_mutex); - rv = __scan_channels(intf, &id); + rv = __scan_channels(intf, &id, false); mutex_unlock(&intf->bmc_reg_mutex); if (rv) goto out_err_bmc_reg; diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index e25daf2396d3..dfeb28866a32 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -282,7 +282,6 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->work_space.context_buf); kfree(chip->work_space.session_buf); - kfree(chip->allocated_banks); #ifdef CONFIG_TCG_TPM2_HMAC kfree(chip->auth); #endif diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c index cf64c7385105..b49a790f1bd5 100644 --- a/drivers/char/tpm/tpm1-cmd.c +++ b/drivers/char/tpm/tpm1-cmd.c @@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr) */ int tpm1_get_pcr_allocation(struct tpm_chip *chip) { - chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), - GFP_KERNEL); - if (!chip->allocated_banks) - return -ENOMEM; - chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index dfdcbd009720..b58a8a7e1564 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -11,8 +11,11 @@ * used by the kernel internally. */
+#include "linux/dev_printk.h" +#include "linux/tpm.h" #include "tpm.h" #include <crypto/hash_info.h> +#include <linux/unaligned.h>
static bool disable_pcr_integrity; module_param(disable_pcr_integrity, bool, 0444); @@ -602,11 +605,9 @@ ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
nr_possible_banks = be32_to_cpup( (__be32 *)&buf.data[TPM_HEADER_SIZE + 5]); - - chip->allocated_banks = kcalloc(nr_possible_banks, - sizeof(*chip->allocated_banks), - GFP_KERNEL); - if (!chip->allocated_banks) { + if (nr_possible_banks > TPM2_MAX_PCR_BANKS) { + pr_err("tpm: out of bank capacity: %u > %u\n", + nr_possible_banks, TPM2_MAX_PCR_BANKS); rc = -ENOMEM; goto out; } diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index cf0b83154044..a10db4a4aced 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -156,47 +156,60 @@ static u8 name_size(const u8 *name) return size_map[alg] + 2; }
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf) +static int tpm2_read_public(struct tpm_chip *chip, u32 handle, void *name) { - struct tpm_header *head = (struct tpm_header *)buf->data; + u32 mso = tpm2_handle_mso(handle); off_t offset = TPM_HEADER_SIZE; - u32 tot_len = be32_to_cpu(head->length); - u32 val; - - /* we're starting after the header so adjust the length */ - tot_len -= TPM_HEADER_SIZE; - - /* skip public */ - val = tpm_buf_read_u16(buf, &offset); - if (val > tot_len) - return -EINVAL; - offset += val; - /* name */ - val = tpm_buf_read_u16(buf, &offset); - if (val != name_size(&buf->data[offset])) - return -EINVAL; - memcpy(name, &buf->data[offset], val); - /* forget the rest */ - return 0; -} - -static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name) -{ - struct tpm_buf buf; int rc; + u8 name_size_alg; + struct tpm_buf buf; + + if (mso != TPM2_MSO_PERSISTENT && mso != TPM2_MSO_VOLATILE && + mso != TPM2_MSO_NVRAM) { + memcpy(name, &handle, sizeof(u32)); + return sizeof(u32); + }
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC); if (rc) return rc;
tpm_buf_append_u32(&buf, handle); - rc = tpm_transmit_cmd(chip, &buf, 0, "read public"); - if (rc == TPM2_RC_SUCCESS) - rc = tpm2_parse_read_public(name, &buf);
- tpm_buf_destroy(&buf); + rc = tpm_transmit_cmd(chip, &buf, 0, "TPM2_ReadPublic"); + if (rc) { + tpm_buf_destroy(&buf); + return tpm_ret_to_err(rc); + }
- return rc; + /* Skip TPMT_PUBLIC: */ + offset += tpm_buf_read_u16(&buf, &offset); + + /* + * Ensure space for the length field of TPM2B_NAME and hashAlg field of + * TPMT_HA (the extra four bytes). + */ + if (offset + 4 > tpm_buf_length(&buf)) { + tpm_buf_destroy(&buf); + return -EIO; + } + + rc = tpm_buf_read_u16(&buf, &offset); + name_size_alg = name_size(&buf.data[offset]); + + if (rc != name_size_alg) { + tpm_buf_destroy(&buf); + return -EIO; + } + + if (offset + rc > tpm_buf_length(&buf)) { + tpm_buf_destroy(&buf); + return -EIO; + } + + memcpy(name, &buf.data[offset], rc); + tpm_buf_destroy(&buf); + return name_size_alg; } #endif /* CONFIG_TCG_TPM2_HMAC */
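The rewritten tpm2_read_public() above returns the name length (or sizeof(u32) for handle types that carry no name) and checks every offset against tpm_buf_length() before copying, so a short or malformed TPM2_ReadPublic response yields -EIO instead of reading past the buffer. A rough stand-alone sketch of the same skip-then-bounds-check-then-copy idea over a response shaped as <TPMT_PUBLIC size><public area><TPM2B_NAME size><name> (invented helpers, not the tpm_buf API):

    #include <stdint.h>
    #include <string.h>

    struct resp { const uint8_t *data; size_t len; };

    static int read_be16(const struct resp *r, size_t *off, uint16_t *out)
    {
            if (*off + 2 > r->len)
                    return -1;
            *out = (uint16_t)((r->data[*off] << 8) | r->data[*off + 1]);
            *off += 2;
            return 0;
    }

    static int parse_name(const struct resp *r, uint8_t *name, size_t name_max)
    {
            uint16_t pub_len, name_len;
            size_t off = 0;                    /* would start after the header */

            if (read_be16(r, &off, &pub_len))
                    return -1;
            off += pub_len;                    /* skip the public area */
            if (read_be16(r, &off, &name_len))
                    return -1;
            if (name_len > name_max || off + name_len > r->len)
                    return -1;
            memcpy(name, r->data + off, name_len);
            return name_len;
    }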
@@ -229,6 +242,7 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, enum tpm2_mso_type mso = tpm2_handle_mso(handle); struct tpm2_auth *auth; int slot; + int ret; #endif
if (!tpm2_chip_auth(chip)) { @@ -251,8 +265,11 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, if (mso == TPM2_MSO_PERSISTENT || mso == TPM2_MSO_VOLATILE || mso == TPM2_MSO_NVRAM) { - if (!name) - tpm2_read_public(chip, handle, auth->name[slot]); + if (!name) { + ret = tpm2_read_public(chip, handle, auth->name[slot]); + if (ret < 0) + goto err; + } } else { if (name) dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n"); @@ -261,6 +278,10 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, auth->name_h[slot] = handle; if (name) memcpy(auth->name[slot], name, name_size(name)); + return; + +err: + tpm2_end_auth_session(chip); #endif } EXPORT_SYMBOL_GPL(tpm_buf_append_name); diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c index 03c59bf22106..b47c86906046 100644 --- a/drivers/clk/mvebu/cp110-system-controller.c +++ b/drivers/clk/mvebu/cp110-system-controller.c @@ -110,6 +110,25 @@ static const char * const gate_base_names[] = { [CP110_GATE_EIP197] = "eip197" };
+static unsigned long gate_flags(const u8 bit_idx) +{ + switch (bit_idx) { + case CP110_GATE_PCIE_X1_0: + case CP110_GATE_PCIE_X1_1: + case CP110_GATE_PCIE_X4: + /* + * If a port had an active link at boot time, stopping + * the clock creates a failed state from which controller + * driver can not recover. + * Prevent stopping this clock till after a driver has taken + * ownership. + */ + return CLK_IGNORE_UNUSED; + default: + return 0; + } +}; + struct cp110_gate_clk { struct clk_hw hw; struct regmap *regmap; @@ -171,6 +190,7 @@ static struct clk_hw *cp110_register_gate(const char *name, init.ops = &cp110_gate_ops; init.parent_names = &parent_name; init.num_parents = 1; + init.flags = gate_flags(bit_idx);
gate->regmap = regmap; gate->bit_idx = bit_idx; diff --git a/drivers/clk/qcom/dispcc-sm7150.c b/drivers/clk/qcom/dispcc-sm7150.c index d32bd7df1433..1e2a98a63511 100644 --- a/drivers/clk/qcom/dispcc-sm7150.c +++ b/drivers/clk/qcom/dispcc-sm7150.c @@ -357,7 +357,7 @@ static struct clk_rcg2 dispcc_mdss_pclk0_clk_src = { .name = "dispcc_mdss_pclk0_clk_src", .parent_data = dispcc_parent_data_4, .num_parents = ARRAY_SIZE(dispcc_parent_data_4), - .flags = CLK_SET_RATE_PARENT, + .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, .ops = &clk_pixel_ops, }, }; diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c index 2ef5748c139b..5f64d93b2fac 100644 --- a/drivers/clk/samsung/clk-exynos-clkout.c +++ b/drivers/clk/samsung/clk-exynos-clkout.c @@ -174,6 +174,7 @@ static int exynos_clkout_probe(struct platform_device *pdev) clkout->mux.shift = EXYNOS_CLKOUT_MUX_SHIFT; clkout->mux.lock = &clkout->slock;
+ clkout->data.num = EXYNOS_CLKOUT_NR_CLKS; clkout->data.hws[0] = clk_hw_register_composite(NULL, "clkout", parent_names, parent_count, &clkout->mux.hw, &clk_mux_ops, NULL, NULL, &clkout->gate.hw, @@ -184,7 +185,6 @@ static int exynos_clkout_probe(struct platform_device *pdev) goto err_unmap; }
- clkout->data.num = EXYNOS_CLKOUT_NR_CLKS; ret = of_clk_add_hw_provider(clkout->np, of_clk_hw_onecell_get, &clkout->data); if (ret) goto err_clk_unreg; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 67bac12d4d55..dbd73cd0cf53 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -87,6 +87,7 @@ static const struct of_device_id allowlist[] __initconst = { { .compatible = "st-ericsson,u9540", },
{ .compatible = "starfive,jh7110", }, + { .compatible = "starfive,jh7110s", },
{ .compatible = "ti,omap2", }, { .compatible = "ti,omap4", }, diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index fedad1081973..fbbbe501cf2d 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -145,6 +145,8 @@ static unsigned int nforce2_fsb_read(int bootfsb) pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb); fsb /= 1000000;
+ pci_dev_put(nforce2_sub5); + /* Check if PLL register is already set */ pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
@@ -426,6 +428,7 @@ static int __init nforce2_init(void) static void __exit nforce2_exit(void) { cpufreq_unregister_driver(&nforce2_driver); + pci_dev_put(nforce2_dev); }
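Both nforce2 hunks above add the pci_dev_put() that balances the reference taken when the driver looked up the PCI devices. A minimal sketch of that get/put pairing follows; the vendor/device IDs and config offset are placeholders, not the real nForce2 values.

/*
 * Illustrative-only sketch of the reference-count pairing the hunks above
 * restore: every successful pci_get_device() (or pci_get_subsys()) lookup
 * must be balanced with pci_dev_put() once the struct pci_dev is no longer
 * needed.  IDs and offset below are placeholders.
 */
#include <linux/pci.h>

static u32 example_read_fsb_reg(void)
{
        struct pci_dev *pdev;
        u32 val = 0;

        pdev = pci_get_device(0x10de, 0x01e0, NULL);    /* placeholder IDs */
        if (!pdev)
                return 0;

        pci_read_config_dword(pdev, 0x48, &val);        /* placeholder offset */

        pci_dev_put(pdev);      /* drop the reference taken by pci_get_device() */

        return val;
}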
module_init(nforce2_init); diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 76c888ed8d16..d2fa42beae9c 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -518,7 +518,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0) { ret = -EINVAL; - goto out_dmc1; + goto out; }
/* @@ -530,7 +530,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy) if ((mem_type != LPDDR) && (mem_type != LPDDR2)) { pr_err("CPUFreq doesn't support this memory type\n"); ret = -EINVAL; - goto out_dmc1; + goto out; }
/* Find current refresh counter and frequency each DMC */ @@ -544,6 +544,8 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy) cpufreq_generic_init(policy, s5pv210_freq_table, 40000); return 0;
+out: + clk_put(dmc1_clk); out_dmc1: clk_put(dmc0_clk); out_dmc0: diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 9069c36a491d..3be761961f1b 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -323,12 +323,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, }
/* - * Use a physical idle state, not busy polling, unless a timer - * is going to trigger soon enough or the exit latency of the - * idle state in question is greater than the predicted idle - * duration. + * Use a physical idle state instead of busy polling so long as + * its target residency is below the residency threshold, its + * exit latency is not greater than the predicted idle duration, + * and the next timer doesn't expire soon. */ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && + s->target_residency_ns < RESIDENCY_THRESHOLD_NS && s->target_residency_ns <= data->next_timer_ns && s->exit_latency_ns <= predicted_ns) { predicted_ns = s->target_residency_ns; diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index 173ddcac540a..520aa466c2e7 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -462,11 +462,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, * If the closest expected timer is before the target residency of the * candidate state, a shallower one needs to be found. */ - if (drv->states[idx].target_residency_ns > duration_ns) { - i = teo_find_shallower_state(drv, dev, idx, duration_ns, false); - if (teo_state_ok(i, drv)) - idx = i; - } + if (drv->states[idx].target_residency_ns > duration_ns) + idx = teo_find_shallower_state(drv, dev, idx, duration_ns, false);
/* * If the selected state's target residency is below the tick length diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index b3d14a7f4dd1..0eb43c862516 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -181,7 +181,9 @@ static inline void test_len(struct hwrng *rng, size_t len, bool wait) struct device *dev = ctx->ctrldev;
buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL); - + if (!buf) { + return; + } while (len > 0) { read_len = rng->read(rng, buf, len, wait);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index ea31ac7ac1ca..e59053738a43 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -36,6 +36,8 @@
static char driver_name[] = KBUILD_MODNAME;
+#define RCV_BUFFER_SIZE (16 * 1024) + /* this is the physical layout of a PCL, its size is 128 bytes */ struct pcl { __le32 next; @@ -517,16 +519,14 @@ remove_card(struct pci_dev *dev) lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus); dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl), lynx->rcv_pcl, lynx->rcv_pcl_bus); - dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE, lynx->rcv_buffer, - lynx->rcv_buffer_bus); + dma_free_coherent(&lynx->pci_device->dev, RCV_BUFFER_SIZE, + lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers); pci_disable_device(dev); lynx_put(lynx); }
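The nosy change above makes the size passed to dma_free_coherent() match the size used at allocation time, by moving RCV_BUFFER_SIZE above its first user and passing it to both frees instead of PAGE_SIZE. A minimal sketch of that matched alloc/free sizing, with placeholder names, is below.

/*
 * Illustrative-only sketch (not nosy.c itself) of the invariant restored
 * above: the size handed to dma_free_coherent() must match the size used
 * for the corresponding dma_alloc_coherent().
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define EXAMPLE_BUF_SIZE        (16 * 1024)     /* placeholder, mirrors RCV_BUFFER_SIZE */

struct example_card {
        struct device *dev;
        void *buf;
        dma_addr_t buf_bus;
};

static int example_alloc(struct example_card *card)
{
        card->buf = dma_alloc_coherent(card->dev, EXAMPLE_BUF_SIZE,
                                       &card->buf_bus, GFP_KERNEL);
        return card->buf ? 0 : -ENOMEM;
}

static void example_free(struct example_card *card)
{
        /* Same size constant as the allocation, never a different one. */
        dma_free_coherent(card->dev, EXAMPLE_BUF_SIZE, card->buf, card->buf_bus);
}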
-#define RCV_BUFFER_SIZE (16 * 1024) - static int add_card(struct pci_dev *dev, const struct pci_device_id *unused) { @@ -680,7 +680,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused) dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl), lynx->rcv_pcl, lynx->rcv_pcl_bus); if (lynx->rcv_buffer) - dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE, + dma_free_coherent(&lynx->pci_device->dev, RCV_BUFFER_SIZE, lynx->rcv_buffer, lynx->rcv_buffer_bus); iounmap(lynx->registers);
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c index f2b902e95b73..b9f6128d56f7 100644 --- a/drivers/firmware/imx/imx-scu-irq.c +++ b/drivers/firmware/imx/imx-scu-irq.c @@ -214,6 +214,8 @@ int imx_scu_enable_general_irq_channel(struct device *dev) cl->dev = dev; cl->rx_callback = imx_scu_irq_callback;
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); + /* SCU general IRQ uses general interrupt channel 3 */ ch = mbox_request_channel_byname(cl, "gip3"); if (IS_ERR(ch)) { @@ -223,8 +225,6 @@ int imx_scu_enable_general_irq_channel(struct device *dev) return ret; }
- INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); - if (!of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells", 0, &spec)) { i = of_alias_get_id(spec.np, "mu"); diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 4627a00a5590..3388b602b70b 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017-2018, Intel Corporation + * Copyright (C) 2025, Altera Corporation */
#include <linux/completion.h> @@ -175,6 +176,12 @@ struct stratix10_svc_chan { static LIST_HEAD(svc_ctrl); static LIST_HEAD(svc_data_mem);
+/** + * svc_mem_lock protects access to the svc_data_mem list for + * concurrent multi-client operations + */ +static DEFINE_MUTEX(svc_mem_lock); + /** * svc_pa_to_va() - translate physical address to virtual address * @addr: to be translated physical address @@ -187,6 +194,7 @@ static void *svc_pa_to_va(unsigned long addr) struct stratix10_svc_data_mem *pmem;
pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr); + guard(mutex)(&svc_mem_lock); list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->paddr == addr) return pmem->vaddr; @@ -996,6 +1004,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) p_data->flag = ct->flags; } } else { + guard(mutex)(&svc_mem_lock); list_for_each_entry(p_mem, &svc_data_mem, node) if (p_mem->vaddr == p_msg->payload) { p_data->paddr = p_mem->paddr; @@ -1078,6 +1087,7 @@ void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan, if (!pmem) return ERR_PTR(-ENOMEM);
+ guard(mutex)(&svc_mem_lock); va = gen_pool_alloc(genpool, s); if (!va) return ERR_PTR(-ENOMEM); @@ -1106,6 +1116,7 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) { struct stratix10_svc_data_mem *pmem; + guard(mutex)(&svc_mem_lock);
list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->vaddr == kaddr) { diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 1429e8c0229b..9fe91a876ec5 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_OF_GPIO) += gpiolib-of.o obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o +gpiolib-acpi-y := gpiolib-acpi-core.o gpiolib-acpi-quirks.o obj-$(CONFIG_GPIOLIB) += gpiolib-swnode.o
# Device drivers. Generally keep list sorted alphabetically diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c index fed9af5ff9ec..0eee7fca39df 100644 --- a/drivers/gpio/gpio-regmap.c +++ b/drivers/gpio/gpio-regmap.c @@ -310,7 +310,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config config->regmap_irq_line, config->regmap_irq_flags, 0, config->regmap_irq_chip, &gpio->irq_chip_data); if (ret) - goto err_free_bitmap; + goto err_remove_gpiochip;
irq_domain = regmap_irq_get_domain(gpio->irq_chip_data); } else diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c new file mode 100644 index 000000000000..97862185318e --- /dev/null +++ b/drivers/gpio/gpiolib-acpi-core.c @@ -0,0 +1,1419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI helpers for GPIO API + * + * Copyright (C) 2012, Intel Corporation + * Authors: Mathias Nyman mathias.nyman@linux.intel.com + * Mika Westerberg mika.westerberg@linux.intel.com + */ + +#include <linux/acpi.h> +#include <linux/dmi.h> +#include <linux/errno.h> +#include <linux/export.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/mutex.h> +#include <linux/pinctrl/pinctrl.h> + +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/gpio/machine.h> + +#include "gpiolib.h" +#include "gpiolib-acpi.h" + +/** + * struct acpi_gpio_event - ACPI GPIO event handler data + * + * @node: list-entry of the events list of the struct acpi_gpio_chip + * @handle: handle of ACPI method to execute when the IRQ triggers + * @handler: handler function to pass to request_irq() when requesting the IRQ + * @pin: GPIO pin number on the struct gpio_chip + * @irq: Linux IRQ number for the event, for request_irq() / free_irq() + * @irqflags: flags to pass to request_irq() when requesting the IRQ + * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source + * @irq_requested:True if request_irq() has been done + * @desc: struct gpio_desc for the GPIO pin for this event + */ +struct acpi_gpio_event { + struct list_head node; + acpi_handle handle; + irq_handler_t handler; + unsigned int pin; + unsigned int irq; + unsigned long irqflags; + bool irq_is_wake; + bool irq_requested; + struct gpio_desc *desc; +}; + +struct acpi_gpio_connection { + struct list_head node; + unsigned int pin; + struct gpio_desc *desc; +}; + +struct acpi_gpio_chip { + /* + * ACPICA requires that the first field of the context parameter + * passed to acpi_install_address_space_handler() is large enough + * to hold struct acpi_connection_info. 
+ */ + struct acpi_connection_info conn_info; + struct list_head conns; + struct mutex conn_lock; + struct gpio_chip *chip; + struct list_head events; + struct list_head deferred_req_irqs_list_entry; +}; + +/** + * struct acpi_gpio_info - ACPI GPIO specific information + * @adev: reference to ACPI device which consumes GPIO resource + * @flags: GPIO initialization flags + * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo + * @pin_config: pin bias as provided by ACPI + * @polarity: interrupt polarity as provided by ACPI + * @triggering: triggering type as provided by ACPI + * @wake_capable: wake capability as provided by ACPI + * @debounce: debounce timeout as provided by ACPI + * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping + */ +struct acpi_gpio_info { + struct acpi_device *adev; + enum gpiod_flags flags; + bool gpioint; + int pin_config; + int polarity; + int triggering; + bool wake_capable; + unsigned int debounce; + unsigned int quirks; +}; + +static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data) +{ + /* First check the actual GPIO device */ + if (device_match_acpi_handle(&gc->gpiodev->dev, data)) + return true; + + /* + * When the ACPI device is artificially split to the banks of GPIOs, + * where each of them is represented by a separate GPIO device, + * the firmware node of the physical device may not be shared among + * the banks as they may require different values for the same property, + * e.g., number of GPIOs in a certain bank. In such case the ACPI handle + * of a GPIO device is NULL and can not be used. Hence we have to check + * the parent device to be sure that there is no match before bailing + * out. + */ + if (gc->parent) + return device_match_acpi_handle(gc->parent, data); + + return false; +} + +/** + * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API + * @path: ACPI GPIO controller full path name, (e.g. "\_SB.GPO1") + * @pin: ACPI GPIO pin number (0-based, controller-relative) + * + * Returns: + * GPIO descriptor to use with Linux generic GPIO API. + * If the GPIO cannot be translated or there is an error an ERR_PTR is + * returned. + * + * Specifically returns %-EPROBE_DEFER if the referenced GPIO + * controller does not have GPIO chip registered at the moment. This is to + * support probe deferral. + */ +static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin) +{ + acpi_handle handle; + acpi_status status; + + status = acpi_get_handle(NULL, path, &handle); + if (ACPI_FAILURE(status)) + return ERR_PTR(-ENODEV); + + struct gpio_device *gdev __free(gpio_device_put) = + gpio_device_find(handle, acpi_gpiochip_find); + if (!gdev) + return ERR_PTR(-EPROBE_DEFER); + + /* + * FIXME: keep track of the reference to the GPIO device somehow + * instead of putting it here. + */ + return gpio_device_get_desc(gdev, pin); +} + +static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) +{ + struct acpi_gpio_event *event = data; + + acpi_evaluate_object(event->handle, NULL, NULL, NULL); + + return IRQ_HANDLED; +} + +static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data) +{ + struct acpi_gpio_event *event = data; + + acpi_execute_simple_method(event->handle, NULL, event->pin); + + return IRQ_HANDLED; +} + +static void acpi_gpio_chip_dh(acpi_handle handle, void *data) +{ + /* The address of this function is used as a key. 
*/ +} + +bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio) +{ + struct acpi_resource_gpio *gpio; + + if (ares->type != ACPI_RESOURCE_TYPE_GPIO) + return false; + + gpio = &ares->data.gpio; + if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT) + return false; + + *agpio = gpio; + return true; +} +EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource); + +/** + * acpi_gpio_get_io_resource - Fetch details of an ACPI resource if it is a GPIO + * I/O resource or return False if not. + * @ares: Pointer to the ACPI resource to fetch + * @agpio: Pointer to a &struct acpi_resource_gpio to store the output pointer + * + * Returns: + * %true if GpioIo resource is found, %false otherwise. + */ +bool acpi_gpio_get_io_resource(struct acpi_resource *ares, + struct acpi_resource_gpio **agpio) +{ + struct acpi_resource_gpio *gpio; + + if (ares->type != ACPI_RESOURCE_TYPE_GPIO) + return false; + + gpio = &ares->data.gpio; + if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_IO) + return false; + + *agpio = gpio; + return true; +} +EXPORT_SYMBOL_GPL(acpi_gpio_get_io_resource); + +static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio, + struct acpi_gpio_event *event) +{ + struct device *parent = acpi_gpio->chip->parent; + int ret, value; + + ret = request_threaded_irq(event->irq, NULL, event->handler, + event->irqflags | IRQF_ONESHOT, "ACPI:Event", event); + if (ret) { + dev_err(parent, "Failed to setup interrupt handler for %d\n", event->irq); + return; + } + + if (event->irq_is_wake) + enable_irq_wake(event->irq); + + event->irq_requested = true; + + /* Make sure we trigger the initial state of edge-triggered IRQs */ + if (acpi_gpio_need_run_edge_events_on_boot() && + (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) { + value = gpiod_get_raw_value_cansleep(event->desc); + if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || + ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) + event->handler(event->irq, event); + } +} + +static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) +{ + struct acpi_gpio_event *event; + + list_for_each_entry(event, &acpi_gpio->events, node) + acpi_gpiochip_request_irq(acpi_gpio, event); +} + +static enum gpiod_flags +acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity) +{ + /* GpioInt() implies input configuration */ + if (agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) + return GPIOD_IN; + + switch (agpio->io_restriction) { + case ACPI_IO_RESTRICT_INPUT: + return GPIOD_IN; + case ACPI_IO_RESTRICT_OUTPUT: + /* + * ACPI GPIO resources don't contain an initial value for the + * GPIO. Therefore we deduce that value from the pull field + * and the polarity instead. If the pin is pulled up we assume + * default to be high, if it is pulled down we assume default + * to be low, otherwise we leave pin untouched. For active low + * polarity values will be switched. See also + * Documentation/firmware-guide/acpi/gpio-properties.rst. + */ + switch (agpio->pin_config) { + case ACPI_PIN_CONFIG_PULLUP: + return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH; + case ACPI_PIN_CONFIG_PULLDOWN: + return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; + default: + break; + } + break; + default: + break; + } + + /* + * Assume that the BIOS has configured the direction and pull + * accordingly. 
+ */ + return GPIOD_ASIS; +} + +static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, + struct acpi_resource_gpio *agpio, + unsigned int index, + const char *label) +{ + int polarity = GPIO_ACTIVE_HIGH; + enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity); + unsigned int pin = agpio->pin_table[index]; + struct gpio_desc *desc; + int ret; + + desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags); + if (IS_ERR(desc)) + return desc; + + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10); + if (ret) + dev_warn(chip->parent, + "Failed to set debounce-timeout for pin 0x%04X, err %d\n", + pin, ret); + + return desc; +} + +static bool acpi_gpio_irq_is_wake(struct device *parent, + const struct acpi_resource_gpio *agpio) +{ + unsigned int pin = agpio->pin_table[0]; + + if (agpio->wake_capable != ACPI_WAKE_CAPABLE) + return false; + + if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, dev_name(parent), pin)) { + dev_info(parent, "Ignoring wakeup on pin %u\n", pin); + return false; + } + + return true; +} + +/* Always returns AE_OK so that we keep looping over the resources */ +static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, + void *context) +{ + struct acpi_gpio_chip *acpi_gpio = context; + struct gpio_chip *chip = acpi_gpio->chip; + struct acpi_resource_gpio *agpio; + acpi_handle handle, evt_handle; + struct acpi_gpio_event *event; + irq_handler_t handler = NULL; + struct gpio_desc *desc; + unsigned int pin; + int ret, irq; + + if (!acpi_gpio_get_irq_resource(ares, &agpio)) + return AE_OK; + + handle = ACPI_HANDLE(chip->parent); + pin = agpio->pin_table[0]; + + if (pin <= 255) { + char ev_name[8]; + sprintf(ev_name, "_%c%02X", + agpio->triggering == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', + pin); + if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) + handler = acpi_gpio_irq_handler; + } + if (!handler) { + if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) + handler = acpi_gpio_irq_handler_evt; + } + if (!handler) + return AE_OK; + + if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_INTERRUPT, dev_name(chip->parent), pin)) { + dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin); + return AE_OK; + } + + desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event"); + if (IS_ERR(desc)) { + dev_err(chip->parent, + "Failed to request GPIO for pin 0x%04X, err %ld\n", + pin, PTR_ERR(desc)); + return AE_OK; + } + + ret = gpiochip_lock_as_irq(chip, pin); + if (ret) { + dev_err(chip->parent, + "Failed to lock GPIO pin 0x%04X as interrupt, err %d\n", + pin, ret); + goto fail_free_desc; + } + + irq = gpiod_to_irq(desc); + if (irq < 0) { + dev_err(chip->parent, + "Failed to translate GPIO pin 0x%04X to IRQ, err %d\n", + pin, irq); + goto fail_unlock_irq; + } + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + goto fail_unlock_irq; + + event->irqflags = IRQF_ONESHOT; + if (agpio->triggering == ACPI_LEVEL_SENSITIVE) { + if (agpio->polarity == ACPI_ACTIVE_HIGH) + event->irqflags |= IRQF_TRIGGER_HIGH; + else + event->irqflags |= IRQF_TRIGGER_LOW; + } else { + switch (agpio->polarity) { + case ACPI_ACTIVE_HIGH: + event->irqflags |= IRQF_TRIGGER_RISING; + break; + case ACPI_ACTIVE_LOW: + event->irqflags |= IRQF_TRIGGER_FALLING; + break; + default: + event->irqflags |= IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING; + break; + } + } + + event->handle = evt_handle; + event->handler = handler; + event->irq = irq; + event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio); + event->pin = pin; + event->desc = desc; + + list_add_tail(&event->node, &acpi_gpio->events); + + return AE_OK; + +fail_unlock_irq: + gpiochip_unlock_as_irq(chip, pin); +fail_free_desc: + gpiochip_free_own_desc(desc); + + return AE_OK; +} + +/** + * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events + * @chip: GPIO chip + * + * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are + * handled by ACPI event methods which need to be called from the GPIO + * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which + * GPIO pins have ACPI event methods and assigns interrupt handlers that calls + * the ACPI event methods for those pins. + */ +void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) +{ + struct acpi_gpio_chip *acpi_gpio; + acpi_handle handle; + acpi_status status; + + if (!chip->parent || !chip->to_irq) + return; + + handle = ACPI_HANDLE(chip->parent); + if (!handle) + return; + + status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); + if (ACPI_FAILURE(status)) + return; + + if (acpi_quirk_skip_gpio_event_handlers()) + return; + + acpi_walk_resources(handle, METHOD_NAME__AEI, + acpi_gpiochip_alloc_event, acpi_gpio); + + if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry)) + return; + + acpi_gpiochip_request_irqs(acpi_gpio); +} +EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts); + +/** + * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts. + * @chip: GPIO chip + * + * Free interrupts associated with GPIO ACPI event method for the given + * GPIO chip. 
+ */ +void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) +{ + struct acpi_gpio_chip *acpi_gpio; + struct acpi_gpio_event *event, *ep; + acpi_handle handle; + acpi_status status; + + if (!chip->parent || !chip->to_irq) + return; + + handle = ACPI_HANDLE(chip->parent); + if (!handle) + return; + + status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); + if (ACPI_FAILURE(status)) + return; + + acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry); + + list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { + if (event->irq_requested) { + if (event->irq_is_wake) + disable_irq_wake(event->irq); + + free_irq(event->irq, event); + } + + gpiochip_unlock_as_irq(chip, event->pin); + gpiochip_free_own_desc(event->desc); + list_del(&event->node); + kfree(event); + } +} +EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts); + +void __init acpi_gpio_process_deferred_list(struct list_head *list) +{ + struct acpi_gpio_chip *acpi_gpio, *tmp; + + list_for_each_entry_safe(acpi_gpio, tmp, list, deferred_req_irqs_list_entry) + acpi_gpiochip_request_irqs(acpi_gpio); +} + +int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios) +{ + if (adev && gpios) { + adev->driver_gpios = gpios; + return 0; + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios); + +void acpi_dev_remove_driver_gpios(struct acpi_device *adev) +{ + if (adev) + adev->driver_gpios = NULL; +} +EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios); + +static void acpi_dev_release_driver_gpios(void *adev) +{ + acpi_dev_remove_driver_gpios(adev); +} + +int devm_acpi_dev_add_driver_gpios(struct device *dev, + const struct acpi_gpio_mapping *gpios) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + int ret; + + ret = acpi_dev_add_driver_gpios(adev, gpios); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, acpi_dev_release_driver_gpios, adev); +} +EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios); + +static bool acpi_get_driver_gpio_data(struct acpi_device *adev, + const char *name, int index, + struct fwnode_reference_args *args, + unsigned int *quirks) +{ + const struct acpi_gpio_mapping *gm; + + if (!adev || !adev->driver_gpios) + return false; + + for (gm = adev->driver_gpios; gm->name; gm++) + if (!strcmp(name, gm->name) && gm->data && index < gm->size) { + const struct acpi_gpio_params *par = gm->data + index; + + args->fwnode = acpi_fwnode_handle(adev); + args->args[0] = par->crs_entry_index; + args->args[1] = par->line_index; + args->args[2] = par->active_low; + args->nargs = 3; + + *quirks = gm->quirks; + return true; + } + + return false; +} + +static int +__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update) +{ + const enum gpiod_flags mask = + GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT | + GPIOD_FLAGS_BIT_DIR_VAL; + int ret = 0; + + /* + * Check if the BIOS has IoRestriction with explicitly set direction + * and update @flags accordingly. Otherwise use whatever caller asked + * for. + */ + if (update & GPIOD_FLAGS_BIT_DIR_SET) { + enum gpiod_flags diff = *flags ^ update; + + /* + * Check if caller supplied incompatible GPIO initialization + * flags. + * + * Return %-EINVAL to notify that firmware has different + * settings and we are going to use them. 
+ */ + if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) && (diff & GPIOD_FLAGS_BIT_DIR_OUT)) || + ((*flags & GPIOD_FLAGS_BIT_DIR_OUT) && (diff & GPIOD_FLAGS_BIT_DIR_VAL))) + ret = -EINVAL; + *flags = (*flags & ~mask) | (update & mask); + } + return ret; +} + +static int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, + struct acpi_gpio_info *info) +{ + struct device *dev = &info->adev->dev; + enum gpiod_flags old = *flags; + int ret; + + ret = __acpi_gpio_update_gpiod_flags(&old, info->flags); + if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) { + if (ret) + dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n"); + } else { + if (ret) + dev_dbg(dev, "Override GPIO initialization flags\n"); + *flags = old; + } + + return ret; +} + +static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags, + struct acpi_gpio_info *info) +{ + switch (info->pin_config) { + case ACPI_PIN_CONFIG_PULLUP: + *lookupflags |= GPIO_PULL_UP; + break; + case ACPI_PIN_CONFIG_PULLDOWN: + *lookupflags |= GPIO_PULL_DOWN; + break; + case ACPI_PIN_CONFIG_NOPULL: + *lookupflags |= GPIO_PULL_DISABLE; + break; + default: + break; + } + + if (info->polarity == GPIO_ACTIVE_LOW) + *lookupflags |= GPIO_ACTIVE_LOW; + + return 0; +} + +struct acpi_gpio_lookup { + struct acpi_gpio_info info; + int index; + u16 pin_index; + bool active_low; + struct gpio_desc *desc; + int n; +}; + +static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data) +{ + struct acpi_gpio_lookup *lookup = data; + + if (ares->type != ACPI_RESOURCE_TYPE_GPIO) + return 1; + + if (!lookup->desc) { + const struct acpi_resource_gpio *agpio = &ares->data.gpio; + bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT; + struct gpio_desc *desc; + u16 pin_index; + + if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint) + lookup->index++; + + if (lookup->n++ != lookup->index) + return 1; + + pin_index = lookup->pin_index; + if (pin_index >= agpio->pin_table_length) + return 1; + + if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER) + desc = gpio_to_desc(agpio->pin_table[pin_index]); + else + desc = acpi_get_gpiod(agpio->resource_source.string_ptr, + agpio->pin_table[pin_index]); + lookup->desc = desc; + lookup->info.pin_config = agpio->pin_config; + lookup->info.debounce = agpio->debounce_timeout; + lookup->info.gpioint = gpioint; + lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio); + + /* + * Polarity and triggering are only specified for GpioInt + * resource. 
+ * Note: we expect here: + * - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW + * - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH + */ + if (lookup->info.gpioint) { + lookup->info.polarity = agpio->polarity; + lookup->info.triggering = agpio->triggering; + } else { + lookup->info.polarity = lookup->active_low; + } + + lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity); + } + + return 1; +} + +static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup, + struct acpi_gpio_info *info) +{ + struct acpi_device *adev = lookup->info.adev; + struct list_head res_list; + int ret; + + INIT_LIST_HEAD(&res_list); + + ret = acpi_dev_get_resources(adev, &res_list, + acpi_populate_gpio_lookup, + lookup); + if (ret < 0) + return ret; + + acpi_dev_free_resource_list(&res_list); + + if (!lookup->desc) + return -ENOENT; + + if (info) + *info = lookup->info; + return 0; +} + +static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode, + const char *propname, int index, + struct acpi_gpio_lookup *lookup) +{ + struct fwnode_reference_args args; + unsigned int quirks = 0; + int ret; + + memset(&args, 0, sizeof(args)); + ret = __acpi_node_get_property_reference(fwnode, propname, index, 3, + &args); + if (ret) { + struct acpi_device *adev; + + adev = to_acpi_device_node(fwnode); + if (!acpi_get_driver_gpio_data(adev, propname, index, &args, &quirks)) + return ret; + } + /* + * The property was found and resolved, so need to lookup the GPIO based + * on returned args. + */ + if (!to_acpi_device_node(args.fwnode)) + return -EINVAL; + if (args.nargs != 3) + return -EPROTO; + + lookup->index = args.args[0]; + lookup->pin_index = args.args[1]; + lookup->active_low = !!args.args[2]; + + lookup->info.adev = to_acpi_device_node(args.fwnode); + lookup->info.quirks = quirks; + + return 0; +} + +/** + * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources + * @adev: pointer to a ACPI device to get GPIO from + * @propname: Property name of the GPIO (optional) + * @index: index of GpioIo/GpioInt resource (starting from %0) + * @info: info pointer to fill in (optional) + * + * Function goes through ACPI resources for @adev and based on @index looks + * up a GpioIo/GpioInt resource, translates it to the Linux GPIO descriptor, + * and returns it. @index matches GpioIo/GpioInt resources only so if there + * are total %3 GPIO resources, the index goes from %0 to %2. + * + * If @propname is specified the GPIO is looked using device property. In + * that case @index is used to select the GPIO entry in the property value + * (in case of multiple). + * + * Returns: + * GPIO descriptor to use with Linux generic GPIO API. + * If the GPIO cannot be translated or there is an error an ERR_PTR is + * returned. + * + * Note: if the GPIO resource has multiple entries in the pin list, this + * function only returns the first. 
+ */ +static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev, + const char *propname, + int index, + struct acpi_gpio_info *info) +{ + struct acpi_gpio_lookup lookup; + int ret; + + memset(&lookup, 0, sizeof(lookup)); + lookup.index = index; + + if (propname) { + dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname); + + ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev), + propname, index, &lookup); + if (ret) + return ERR_PTR(ret); + + dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n", + dev_name(&lookup.info.adev->dev), lookup.index, + lookup.pin_index, lookup.active_low); + } else { + dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index); + lookup.info.adev = adev; + } + + ret = acpi_gpio_resource_lookup(&lookup, info); + return ret ? ERR_PTR(ret) : lookup.desc; +} + +/** + * acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node + * @fwnode: pointer to an ACPI firmware node to get the GPIO information from + * @propname: Property name of the GPIO + * @index: index of GpioIo/GpioInt resource (starting from %0) + * @info: info pointer to fill in (optional) + * + * This function uses the property-based GPIO lookup to get to the GPIO + * resource with the relevant information from a data-only ACPI firmware node + * and uses that to obtain the GPIO descriptor to return. + * + * Returns: + * GPIO descriptor to use with Linux generic GPIO API. + * If the GPIO cannot be translated or there is an error an ERR_PTR is + * returned. + */ +static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode, + const char *propname, + int index, + struct acpi_gpio_info *info) +{ + struct acpi_gpio_lookup lookup; + int ret; + + if (!is_acpi_data_node(fwnode)) + return ERR_PTR(-ENODEV); + + if (!propname) + return ERR_PTR(-EINVAL); + + memset(&lookup, 0, sizeof(lookup)); + lookup.index = index; + + ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup); + if (ret) + return ERR_PTR(ret); + + ret = acpi_gpio_resource_lookup(&lookup, info); + return ret ? 
ERR_PTR(ret) : lookup.desc; +} + +static bool acpi_can_fallback_to_crs(struct acpi_device *adev, + const char *con_id) +{ + /* If there is no ACPI device, there is no _CRS to fall back to */ + if (!adev) + return false; + + /* Never allow fallback if the device has properties */ + if (acpi_dev_has_props(adev) || adev->driver_gpios) + return false; + + return con_id == NULL; +} + +static struct gpio_desc * +__acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx, + bool can_fallback, struct acpi_gpio_info *info) +{ + struct acpi_device *adev = to_acpi_device_node(fwnode); + struct gpio_desc *desc; + char propname[32]; + + /* Try first from _DSD */ + for_each_gpio_property_name(propname, con_id) { + if (adev) + desc = acpi_get_gpiod_by_index(adev, + propname, idx, info); + else + desc = acpi_get_gpiod_from_data(fwnode, + propname, idx, info); + if (PTR_ERR(desc) == -EPROBE_DEFER) + return ERR_CAST(desc); + + if (!IS_ERR(desc)) + return desc; + } + + /* Then from plain _CRS GPIOs */ + if (can_fallback) + return acpi_get_gpiod_by_index(adev, NULL, idx, info); + + return ERR_PTR(-ENOENT); +} + +struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, + const char *con_id, + unsigned int idx, + enum gpiod_flags *dflags, + unsigned long *lookupflags) +{ + struct acpi_device *adev = to_acpi_device_node(fwnode); + bool can_fallback = acpi_can_fallback_to_crs(adev, con_id); + struct acpi_gpio_info info; + struct gpio_desc *desc; + + desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info); + if (IS_ERR(desc)) + return desc; + + if (info.gpioint && + (*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) { + dev_dbg(&adev->dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n"); + return ERR_PTR(-ENOENT); + } + + acpi_gpio_update_gpiod_flags(dflags, &info); + acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info); + return desc; +} + +/** + * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number + * @adev: pointer to a ACPI device to get IRQ from + * @con_id: optional name of GpioInt resource + * @index: index of GpioInt resource (starting from %0) + * @wake_capable: Set to true if the IRQ is wake capable + * + * If the device has one or more GpioInt resources, this function can be + * used to translate from the GPIO offset in the resource to the Linux IRQ + * number. + * + * The function is idempotent, though each time it runs it will configure GPIO + * pin direction according to the flags in GpioInt resource. + * + * The function takes optional @con_id parameter. If the resource has + * a @con_id in a property, then only those will be taken into account. + * + * The GPIO is considered wake capable if the GpioInt resource specifies + * SharedAndWake or ExclusiveAndWake. + * + * Returns: + * Linux IRQ number (> 0) on success, negative errno on failure. 
+ */ +int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, + bool *wake_capable) +{ + struct fwnode_handle *fwnode = acpi_fwnode_handle(adev); + int idx, i; + unsigned int irq_flags; + int ret; + + for (i = 0, idx = 0; idx <= index; i++) { + struct acpi_gpio_info info; + struct gpio_desc *desc; + + /* Ignore -EPROBE_DEFER, it only matters if idx matches */ + desc = __acpi_find_gpio(fwnode, con_id, i, true, &info); + if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) + return PTR_ERR(desc); + + if (info.gpioint && idx++ == index) { + unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT; + enum gpiod_flags dflags = GPIOD_ASIS; + char label[32]; + int irq; + + if (IS_ERR(desc)) + return PTR_ERR(desc); + + irq = gpiod_to_irq(desc); + if (irq < 0) + return irq; + + acpi_gpio_update_gpiod_flags(&dflags, &info); + acpi_gpio_update_gpiod_lookup_flags(&lflags, &info); + + snprintf(label, sizeof(label), "%pfwP GpioInt(%d)", fwnode, index); + ret = gpiod_set_consumer_name(desc, con_id ?: label); + if (ret) + return ret; + + ret = gpiod_configure_flags(desc, label, lflags, dflags); + if (ret < 0) + return ret; + + /* ACPI uses hundredths of milliseconds units */ + ret = gpio_set_debounce_timeout(desc, info.debounce * 10); + if (ret) + return ret; + + irq_flags = acpi_dev_get_irq_type(info.triggering, + info.polarity); + + /* + * If the IRQ is not already in use then set type + * if specified and different than the current one. + */ + if (can_request_irq(irq, irq_flags)) { + if (irq_flags != IRQ_TYPE_NONE && + irq_flags != irq_get_trigger_type(irq)) + irq_set_irq_type(irq, irq_flags); + } else { + dev_dbg(&adev->dev, "IRQ %d already in use\n", irq); + } + + /* avoid suspend issues with GPIOs when systems are using S3 */ + if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) + *wake_capable = info.wake_capable; + + return irq; + } + + } + return -ENOENT; +} +EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by); + +static acpi_status +acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, + u32 bits, u64 *value, void *handler_context, + void *region_context) +{ + struct acpi_gpio_chip *achip = region_context; + struct gpio_chip *chip = achip->chip; + struct acpi_resource_gpio *agpio; + struct acpi_resource *ares; + u16 pin_index = address; + acpi_status status; + int length; + int i; + + status = acpi_buffer_to_resource(achip->conn_info.connection, + achip->conn_info.length, &ares); + if (ACPI_FAILURE(status)) + return status; + + if (WARN_ON(ares->type != ACPI_RESOURCE_TYPE_GPIO)) { + ACPI_FREE(ares); + return AE_BAD_PARAMETER; + } + + agpio = &ares->data.gpio; + + if (WARN_ON(agpio->io_restriction == ACPI_IO_RESTRICT_INPUT && + function == ACPI_WRITE)) { + ACPI_FREE(ares); + return AE_BAD_PARAMETER; + } + + length = min_t(u16, agpio->pin_table_length, pin_index + bits); + for (i = pin_index; i < length; ++i) { + unsigned int pin = agpio->pin_table[i]; + struct acpi_gpio_connection *conn; + struct gpio_desc *desc; + bool found; + + mutex_lock(&achip->conn_lock); + + found = false; + list_for_each_entry(conn, &achip->conns, node) { + if (conn->pin == pin) { + found = true; + desc = conn->desc; + break; + } + } + + /* + * The same GPIO can be shared between operation region and + * event but only if the access here is ACPI_READ. In that + * case we "borrow" the event GPIO instead. 
+ */ + if (!found && agpio->shareable == ACPI_SHARED && + function == ACPI_READ) { + struct acpi_gpio_event *event; + + list_for_each_entry(event, &achip->events, node) { + if (event->pin == pin) { + desc = event->desc; + found = true; + break; + } + } + } + + if (!found) { + desc = acpi_request_own_gpiod(chip, agpio, i, "ACPI:OpRegion"); + if (IS_ERR(desc)) { + mutex_unlock(&achip->conn_lock); + status = AE_ERROR; + goto out; + } + + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) { + gpiochip_free_own_desc(desc); + mutex_unlock(&achip->conn_lock); + status = AE_NO_MEMORY; + goto out; + } + + conn->pin = pin; + conn->desc = desc; + list_add_tail(&conn->node, &achip->conns); + } + + mutex_unlock(&achip->conn_lock); + + if (function == ACPI_WRITE) + gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i))); + else + *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i; + } + +out: + ACPI_FREE(ares); + return status; +} + +static void acpi_gpiochip_request_regions(struct acpi_gpio_chip *achip) +{ + struct gpio_chip *chip = achip->chip; + acpi_handle handle = ACPI_HANDLE(chip->parent); + acpi_status status; + + INIT_LIST_HEAD(&achip->conns); + mutex_init(&achip->conn_lock); + status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GPIO, + acpi_gpio_adr_space_handler, + NULL, achip); + if (ACPI_FAILURE(status)) + dev_err(chip->parent, + "Failed to install GPIO OpRegion handler\n"); +} + +static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip) +{ + struct gpio_chip *chip = achip->chip; + acpi_handle handle = ACPI_HANDLE(chip->parent); + struct acpi_gpio_connection *conn, *tmp; + acpi_status status; + + status = acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GPIO, + acpi_gpio_adr_space_handler); + if (ACPI_FAILURE(status)) { + dev_err(chip->parent, + "Failed to remove GPIO OpRegion handler\n"); + return; + } + + list_for_each_entry_safe_reverse(conn, tmp, &achip->conns, node) { + gpiochip_free_own_desc(conn->desc); + list_del(&conn->node); + kfree(conn); + } +} + +static struct gpio_desc * +acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip, + struct fwnode_handle *fwnode, + const char **name, + unsigned long *lflags, + enum gpiod_flags *dflags) +{ + struct gpio_chip *chip = achip->chip; + struct gpio_desc *desc; + u32 gpios[2]; + int ret; + + *lflags = GPIO_LOOKUP_FLAGS_DEFAULT; + *dflags = GPIOD_ASIS; + *name = NULL; + + ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios, + ARRAY_SIZE(gpios)); + if (ret < 0) + return ERR_PTR(ret); + + desc = gpiochip_get_desc(chip, gpios[0]); + if (IS_ERR(desc)) + return desc; + + if (gpios[1]) + *lflags |= GPIO_ACTIVE_LOW; + + if (fwnode_property_present(fwnode, "input")) + *dflags |= GPIOD_IN; + else if (fwnode_property_present(fwnode, "output-low")) + *dflags |= GPIOD_OUT_LOW; + else if (fwnode_property_present(fwnode, "output-high")) + *dflags |= GPIOD_OUT_HIGH; + else + return ERR_PTR(-EINVAL); + + fwnode_property_read_string(fwnode, "line-name", name); + + return desc; +} + +static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip) +{ + struct gpio_chip *chip = achip->chip; + struct fwnode_handle *fwnode; + + device_for_each_child_node(chip->parent, fwnode) { + unsigned long lflags; + enum gpiod_flags dflags; + struct gpio_desc *desc; + const char *name; + int ret; + + if (!fwnode_property_present(fwnode, "gpio-hog")) + continue; + + desc = acpi_gpiochip_parse_own_gpio(achip, fwnode, &name, + &lflags, &dflags); + if (IS_ERR(desc)) + continue; + + ret = gpiod_hog(desc, name, lflags, 
dflags); + if (ret) { + dev_err(chip->parent, "Failed to hog GPIO\n"); + fwnode_handle_put(fwnode); + return; + } + } +} + +void acpi_gpiochip_add(struct gpio_chip *chip) +{ + struct acpi_gpio_chip *acpi_gpio; + struct acpi_device *adev; + acpi_status status; + + if (!chip || !chip->parent) + return; + + adev = ACPI_COMPANION(chip->parent); + if (!adev) + return; + + acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL); + if (!acpi_gpio) { + dev_err(chip->parent, + "Failed to allocate memory for ACPI GPIO chip\n"); + return; + } + + acpi_gpio->chip = chip; + INIT_LIST_HEAD(&acpi_gpio->events); + INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry); + + status = acpi_attach_data(adev->handle, acpi_gpio_chip_dh, acpi_gpio); + if (ACPI_FAILURE(status)) { + dev_err(chip->parent, "Failed to attach ACPI GPIO chip\n"); + kfree(acpi_gpio); + return; + } + + acpi_gpiochip_request_regions(acpi_gpio); + acpi_gpiochip_scan_gpios(acpi_gpio); + acpi_dev_clear_dependencies(adev); +} + +void acpi_gpiochip_remove(struct gpio_chip *chip) +{ + struct acpi_gpio_chip *acpi_gpio; + acpi_handle handle; + acpi_status status; + + if (!chip || !chip->parent) + return; + + handle = ACPI_HANDLE(chip->parent); + if (!handle) + return; + + status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); + if (ACPI_FAILURE(status)) { + dev_warn(chip->parent, "Failed to retrieve ACPI GPIO chip\n"); + return; + } + + acpi_gpiochip_free_regions(acpi_gpio); + + acpi_detach_data(handle, acpi_gpio_chip_dh); + kfree(acpi_gpio); +} + +static int acpi_gpio_package_count(const union acpi_object *obj) +{ + const union acpi_object *element = obj->package.elements; + const union acpi_object *end = element + obj->package.count; + unsigned int count = 0; + + while (element < end) { + switch (element->type) { + case ACPI_TYPE_LOCAL_REFERENCE: + element += 3; + fallthrough; + case ACPI_TYPE_INTEGER: + element++; + count++; + break; + + default: + return -EPROTO; + } + } + + return count; +} + +static int acpi_find_gpio_count(struct acpi_resource *ares, void *data) +{ + unsigned int *count = data; + + if (ares->type == ACPI_RESOURCE_TYPE_GPIO) + *count += ares->data.gpio.pin_table_length; + + return 1; +} + +/** + * acpi_gpio_count - count the GPIOs associated with a firmware node / function + * @fwnode: firmware node of the GPIO consumer + * @con_id: function within the GPIO consumer + * + * Returns: + * The number of GPIOs associated with a firmware node / function or %-ENOENT, + * if no GPIO has been assigned to the requested function. 
+ */ +int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) +{ + struct acpi_device *adev = to_acpi_device_node(fwnode); + const union acpi_object *obj; + const struct acpi_gpio_mapping *gm; + int count = -ENOENT; + int ret; + char propname[32]; + + /* Try first from _DSD */ + for_each_gpio_property_name(propname, con_id) { + ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj); + if (ret == 0) { + if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) + count = 1; + else if (obj->type == ACPI_TYPE_PACKAGE) + count = acpi_gpio_package_count(obj); + } else if (adev->driver_gpios) { + for (gm = adev->driver_gpios; gm->name; gm++) + if (strcmp(propname, gm->name) == 0) { + count = gm->size; + break; + } + } + if (count > 0) + break; + } + + /* Then from plain _CRS GPIOs */ + if (count < 0) { + struct list_head resource_list; + unsigned int crs_count = 0; + + if (!acpi_can_fallback_to_crs(adev, con_id)) + return count; + + INIT_LIST_HEAD(&resource_list); + acpi_dev_get_resources(adev, &resource_list, + acpi_find_gpio_count, &crs_count); + acpi_dev_free_resource_list(&resource_list); + if (crs_count > 0) + count = crs_count; + } + return count ? count : -ENOENT; +} diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c new file mode 100644 index 000000000000..2c20bda54a6d --- /dev/null +++ b/drivers/gpio/gpiolib-acpi-quirks.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI quirks for GPIO ACPI helpers + * + * Author: Hans de Goede hdegoede@redhat.com + */ + +#include <linux/dmi.h> +#include <linux/kstrtox.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/printk.h> +#include <linux/string.h> +#include <linux/types.h> + +#include "gpiolib-acpi.h" + +static int run_edge_events_on_boot = -1; +module_param(run_edge_events_on_boot, int, 0444); +MODULE_PARM_DESC(run_edge_events_on_boot, + "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); + +static char *ignore_wake; +module_param(ignore_wake, charp, 0444); +MODULE_PARM_DESC(ignore_wake, + "controller@pin combos on which to ignore the ACPI wake flag " + "ignore_wake=controller@pin[,controller@pin[,...]]"); + +static char *ignore_interrupt; +module_param(ignore_interrupt, charp, 0444); +MODULE_PARM_DESC(ignore_interrupt, + "controller@pin combos on which to ignore interrupt " + "ignore_interrupt=controller@pin[,controller@pin[,...]]"); + +/* + * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init + * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a + * late_initcall_sync() handler, so that other builtin drivers can register their + * OpRegions before the event handlers can run. This list contains GPIO chips + * for which the acpi_gpiochip_request_irqs() call has been deferred. 
+ */ +static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); +static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); +static bool acpi_gpio_deferred_req_irqs_done; + +bool acpi_gpio_add_to_deferred_list(struct list_head *list) +{ + bool defer; + + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); + defer = !acpi_gpio_deferred_req_irqs_done; + if (defer) + list_add(list, &acpi_gpio_deferred_req_irqs_list); + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); + + return defer; +} + +void acpi_gpio_remove_from_deferred_list(struct list_head *list) +{ + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); + if (!list_empty(list)) + list_del_init(list); + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); +} + +int acpi_gpio_need_run_edge_events_on_boot(void) +{ + return run_edge_events_on_boot; +} + +bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list, + const char *controller_in, unsigned int pin_in) +{ + const char *ignore_list, *controller, *pin_str; + unsigned int pin; + char *endp; + int len; + + switch (list) { + case ACPI_GPIO_IGNORE_WAKE: + ignore_list = ignore_wake; + break; + case ACPI_GPIO_IGNORE_INTERRUPT: + ignore_list = ignore_interrupt; + break; + default: + return false; + } + + controller = ignore_list; + while (controller) { + pin_str = strchr(controller, '@'); + if (!pin_str) + goto err; + + len = pin_str - controller; + if (len == strlen(controller_in) && + strncmp(controller, controller_in, len) == 0) { + pin = simple_strtoul(pin_str + 1, &endp, 10); + if (*endp != 0 && *endp != ',') + goto err; + + if (pin == pin_in) + return true; + } + + controller = strchr(controller, ','); + if (controller) + controller++; + } + + return false; +err: + pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list); + return false; +} + +/* Run deferred acpi_gpiochip_request_irqs() */ +static int __init acpi_gpio_handle_deferred_request_irqs(void) +{ + mutex_lock(&acpi_gpio_deferred_req_irqs_lock); + acpi_gpio_process_deferred_list(&acpi_gpio_deferred_req_irqs_list); + acpi_gpio_deferred_req_irqs_done = true; + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); + + return 0; +} +/* We must use _sync so that this runs after the first deferred_probe run */ +late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); + +struct acpi_gpiolib_dmi_quirk { + bool no_edge_events_on_boot; + char *ignore_wake; + char *ignore_interrupt; +}; + +static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { + { + /* + * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for + * a non existing micro-USB-B connector which puts the HDMI + * DDC pins in GPIO mode, breaking HDMI support. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), + DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .no_edge_events_on_boot = true, + }, + }, + { + /* + * The Terra Pad 1061 has a micro-USB-B id-pin handler, which + * instead of controlling the actual micro-USB-B turns the 5V + * boost for its USB-A connector off. The actual micro-USB-B + * connector is wired for charging only. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), + DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .no_edge_events_on_boot = true, + }, + }, + { + /* + * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FFC:02 pin 12, causing spurious wakeups. 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FC:02@12", + }, + }, + { + /* + * HP X2 10 models with Cherry Trail SoC + TI PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FF:01 pin 0, causing spurious wakeups. + * When suspending by closing the LID, the power to the USB + * keyboard is turned off, causing INT0002 ACPI events to + * trigger once the XHCI controller notices the keyboard is + * gone. So INT0002 events cause spurious wakeups too. Ignoring + * EC wakes breaks wakeup when opening the lid, the user needs + * to press the power-button to wakeup the system. The + * alternative is suspend simply not working, which is worse. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FF:01@0,INT0002:00@2", + }, + }, + { + /* + * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FC:02 pin 28, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_MATCH(DMI_BOARD_NAME, "815D"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FC:02@28", + }, + }, + { + /* + * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an + * external embedded-controller connected via I2C + an ACPI GPIO + * event handler on INT33FF:01 pin 0, causing spurious wakeups. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + DMI_MATCH(DMI_BOARD_NAME, "813E"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "INT33FF:01@0", + }, + }, + { + /* + * Interrupt storm caused from edge triggered floating pin + * Found in BIOS UX325UAZ.300 + * https://bugzilla.kernel.org/show_bug.cgi?id=216208 + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_interrupt = "AMDI0030:00@18", + }, + }, + { + /* + * Spurious wakeups from TP_ATTN# pin + * Found in BIOS 1.7.8 + * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627 + */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "ELAN0415:00@9", + }, + }, + { + /* + * Spurious wakeups from TP_ATTN# pin + * Found in BIOS 1.7.8 + * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627 + */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "ELAN0415:00@9", + }, + }, + { + /* + * Spurious wakeups from TP_ATTN# pin + * Found in BIOS 1.7.7 + */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "SYNA1202:00@16", + }, + }, + { + /* + * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to + * a "dolby" button. At the ACPI level an _AEI event-handler + * is connected which sets an ACPI variable to 1 on both + * edges. This variable can be polled + cleared to 0 using + * WMI. 
But since the variable is set on both edges the WMI + * interface is pretty useless even when polling. + * So instead the x86-android-tablets code instantiates + * a gpio-keys platform device for it. + * Ignore the _AEI handler for the pin, so that it is not busy. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"), + DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_interrupt = "INT33FC:00@3", + }, + }, + { + /* + * Spurious wakeups from TP_ATTN# pin + * Found in BIOS 0.35 + * https://gitlab.freedesktop.org/drm/amd/-/issues/3073 + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "PNP0C50:00@8", + }, + }, + { + /* + * Spurious wakeups from GPIO 11 + * Found in BIOS 1.04 + * https://gitlab.freedesktop.org/drm/amd/-/issues/3954 + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_interrupt = "AMDI0030:00@11", + }, + }, + { + /* + * Wakeup only works when keyboard backlight is turned off + * https://gitlab.freedesktop.org/drm/amd/-/issues/4169 + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 15"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_interrupt = "AMDI0030:00@8", + }, + }, + { + /* + * Spurious wakeups from TP_ATTN# pin + * Found in BIOS 5.35 + * https://gitlab.freedesktop.org/drm/amd/-/issues/4482 + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "ASCP1A00:00@8", + }, + }, + { + /* + * Spurious wakeups, likely from touchpad controller + * Dell Precision 7780 + * Found in BIOS 1.24.1 + * + * Found in touchpad firmware, installed by Dell Touchpad Firmware Update Utility version 1160.4196.9, A01 + * ( Dell-Touchpad-Firmware-Update-Utility_VYGNN_WIN64_1160.4196.9_A00.EXE ), + * released on 11 Jul 2024 + * + * https://lore.kernel.org/linux-i2c/197ae95ffd8.dc819e60457077.769212048860909... 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Precision"), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7780"), + DMI_MATCH(DMI_BOARD_NAME, "0C6JVW"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "VEN_0488:00@355", + }, + }, + {} /* Terminating entry */ +}; + +static int __init acpi_gpio_setup_params(void) +{ + const struct acpi_gpiolib_dmi_quirk *quirk = NULL; + const struct dmi_system_id *id; + + id = dmi_first_match(gpiolib_acpi_quirks); + if (id) + quirk = id->driver_data; + + if (run_edge_events_on_boot < 0) { + if (quirk && quirk->no_edge_events_on_boot) + run_edge_events_on_boot = 0; + else + run_edge_events_on_boot = 1; + } + + if (ignore_wake == NULL && quirk && quirk->ignore_wake) + ignore_wake = quirk->ignore_wake; + + if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt) + ignore_interrupt = quirk->ignore_interrupt; + + return 0; +} + +/* Directly after dmi_setup() which runs as core_initcall() */ +postcore_initcall(acpi_gpio_setup_params); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c deleted file mode 100644 index 148b4d1788a2..000000000000 --- a/drivers/gpio/gpiolib-acpi.c +++ /dev/null @@ -1,1737 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ACPI helpers for GPIO API - * - * Copyright (C) 2012, Intel Corporation - * Authors: Mathias Nyman mathias.nyman@linux.intel.com - * Mika Westerberg mika.westerberg@linux.intel.com - */ - -#include <linux/acpi.h> -#include <linux/dmi.h> -#include <linux/errno.h> -#include <linux/export.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/mutex.h> -#include <linux/pinctrl/pinctrl.h> - -#include <linux/gpio/consumer.h> -#include <linux/gpio/driver.h> -#include <linux/gpio/machine.h> - -#include "gpiolib.h" -#include "gpiolib-acpi.h" - -static int run_edge_events_on_boot = -1; -module_param(run_edge_events_on_boot, int, 0444); -MODULE_PARM_DESC(run_edge_events_on_boot, - "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto"); - -static char *ignore_wake; -module_param(ignore_wake, charp, 0444); -MODULE_PARM_DESC(ignore_wake, - "controller@pin combos on which to ignore the ACPI wake flag " - "ignore_wake=controller@pin[,controller@pin[,...]]"); - -static char *ignore_interrupt; -module_param(ignore_interrupt, charp, 0444); -MODULE_PARM_DESC(ignore_interrupt, - "controller@pin combos on which to ignore interrupt " - "ignore_interrupt=controller@pin[,controller@pin[,...]]"); - -struct acpi_gpiolib_dmi_quirk { - bool no_edge_events_on_boot; - char *ignore_wake; - char *ignore_interrupt; -}; - -/** - * struct acpi_gpio_event - ACPI GPIO event handler data - * - * @node: list-entry of the events list of the struct acpi_gpio_chip - * @handle: handle of ACPI method to execute when the IRQ triggers - * @handler: handler function to pass to request_irq() when requesting the IRQ - * @pin: GPIO pin number on the struct gpio_chip - * @irq: Linux IRQ number for the event, for request_irq() / free_irq() - * @irqflags: flags to pass to request_irq() when requesting the IRQ - * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source - * @irq_requested:True if request_irq() has been done - * @desc: struct gpio_desc for the GPIO pin for this event - */ -struct acpi_gpio_event { - struct list_head node; - acpi_handle handle; - irq_handler_t handler; - unsigned int pin; - unsigned int irq; - unsigned long irqflags; - bool irq_is_wake; - bool irq_requested; - struct gpio_desc *desc; -}; - -struct 
acpi_gpio_connection { - struct list_head node; - unsigned int pin; - struct gpio_desc *desc; -}; - -struct acpi_gpio_chip { - /* - * ACPICA requires that the first field of the context parameter - * passed to acpi_install_address_space_handler() is large enough - * to hold struct acpi_connection_info. - */ - struct acpi_connection_info conn_info; - struct list_head conns; - struct mutex conn_lock; - struct gpio_chip *chip; - struct list_head events; - struct list_head deferred_req_irqs_list_entry; -}; - -/** - * struct acpi_gpio_info - ACPI GPIO specific information - * @adev: reference to ACPI device which consumes GPIO resource - * @flags: GPIO initialization flags - * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo - * @pin_config: pin bias as provided by ACPI - * @polarity: interrupt polarity as provided by ACPI - * @triggering: triggering type as provided by ACPI - * @wake_capable: wake capability as provided by ACPI - * @debounce: debounce timeout as provided by ACPI - * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping - */ -struct acpi_gpio_info { - struct acpi_device *adev; - enum gpiod_flags flags; - bool gpioint; - int pin_config; - int polarity; - int triggering; - bool wake_capable; - unsigned int debounce; - unsigned int quirks; -}; - -/* - * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init - * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a - * late_initcall_sync() handler, so that other builtin drivers can register their - * OpRegions before the event handlers can run. This list contains GPIO chips - * for which the acpi_gpiochip_request_irqs() call has been deferred. - */ -static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock); -static LIST_HEAD(acpi_gpio_deferred_req_irqs_list); -static bool acpi_gpio_deferred_req_irqs_done; - -static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data) -{ - /* First check the actual GPIO device */ - if (device_match_acpi_handle(&gc->gpiodev->dev, data)) - return true; - - /* - * When the ACPI device is artificially split to the banks of GPIOs, - * where each of them is represented by a separate GPIO device, - * the firmware node of the physical device may not be shared among - * the banks as they may require different values for the same property, - * e.g., number of GPIOs in a certain bank. In such case the ACPI handle - * of a GPIO device is NULL and can not be used. Hence we have to check - * the parent device to be sure that there is no match before bailing - * out. - */ - if (gc->parent) - return device_match_acpi_handle(gc->parent, data); - - return false; -} - -/** - * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API - * @path: ACPI GPIO controller full path name, (e.g. "\_SB.GPO1") - * @pin: ACPI GPIO pin number (0-based, controller-relative) - * - * Returns: - * GPIO descriptor to use with Linux generic GPIO API. - * If the GPIO cannot be translated or there is an error an ERR_PTR is - * returned. - * - * Specifically returns %-EPROBE_DEFER if the referenced GPIO - * controller does not have GPIO chip registered at the moment. This is to - * support probe deferral. 
- */ -static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin) -{ - acpi_handle handle; - acpi_status status; - - status = acpi_get_handle(NULL, path, &handle); - if (ACPI_FAILURE(status)) - return ERR_PTR(-ENODEV); - - struct gpio_device *gdev __free(gpio_device_put) = - gpio_device_find(handle, acpi_gpiochip_find); - if (!gdev) - return ERR_PTR(-EPROBE_DEFER); - - /* - * FIXME: keep track of the reference to the GPIO device somehow - * instead of putting it here. - */ - return gpio_device_get_desc(gdev, pin); -} - -static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) -{ - struct acpi_gpio_event *event = data; - - acpi_evaluate_object(event->handle, NULL, NULL, NULL); - - return IRQ_HANDLED; -} - -static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data) -{ - struct acpi_gpio_event *event = data; - - acpi_execute_simple_method(event->handle, NULL, event->pin); - - return IRQ_HANDLED; -} - -static void acpi_gpio_chip_dh(acpi_handle handle, void *data) -{ - /* The address of this function is used as a key. */ -} - -bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, - struct acpi_resource_gpio **agpio) -{ - struct acpi_resource_gpio *gpio; - - if (ares->type != ACPI_RESOURCE_TYPE_GPIO) - return false; - - gpio = &ares->data.gpio; - if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT) - return false; - - *agpio = gpio; - return true; -} -EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource); - -/** - * acpi_gpio_get_io_resource - Fetch details of an ACPI resource if it is a GPIO - * I/O resource or return False if not. - * @ares: Pointer to the ACPI resource to fetch - * @agpio: Pointer to a &struct acpi_resource_gpio to store the output pointer - * - * Returns: - * %true if GpioIo resource is found, %false otherwise. 
- */ -bool acpi_gpio_get_io_resource(struct acpi_resource *ares, - struct acpi_resource_gpio **agpio) -{ - struct acpi_resource_gpio *gpio; - - if (ares->type != ACPI_RESOURCE_TYPE_GPIO) - return false; - - gpio = &ares->data.gpio; - if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_IO) - return false; - - *agpio = gpio; - return true; -} -EXPORT_SYMBOL_GPL(acpi_gpio_get_io_resource); - -static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio, - struct acpi_gpio_event *event) -{ - struct device *parent = acpi_gpio->chip->parent; - int ret, value; - - ret = request_threaded_irq(event->irq, NULL, event->handler, - event->irqflags | IRQF_ONESHOT, "ACPI:Event", event); - if (ret) { - dev_err(parent, "Failed to setup interrupt handler for %d\n", event->irq); - return; - } - - if (event->irq_is_wake) - enable_irq_wake(event->irq); - - event->irq_requested = true; - - /* Make sure we trigger the initial state of edge-triggered IRQs */ - if (run_edge_events_on_boot && - (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) { - value = gpiod_get_raw_value_cansleep(event->desc); - if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) || - ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0)) - event->handler(event->irq, event); - } -} - -static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio) -{ - struct acpi_gpio_event *event; - - list_for_each_entry(event, &acpi_gpio->events, node) - acpi_gpiochip_request_irq(acpi_gpio, event); -} - -static enum gpiod_flags -acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity) -{ - /* GpioInt() implies input configuration */ - if (agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) - return GPIOD_IN; - - switch (agpio->io_restriction) { - case ACPI_IO_RESTRICT_INPUT: - return GPIOD_IN; - case ACPI_IO_RESTRICT_OUTPUT: - /* - * ACPI GPIO resources don't contain an initial value for the - * GPIO. Therefore we deduce that value from the pull field - * and the polarity instead. If the pin is pulled up we assume - * default to be high, if it is pulled down we assume default - * to be low, otherwise we leave pin untouched. For active low - * polarity values will be switched. See also - * Documentation/firmware-guide/acpi/gpio-properties.rst. - */ - switch (agpio->pin_config) { - case ACPI_PIN_CONFIG_PULLUP: - return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH; - case ACPI_PIN_CONFIG_PULLDOWN: - return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; - default: - break; - } - break; - default: - break; - } - - /* - * Assume that the BIOS has configured the direction and pull - * accordingly. 
- */ - return GPIOD_ASIS; -} - -static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, - struct acpi_resource_gpio *agpio, - unsigned int index, - const char *label) -{ - int polarity = GPIO_ACTIVE_HIGH; - enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity); - unsigned int pin = agpio->pin_table[index]; - struct gpio_desc *desc; - int ret; - - desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags); - if (IS_ERR(desc)) - return desc; - - /* ACPI uses hundredths of milliseconds units */ - ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10); - if (ret) - dev_warn(chip->parent, - "Failed to set debounce-timeout for pin 0x%04X, err %d\n", - pin, ret); - - return desc; -} - -static bool acpi_gpio_in_ignore_list(const char *ignore_list, const char *controller_in, - unsigned int pin_in) -{ - const char *controller, *pin_str; - unsigned int pin; - char *endp; - int len; - - controller = ignore_list; - while (controller) { - pin_str = strchr(controller, '@'); - if (!pin_str) - goto err; - - len = pin_str - controller; - if (len == strlen(controller_in) && - strncmp(controller, controller_in, len) == 0) { - pin = simple_strtoul(pin_str + 1, &endp, 10); - if (*endp != 0 && *endp != ',') - goto err; - - if (pin == pin_in) - return true; - } - - controller = strchr(controller, ','); - if (controller) - controller++; - } - - return false; -err: - pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list); - return false; -} - -static bool acpi_gpio_irq_is_wake(struct device *parent, - const struct acpi_resource_gpio *agpio) -{ - unsigned int pin = agpio->pin_table[0]; - - if (agpio->wake_capable != ACPI_WAKE_CAPABLE) - return false; - - if (acpi_gpio_in_ignore_list(ignore_wake, dev_name(parent), pin)) { - dev_info(parent, "Ignoring wakeup on pin %u\n", pin); - return false; - } - - return true; -} - -/* Always returns AE_OK so that we keep looping over the resources */ -static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, - void *context) -{ - struct acpi_gpio_chip *acpi_gpio = context; - struct gpio_chip *chip = acpi_gpio->chip; - struct acpi_resource_gpio *agpio; - acpi_handle handle, evt_handle; - struct acpi_gpio_event *event; - irq_handler_t handler = NULL; - struct gpio_desc *desc; - unsigned int pin; - int ret, irq; - - if (!acpi_gpio_get_irq_resource(ares, &agpio)) - return AE_OK; - - handle = ACPI_HANDLE(chip->parent); - pin = agpio->pin_table[0]; - - if (pin <= 255) { - char ev_name[8]; - sprintf(ev_name, "_%c%02X", - agpio->triggering == ACPI_EDGE_SENSITIVE ? 
'E' : 'L', - pin); - if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) - handler = acpi_gpio_irq_handler; - } - if (!handler) { - if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) - handler = acpi_gpio_irq_handler_evt; - } - if (!handler) - return AE_OK; - - if (acpi_gpio_in_ignore_list(ignore_interrupt, dev_name(chip->parent), pin)) { - dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin); - return AE_OK; - } - - desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event"); - if (IS_ERR(desc)) { - dev_err(chip->parent, - "Failed to request GPIO for pin 0x%04X, err %ld\n", - pin, PTR_ERR(desc)); - return AE_OK; - } - - ret = gpiochip_lock_as_irq(chip, pin); - if (ret) { - dev_err(chip->parent, - "Failed to lock GPIO pin 0x%04X as interrupt, err %d\n", - pin, ret); - goto fail_free_desc; - } - - irq = gpiod_to_irq(desc); - if (irq < 0) { - dev_err(chip->parent, - "Failed to translate GPIO pin 0x%04X to IRQ, err %d\n", - pin, irq); - goto fail_unlock_irq; - } - - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - goto fail_unlock_irq; - - event->irqflags = IRQF_ONESHOT; - if (agpio->triggering == ACPI_LEVEL_SENSITIVE) { - if (agpio->polarity == ACPI_ACTIVE_HIGH) - event->irqflags |= IRQF_TRIGGER_HIGH; - else - event->irqflags |= IRQF_TRIGGER_LOW; - } else { - switch (agpio->polarity) { - case ACPI_ACTIVE_HIGH: - event->irqflags |= IRQF_TRIGGER_RISING; - break; - case ACPI_ACTIVE_LOW: - event->irqflags |= IRQF_TRIGGER_FALLING; - break; - default: - event->irqflags |= IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING; - break; - } - } - - event->handle = evt_handle; - event->handler = handler; - event->irq = irq; - event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio); - event->pin = pin; - event->desc = desc; - - list_add_tail(&event->node, &acpi_gpio->events); - - return AE_OK; - -fail_unlock_irq: - gpiochip_unlock_as_irq(chip, pin); -fail_free_desc: - gpiochip_free_own_desc(desc); - - return AE_OK; -} - -/** - * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events - * @chip: GPIO chip - * - * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are - * handled by ACPI event methods which need to be called from the GPIO - * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which - * GPIO pins have ACPI event methods and assigns interrupt handlers that calls - * the ACPI event methods for those pins. - */ -void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) -{ - struct acpi_gpio_chip *acpi_gpio; - acpi_handle handle; - acpi_status status; - bool defer; - - if (!chip->parent || !chip->to_irq) - return; - - handle = ACPI_HANDLE(chip->parent); - if (!handle) - return; - - status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); - if (ACPI_FAILURE(status)) - return; - - if (acpi_quirk_skip_gpio_event_handlers()) - return; - - acpi_walk_resources(handle, METHOD_NAME__AEI, - acpi_gpiochip_alloc_event, acpi_gpio); - - mutex_lock(&acpi_gpio_deferred_req_irqs_lock); - defer = !acpi_gpio_deferred_req_irqs_done; - if (defer) - list_add(&acpi_gpio->deferred_req_irqs_list_entry, - &acpi_gpio_deferred_req_irqs_list); - mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); - - if (defer) - return; - - acpi_gpiochip_request_irqs(acpi_gpio); -} -EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts); - -/** - * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts. 
- * @chip: GPIO chip - * - * Free interrupts associated with GPIO ACPI event method for the given - * GPIO chip. - */ -void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) -{ - struct acpi_gpio_chip *acpi_gpio; - struct acpi_gpio_event *event, *ep; - acpi_handle handle; - acpi_status status; - - if (!chip->parent || !chip->to_irq) - return; - - handle = ACPI_HANDLE(chip->parent); - if (!handle) - return; - - status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); - if (ACPI_FAILURE(status)) - return; - - mutex_lock(&acpi_gpio_deferred_req_irqs_lock); - if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry)) - list_del_init(&acpi_gpio->deferred_req_irqs_list_entry); - mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); - - list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { - if (event->irq_requested) { - if (event->irq_is_wake) - disable_irq_wake(event->irq); - - free_irq(event->irq, event); - } - - gpiochip_unlock_as_irq(chip, event->pin); - gpiochip_free_own_desc(event->desc); - list_del(&event->node); - kfree(event); - } -} -EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts); - -int acpi_dev_add_driver_gpios(struct acpi_device *adev, - const struct acpi_gpio_mapping *gpios) -{ - if (adev && gpios) { - adev->driver_gpios = gpios; - return 0; - } - return -EINVAL; -} -EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios); - -void acpi_dev_remove_driver_gpios(struct acpi_device *adev) -{ - if (adev) - adev->driver_gpios = NULL; -} -EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios); - -static void acpi_dev_release_driver_gpios(void *adev) -{ - acpi_dev_remove_driver_gpios(adev); -} - -int devm_acpi_dev_add_driver_gpios(struct device *dev, - const struct acpi_gpio_mapping *gpios) -{ - struct acpi_device *adev = ACPI_COMPANION(dev); - int ret; - - ret = acpi_dev_add_driver_gpios(adev, gpios); - if (ret) - return ret; - - return devm_add_action_or_reset(dev, acpi_dev_release_driver_gpios, adev); -} -EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios); - -static bool acpi_get_driver_gpio_data(struct acpi_device *adev, - const char *name, int index, - struct fwnode_reference_args *args, - unsigned int *quirks) -{ - const struct acpi_gpio_mapping *gm; - - if (!adev || !adev->driver_gpios) - return false; - - for (gm = adev->driver_gpios; gm->name; gm++) - if (!strcmp(name, gm->name) && gm->data && index < gm->size) { - const struct acpi_gpio_params *par = gm->data + index; - - args->fwnode = acpi_fwnode_handle(adev); - args->args[0] = par->crs_entry_index; - args->args[1] = par->line_index; - args->args[2] = par->active_low; - args->nargs = 3; - - *quirks = gm->quirks; - return true; - } - - return false; -} - -static int -__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update) -{ - const enum gpiod_flags mask = - GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT | - GPIOD_FLAGS_BIT_DIR_VAL; - int ret = 0; - - /* - * Check if the BIOS has IoRestriction with explicitly set direction - * and update @flags accordingly. Otherwise use whatever caller asked - * for. - */ - if (update & GPIOD_FLAGS_BIT_DIR_SET) { - enum gpiod_flags diff = *flags ^ update; - - /* - * Check if caller supplied incompatible GPIO initialization - * flags. - * - * Return %-EINVAL to notify that firmware has different - * settings and we are going to use them. 
- */ - if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) && (diff & GPIOD_FLAGS_BIT_DIR_OUT)) || - ((*flags & GPIOD_FLAGS_BIT_DIR_OUT) && (diff & GPIOD_FLAGS_BIT_DIR_VAL))) - ret = -EINVAL; - *flags = (*flags & ~mask) | (update & mask); - } - return ret; -} - -static int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, - struct acpi_gpio_info *info) -{ - struct device *dev = &info->adev->dev; - enum gpiod_flags old = *flags; - int ret; - - ret = __acpi_gpio_update_gpiod_flags(&old, info->flags); - if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) { - if (ret) - dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n"); - } else { - if (ret) - dev_dbg(dev, "Override GPIO initialization flags\n"); - *flags = old; - } - - return ret; -} - -static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags, - struct acpi_gpio_info *info) -{ - switch (info->pin_config) { - case ACPI_PIN_CONFIG_PULLUP: - *lookupflags |= GPIO_PULL_UP; - break; - case ACPI_PIN_CONFIG_PULLDOWN: - *lookupflags |= GPIO_PULL_DOWN; - break; - case ACPI_PIN_CONFIG_NOPULL: - *lookupflags |= GPIO_PULL_DISABLE; - break; - default: - break; - } - - if (info->polarity == GPIO_ACTIVE_LOW) - *lookupflags |= GPIO_ACTIVE_LOW; - - return 0; -} - -struct acpi_gpio_lookup { - struct acpi_gpio_info info; - int index; - u16 pin_index; - bool active_low; - struct gpio_desc *desc; - int n; -}; - -static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data) -{ - struct acpi_gpio_lookup *lookup = data; - - if (ares->type != ACPI_RESOURCE_TYPE_GPIO) - return 1; - - if (!lookup->desc) { - const struct acpi_resource_gpio *agpio = &ares->data.gpio; - bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT; - struct gpio_desc *desc; - u16 pin_index; - - if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint) - lookup->index++; - - if (lookup->n++ != lookup->index) - return 1; - - pin_index = lookup->pin_index; - if (pin_index >= agpio->pin_table_length) - return 1; - - if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER) - desc = gpio_to_desc(agpio->pin_table[pin_index]); - else - desc = acpi_get_gpiod(agpio->resource_source.string_ptr, - agpio->pin_table[pin_index]); - lookup->desc = desc; - lookup->info.pin_config = agpio->pin_config; - lookup->info.debounce = agpio->debounce_timeout; - lookup->info.gpioint = gpioint; - lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio); - - /* - * Polarity and triggering are only specified for GpioInt - * resource. 
- * Note: we expect here: - * - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW - * - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH - */ - if (lookup->info.gpioint) { - lookup->info.polarity = agpio->polarity; - lookup->info.triggering = agpio->triggering; - } else { - lookup->info.polarity = lookup->active_low; - } - - lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity); - } - - return 1; -} - -static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup, - struct acpi_gpio_info *info) -{ - struct acpi_device *adev = lookup->info.adev; - struct list_head res_list; - int ret; - - INIT_LIST_HEAD(&res_list); - - ret = acpi_dev_get_resources(adev, &res_list, - acpi_populate_gpio_lookup, - lookup); - if (ret < 0) - return ret; - - acpi_dev_free_resource_list(&res_list); - - if (!lookup->desc) - return -ENOENT; - - if (info) - *info = lookup->info; - return 0; -} - -static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode, - const char *propname, int index, - struct acpi_gpio_lookup *lookup) -{ - struct fwnode_reference_args args; - unsigned int quirks = 0; - int ret; - - memset(&args, 0, sizeof(args)); - ret = __acpi_node_get_property_reference(fwnode, propname, index, 3, - &args); - if (ret) { - struct acpi_device *adev; - - adev = to_acpi_device_node(fwnode); - if (!acpi_get_driver_gpio_data(adev, propname, index, &args, &quirks)) - return ret; - } - /* - * The property was found and resolved, so need to lookup the GPIO based - * on returned args. - */ - if (!to_acpi_device_node(args.fwnode)) - return -EINVAL; - if (args.nargs != 3) - return -EPROTO; - - lookup->index = args.args[0]; - lookup->pin_index = args.args[1]; - lookup->active_low = !!args.args[2]; - - lookup->info.adev = to_acpi_device_node(args.fwnode); - lookup->info.quirks = quirks; - - return 0; -} - -/** - * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources - * @adev: pointer to a ACPI device to get GPIO from - * @propname: Property name of the GPIO (optional) - * @index: index of GpioIo/GpioInt resource (starting from %0) - * @info: info pointer to fill in (optional) - * - * Function goes through ACPI resources for @adev and based on @index looks - * up a GpioIo/GpioInt resource, translates it to the Linux GPIO descriptor, - * and returns it. @index matches GpioIo/GpioInt resources only so if there - * are total %3 GPIO resources, the index goes from %0 to %2. - * - * If @propname is specified the GPIO is looked using device property. In - * that case @index is used to select the GPIO entry in the property value - * (in case of multiple). - * - * Returns: - * GPIO descriptor to use with Linux generic GPIO API. - * If the GPIO cannot be translated or there is an error an ERR_PTR is - * returned. - * - * Note: if the GPIO resource has multiple entries in the pin list, this - * function only returns the first. 
- */ -static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev, - const char *propname, - int index, - struct acpi_gpio_info *info) -{ - struct acpi_gpio_lookup lookup; - int ret; - - memset(&lookup, 0, sizeof(lookup)); - lookup.index = index; - - if (propname) { - dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname); - - ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev), - propname, index, &lookup); - if (ret) - return ERR_PTR(ret); - - dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n", - dev_name(&lookup.info.adev->dev), lookup.index, - lookup.pin_index, lookup.active_low); - } else { - dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index); - lookup.info.adev = adev; - } - - ret = acpi_gpio_resource_lookup(&lookup, info); - return ret ? ERR_PTR(ret) : lookup.desc; -} - -/** - * acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node - * @fwnode: pointer to an ACPI firmware node to get the GPIO information from - * @propname: Property name of the GPIO - * @index: index of GpioIo/GpioInt resource (starting from %0) - * @info: info pointer to fill in (optional) - * - * This function uses the property-based GPIO lookup to get to the GPIO - * resource with the relevant information from a data-only ACPI firmware node - * and uses that to obtain the GPIO descriptor to return. - * - * Returns: - * GPIO descriptor to use with Linux generic GPIO API. - * If the GPIO cannot be translated or there is an error an ERR_PTR is - * returned. - */ -static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode, - const char *propname, - int index, - struct acpi_gpio_info *info) -{ - struct acpi_gpio_lookup lookup; - int ret; - - if (!is_acpi_data_node(fwnode)) - return ERR_PTR(-ENODEV); - - if (!propname) - return ERR_PTR(-EINVAL); - - memset(&lookup, 0, sizeof(lookup)); - lookup.index = index; - - ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup); - if (ret) - return ERR_PTR(ret); - - ret = acpi_gpio_resource_lookup(&lookup, info); - return ret ? 
ERR_PTR(ret) : lookup.desc; -} - -static bool acpi_can_fallback_to_crs(struct acpi_device *adev, - const char *con_id) -{ - /* If there is no ACPI device, there is no _CRS to fall back to */ - if (!adev) - return false; - - /* Never allow fallback if the device has properties */ - if (acpi_dev_has_props(adev) || adev->driver_gpios) - return false; - - return con_id == NULL; -} - -static struct gpio_desc * -__acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx, - bool can_fallback, struct acpi_gpio_info *info) -{ - struct acpi_device *adev = to_acpi_device_node(fwnode); - struct gpio_desc *desc; - char propname[32]; - - /* Try first from _DSD */ - for_each_gpio_property_name(propname, con_id) { - if (adev) - desc = acpi_get_gpiod_by_index(adev, - propname, idx, info); - else - desc = acpi_get_gpiod_from_data(fwnode, - propname, idx, info); - if (PTR_ERR(desc) == -EPROBE_DEFER) - return ERR_CAST(desc); - - if (!IS_ERR(desc)) - return desc; - } - - /* Then from plain _CRS GPIOs */ - if (can_fallback) - return acpi_get_gpiod_by_index(adev, NULL, idx, info); - - return ERR_PTR(-ENOENT); -} - -struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, - const char *con_id, - unsigned int idx, - enum gpiod_flags *dflags, - unsigned long *lookupflags) -{ - struct acpi_device *adev = to_acpi_device_node(fwnode); - bool can_fallback = acpi_can_fallback_to_crs(adev, con_id); - struct acpi_gpio_info info; - struct gpio_desc *desc; - - desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info); - if (IS_ERR(desc)) - return desc; - - if (info.gpioint && - (*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) { - dev_dbg(&adev->dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n"); - return ERR_PTR(-ENOENT); - } - - acpi_gpio_update_gpiod_flags(dflags, &info); - acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info); - return desc; -} - -/** - * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number - * @adev: pointer to a ACPI device to get IRQ from - * @con_id: optional name of GpioInt resource - * @index: index of GpioInt resource (starting from %0) - * @wake_capable: Set to true if the IRQ is wake capable - * - * If the device has one or more GpioInt resources, this function can be - * used to translate from the GPIO offset in the resource to the Linux IRQ - * number. - * - * The function is idempotent, though each time it runs it will configure GPIO - * pin direction according to the flags in GpioInt resource. - * - * The function takes optional @con_id parameter. If the resource has - * a @con_id in a property, then only those will be taken into account. - * - * The GPIO is considered wake capable if the GpioInt resource specifies - * SharedAndWake or ExclusiveAndWake. - * - * Returns: - * Linux IRQ number (> 0) on success, negative errno on failure. 
- */ -int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, - bool *wake_capable) -{ - struct fwnode_handle *fwnode = acpi_fwnode_handle(adev); - int idx, i; - unsigned int irq_flags; - int ret; - - for (i = 0, idx = 0; idx <= index; i++) { - struct acpi_gpio_info info; - struct gpio_desc *desc; - - /* Ignore -EPROBE_DEFER, it only matters if idx matches */ - desc = __acpi_find_gpio(fwnode, con_id, i, true, &info); - if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) - return PTR_ERR(desc); - - if (info.gpioint && idx++ == index) { - unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT; - enum gpiod_flags dflags = GPIOD_ASIS; - char label[32]; - int irq; - - if (IS_ERR(desc)) - return PTR_ERR(desc); - - irq = gpiod_to_irq(desc); - if (irq < 0) - return irq; - - acpi_gpio_update_gpiod_flags(&dflags, &info); - acpi_gpio_update_gpiod_lookup_flags(&lflags, &info); - - snprintf(label, sizeof(label), "%pfwP GpioInt(%d)", fwnode, index); - ret = gpiod_set_consumer_name(desc, con_id ?: label); - if (ret) - return ret; - - ret = gpiod_configure_flags(desc, label, lflags, dflags); - if (ret < 0) - return ret; - - /* ACPI uses hundredths of milliseconds units */ - ret = gpio_set_debounce_timeout(desc, info.debounce * 10); - if (ret) - return ret; - - irq_flags = acpi_dev_get_irq_type(info.triggering, - info.polarity); - - /* - * If the IRQ is not already in use then set type - * if specified and different than the current one. - */ - if (can_request_irq(irq, irq_flags)) { - if (irq_flags != IRQ_TYPE_NONE && - irq_flags != irq_get_trigger_type(irq)) - irq_set_irq_type(irq, irq_flags); - } else { - dev_dbg(&adev->dev, "IRQ %d already in use\n", irq); - } - - /* avoid suspend issues with GPIOs when systems are using S3 */ - if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) - *wake_capable = info.wake_capable; - - return irq; - } - - } - return -ENOENT; -} -EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by); - -static acpi_status -acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, - u32 bits, u64 *value, void *handler_context, - void *region_context) -{ - struct acpi_gpio_chip *achip = region_context; - struct gpio_chip *chip = achip->chip; - struct acpi_resource_gpio *agpio; - struct acpi_resource *ares; - u16 pin_index = address; - acpi_status status; - int length; - int i; - - status = acpi_buffer_to_resource(achip->conn_info.connection, - achip->conn_info.length, &ares); - if (ACPI_FAILURE(status)) - return status; - - if (WARN_ON(ares->type != ACPI_RESOURCE_TYPE_GPIO)) { - ACPI_FREE(ares); - return AE_BAD_PARAMETER; - } - - agpio = &ares->data.gpio; - - if (WARN_ON(agpio->io_restriction == ACPI_IO_RESTRICT_INPUT && - function == ACPI_WRITE)) { - ACPI_FREE(ares); - return AE_BAD_PARAMETER; - } - - length = min_t(u16, agpio->pin_table_length, pin_index + bits); - for (i = pin_index; i < length; ++i) { - unsigned int pin = agpio->pin_table[i]; - struct acpi_gpio_connection *conn; - struct gpio_desc *desc; - bool found; - - mutex_lock(&achip->conn_lock); - - found = false; - list_for_each_entry(conn, &achip->conns, node) { - if (conn->pin == pin) { - found = true; - desc = conn->desc; - break; - } - } - - /* - * The same GPIO can be shared between operation region and - * event but only if the access here is ACPI_READ. In that - * case we "borrow" the event GPIO instead. 
- */ - if (!found && agpio->shareable == ACPI_SHARED && - function == ACPI_READ) { - struct acpi_gpio_event *event; - - list_for_each_entry(event, &achip->events, node) { - if (event->pin == pin) { - desc = event->desc; - found = true; - break; - } - } - } - - if (!found) { - desc = acpi_request_own_gpiod(chip, agpio, i, "ACPI:OpRegion"); - if (IS_ERR(desc)) { - mutex_unlock(&achip->conn_lock); - status = AE_ERROR; - goto out; - } - - conn = kzalloc(sizeof(*conn), GFP_KERNEL); - if (!conn) { - gpiochip_free_own_desc(desc); - mutex_unlock(&achip->conn_lock); - status = AE_NO_MEMORY; - goto out; - } - - conn->pin = pin; - conn->desc = desc; - list_add_tail(&conn->node, &achip->conns); - } - - mutex_unlock(&achip->conn_lock); - - if (function == ACPI_WRITE) - gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i))); - else - *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i; - } - -out: - ACPI_FREE(ares); - return status; -} - -static void acpi_gpiochip_request_regions(struct acpi_gpio_chip *achip) -{ - struct gpio_chip *chip = achip->chip; - acpi_handle handle = ACPI_HANDLE(chip->parent); - acpi_status status; - - INIT_LIST_HEAD(&achip->conns); - mutex_init(&achip->conn_lock); - status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GPIO, - acpi_gpio_adr_space_handler, - NULL, achip); - if (ACPI_FAILURE(status)) - dev_err(chip->parent, - "Failed to install GPIO OpRegion handler\n"); -} - -static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip) -{ - struct gpio_chip *chip = achip->chip; - acpi_handle handle = ACPI_HANDLE(chip->parent); - struct acpi_gpio_connection *conn, *tmp; - acpi_status status; - - status = acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GPIO, - acpi_gpio_adr_space_handler); - if (ACPI_FAILURE(status)) { - dev_err(chip->parent, - "Failed to remove GPIO OpRegion handler\n"); - return; - } - - list_for_each_entry_safe_reverse(conn, tmp, &achip->conns, node) { - gpiochip_free_own_desc(conn->desc); - list_del(&conn->node); - kfree(conn); - } -} - -static struct gpio_desc * -acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip, - struct fwnode_handle *fwnode, - const char **name, - unsigned long *lflags, - enum gpiod_flags *dflags) -{ - struct gpio_chip *chip = achip->chip; - struct gpio_desc *desc; - u32 gpios[2]; - int ret; - - *lflags = GPIO_LOOKUP_FLAGS_DEFAULT; - *dflags = GPIOD_ASIS; - *name = NULL; - - ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios, - ARRAY_SIZE(gpios)); - if (ret < 0) - return ERR_PTR(ret); - - desc = gpiochip_get_desc(chip, gpios[0]); - if (IS_ERR(desc)) - return desc; - - if (gpios[1]) - *lflags |= GPIO_ACTIVE_LOW; - - if (fwnode_property_present(fwnode, "input")) - *dflags |= GPIOD_IN; - else if (fwnode_property_present(fwnode, "output-low")) - *dflags |= GPIOD_OUT_LOW; - else if (fwnode_property_present(fwnode, "output-high")) - *dflags |= GPIOD_OUT_HIGH; - else - return ERR_PTR(-EINVAL); - - fwnode_property_read_string(fwnode, "line-name", name); - - return desc; -} - -static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip) -{ - struct gpio_chip *chip = achip->chip; - struct fwnode_handle *fwnode; - - device_for_each_child_node(chip->parent, fwnode) { - unsigned long lflags; - enum gpiod_flags dflags; - struct gpio_desc *desc; - const char *name; - int ret; - - if (!fwnode_property_present(fwnode, "gpio-hog")) - continue; - - desc = acpi_gpiochip_parse_own_gpio(achip, fwnode, &name, - &lflags, &dflags); - if (IS_ERR(desc)) - continue; - - ret = gpiod_hog(desc, name, lflags, 
dflags); - if (ret) { - dev_err(chip->parent, "Failed to hog GPIO\n"); - fwnode_handle_put(fwnode); - return; - } - } -} - -void acpi_gpiochip_add(struct gpio_chip *chip) -{ - struct acpi_gpio_chip *acpi_gpio; - struct acpi_device *adev; - acpi_status status; - - if (!chip || !chip->parent) - return; - - adev = ACPI_COMPANION(chip->parent); - if (!adev) - return; - - acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL); - if (!acpi_gpio) { - dev_err(chip->parent, - "Failed to allocate memory for ACPI GPIO chip\n"); - return; - } - - acpi_gpio->chip = chip; - INIT_LIST_HEAD(&acpi_gpio->events); - INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry); - - status = acpi_attach_data(adev->handle, acpi_gpio_chip_dh, acpi_gpio); - if (ACPI_FAILURE(status)) { - dev_err(chip->parent, "Failed to attach ACPI GPIO chip\n"); - kfree(acpi_gpio); - return; - } - - acpi_gpiochip_request_regions(acpi_gpio); - acpi_gpiochip_scan_gpios(acpi_gpio); - acpi_dev_clear_dependencies(adev); -} - -void acpi_gpiochip_remove(struct gpio_chip *chip) -{ - struct acpi_gpio_chip *acpi_gpio; - acpi_handle handle; - acpi_status status; - - if (!chip || !chip->parent) - return; - - handle = ACPI_HANDLE(chip->parent); - if (!handle) - return; - - status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); - if (ACPI_FAILURE(status)) { - dev_warn(chip->parent, "Failed to retrieve ACPI GPIO chip\n"); - return; - } - - acpi_gpiochip_free_regions(acpi_gpio); - - acpi_detach_data(handle, acpi_gpio_chip_dh); - kfree(acpi_gpio); -} - -static int acpi_gpio_package_count(const union acpi_object *obj) -{ - const union acpi_object *element = obj->package.elements; - const union acpi_object *end = element + obj->package.count; - unsigned int count = 0; - - while (element < end) { - switch (element->type) { - case ACPI_TYPE_LOCAL_REFERENCE: - element += 3; - fallthrough; - case ACPI_TYPE_INTEGER: - element++; - count++; - break; - - default: - return -EPROTO; - } - } - - return count; -} - -static int acpi_find_gpio_count(struct acpi_resource *ares, void *data) -{ - unsigned int *count = data; - - if (ares->type == ACPI_RESOURCE_TYPE_GPIO) - *count += ares->data.gpio.pin_table_length; - - return 1; -} - -/** - * acpi_gpio_count - count the GPIOs associated with a firmware node / function - * @fwnode: firmware node of the GPIO consumer - * @con_id: function within the GPIO consumer - * - * Returns: - * The number of GPIOs associated with a firmware node / function or %-ENOENT, - * if no GPIO has been assigned to the requested function. 
- */ -int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) -{ - struct acpi_device *adev = to_acpi_device_node(fwnode); - const union acpi_object *obj; - const struct acpi_gpio_mapping *gm; - int count = -ENOENT; - int ret; - char propname[32]; - - /* Try first from _DSD */ - for_each_gpio_property_name(propname, con_id) { - ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj); - if (ret == 0) { - if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) - count = 1; - else if (obj->type == ACPI_TYPE_PACKAGE) - count = acpi_gpio_package_count(obj); - } else if (adev->driver_gpios) { - for (gm = adev->driver_gpios; gm->name; gm++) - if (strcmp(propname, gm->name) == 0) { - count = gm->size; - break; - } - } - if (count > 0) - break; - } - - /* Then from plain _CRS GPIOs */ - if (count < 0) { - struct list_head resource_list; - unsigned int crs_count = 0; - - if (!acpi_can_fallback_to_crs(adev, con_id)) - return count; - - INIT_LIST_HEAD(&resource_list); - acpi_dev_get_resources(adev, &resource_list, - acpi_find_gpio_count, &crs_count); - acpi_dev_free_resource_list(&resource_list); - if (crs_count > 0) - count = crs_count; - } - return count ? count : -ENOENT; -} - -/* Run deferred acpi_gpiochip_request_irqs() */ -static int __init acpi_gpio_handle_deferred_request_irqs(void) -{ - struct acpi_gpio_chip *acpi_gpio, *tmp; - - mutex_lock(&acpi_gpio_deferred_req_irqs_lock); - list_for_each_entry_safe(acpi_gpio, tmp, - &acpi_gpio_deferred_req_irqs_list, - deferred_req_irqs_list_entry) - acpi_gpiochip_request_irqs(acpi_gpio); - - acpi_gpio_deferred_req_irqs_done = true; - mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); - - return 0; -} -/* We must use _sync so that this runs after the first deferred_probe run */ -late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); - -static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { - { - /* - * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for - * a non existing micro-USB-B connector which puts the HDMI - * DDC pins in GPIO mode, breaking HDMI support. - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), - DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .no_edge_events_on_boot = true, - }, - }, - { - /* - * The Terra Pad 1061 has a micro-USB-B id-pin handler, which - * instead of controlling the actual micro-USB-B turns the 5V - * boost for its USB-A connector off. The actual micro-USB-B - * connector is wired for charging only. - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), - DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .no_edge_events_on_boot = true, - }, - }, - { - /* - * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an - * external embedded-controller connected via I2C + an ACPI GPIO - * event handler on INT33FFC:02 pin 12, causing spurious wakeups. - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_wake = "INT33FC:02@12", - }, - }, - { - /* - * HP X2 10 models with Cherry Trail SoC + TI PMIC use an - * external embedded-controller connected via I2C + an ACPI GPIO - * event handler on INT33FF:01 pin 0, causing spurious wakeups. - * When suspending by closing the LID, the power to the USB - * keyboard is turned off, causing INT0002 ACPI events to - * trigger once the XHCI controller notices the keyboard is - * gone. 
So INT0002 events cause spurious wakeups too. Ignoring - * EC wakes breaks wakeup when opening the lid, the user needs - * to press the power-button to wakeup the system. The - * alternative is suspend simply not working, which is worse. - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_wake = "INT33FF:01@0,INT0002:00@2", - }, - }, - { - /* - * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an - * external embedded-controller connected via I2C + an ACPI GPIO - * event handler on INT33FC:02 pin 28, causing spurious wakeups. - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), - DMI_MATCH(DMI_BOARD_NAME, "815D"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_wake = "INT33FC:02@28", - }, - }, - { - /* - * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an - * external embedded-controller connected via I2C + an ACPI GPIO - * event handler on INT33FF:01 pin 0, causing spurious wakeups. - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), - DMI_MATCH(DMI_BOARD_NAME, "813E"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_wake = "INT33FF:01@0", - }, - }, - { - /* - * Interrupt storm caused from edge triggered floating pin - * Found in BIOS UX325UAZ.300 - * https://bugzilla.kernel.org/show_bug.cgi?id=216208 - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_interrupt = "AMDI0030:00@18", - }, - }, - { - /* - * Spurious wakeups from TP_ATTN# pin - * Found in BIOS 1.7.8 - * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627 - */ - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_wake = "ELAN0415:00@9", - }, - }, - { - /* - * Spurious wakeups from TP_ATTN# pin - * Found in BIOS 1.7.8 - * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627 - */ - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_wake = "ELAN0415:00@9", - }, - }, - { - /* - * Spurious wakeups from TP_ATTN# pin - * Found in BIOS 1.7.7 - */ - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_wake = "SYNA1202:00@16", - }, - }, - { - /* - * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to - * a "dolby" button. At the ACPI level an _AEI event-handler - * is connected which sets an ACPI variable to 1 on both - * edges. This variable can be polled + cleared to 0 using - * WMI. But since the variable is set on both edges the WMI - * interface is pretty useless even when polling. - * So instead the x86-android-tablets code instantiates - * a gpio-keys platform device for it. - * Ignore the _AEI handler for the pin, so that it is not busy. 
- */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"), - DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_interrupt = "INT33FC:00@3", - }, - }, - { - /* - * Spurious wakeups from TP_ATTN# pin - * Found in BIOS 0.35 - * https://gitlab.freedesktop.org/drm/amd/-/issues/3073 - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GPD"), - DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_wake = "PNP0C50:00@8", - }, - }, - { - /* - * Spurious wakeups from GPIO 11 - * Found in BIOS 1.04 - * https://gitlab.freedesktop.org/drm/amd/-/issues/3954 - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"), - }, - .driver_data = &(struct acpi_gpiolib_dmi_quirk) { - .ignore_interrupt = "AMDI0030:00@11", - }, - }, - {} /* Terminating entry */ -}; - -static int __init acpi_gpio_setup_params(void) -{ - const struct acpi_gpiolib_dmi_quirk *quirk = NULL; - const struct dmi_system_id *id; - - id = dmi_first_match(gpiolib_acpi_quirks); - if (id) - quirk = id->driver_data; - - if (run_edge_events_on_boot < 0) { - if (quirk && quirk->no_edge_events_on_boot) - run_edge_events_on_boot = 0; - else - run_edge_events_on_boot = 1; - } - - if (ignore_wake == NULL && quirk && quirk->ignore_wake) - ignore_wake = quirk->ignore_wake; - - if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt) - ignore_interrupt = quirk->ignore_interrupt; - - return 0; -} - -/* Directly after dmi_setup() which runs as core_initcall() */ -postcore_initcall(acpi_gpio_setup_params); diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h index 7e1c51d04040..a90267470a4e 100644 --- a/drivers/gpio/gpiolib-acpi.h +++ b/drivers/gpio/gpiolib-acpi.h @@ -58,4 +58,19 @@ static inline int acpi_gpio_count(const struct fwnode_handle *fwnode, } #endif
+void acpi_gpio_process_deferred_list(struct list_head *list); + +bool acpi_gpio_add_to_deferred_list(struct list_head *list); +void acpi_gpio_remove_from_deferred_list(struct list_head *list); + +int acpi_gpio_need_run_edge_events_on_boot(void); + +enum acpi_gpio_ignore_list { + ACPI_GPIO_IGNORE_WAKE, + ACPI_GPIO_IGNORE_INTERRUPT, +}; + +bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list, + const char *controller_in, unsigned int pin_in); + #endif /* GPIOLIB_ACPI_H */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9e1716a3f70b..a3d448148194 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3092,11 +3092,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev, (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) continue; - /* skip CG for VCE/UVD/VPE, it's handled specially */ + /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && - adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && adev->ip_blocks[i].version->funcs->set_powergating_state) { /* enable powergating to save power */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 1c8ac4cf08c5..af729cd521ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1513,6 +1513,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, if (r) goto out;
+ mutex_lock(&adev->mman.gtt_window_lock); amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; @@ -1527,6 +1528,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, WARN_ON(job->ibs[0].length_dw > num_dw);
fence = amdgpu_job_submit(job); + mutex_unlock(&adev->mman.gtt_window_lock);
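Taken together, the two hunks in amdgpu_ttm_access_memory_sdma() above serialize the GART window setup and job submission under mman.gtt_window_lock, while the fence wait stays outside the lock. A condensed sketch only (error paths and the window programming details are omitted, see the hunks for the real code):

    mutex_lock(&adev->mman.gtt_window_lock);
    /* map the source through the GTT window, build the SDMA copy job */
    fence = amdgpu_job_submit(job);
    mutex_unlock(&adev->mman.gtt_window_lock);

    /* wait for completion outside the lock */
    if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
        r = -ETIMEDOUT;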
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout)) r = -ETIMEDOUT; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 87aaf5f1224f..abbf49c90e57 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -103,12 +103,39 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index]; + bool retry_fault = !!(entry->src_data[1] & 0x80); + bool write_fault = !!(entry->src_data[1] & 0x20); uint32_t status = 0; u64 addr;
addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44;
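Right above, gmc_v11_0_process_interrupt() unpacks the retry/write flags and the faulting address from the IH entry (the gmc_v12 hunk below repeats the same decode). A worked example with made-up entry values, just to show the layout:

    /* illustrative values only, not taken from the patch */
    u32 src0 = 0x00abcdef, src1 = 0xa3;
    bool retry_fault = !!(src1 & 0x80);   /* true */
    bool write_fault = !!(src1 & 0x20);   /* true */
    u64 addr = ((u64)src0 << 12) | (((u64)src1 & 0xf) << 44);
    /* addr == 0x300abcdef000: bits 47..44 come from src1, the rest is
     * the page-aligned fault address from src0 */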
+ if (retry_fault) { + /* Returning 1 here also prevents sending the IV to the KFD */ + + /* Process it only if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, + entry->timestamp)) + return 1; + + /* Delegate it to a different ring if the hardware hasn't + * already done it. + */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, + entry->timestamp, write_fault)) + return 1; + } + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index 525e435ee22d..729f343c17a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -91,6 +91,8 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { struct amdgpu_vmhub *hub; + bool retry_fault = !!(entry->src_data[1] & 0x80); + bool write_fault = !!(entry->src_data[1] & 0x20); uint32_t status = 0; u64 addr;
@@ -102,6 +104,31 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev, else hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ if (retry_fault) { + /* Returning 1 here also prevents sending the IV to the KFD */ + + /* Process it only if it's the first fault for this address */ + if (entry->ih != &adev->irq.ih_soft && + amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid, + entry->timestamp)) + return 1; + + /* Delegate it to a different ring if the hardware hasn't + * already done it. + */ + if (entry->ih == &adev->irq.ih) { + amdgpu_irq_delegate(adev, entry, 8); + return 1; + } + + /* Try to handle the recoverable page faults by filling page + * tables + */ + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, + entry->timestamp, write_fault)) + return 1; + } + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 6c8c9935a0f2..c810ca5cacae 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -3640,14 +3640,18 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { };
static const uint32_t cwsr_trap_gfx12_hex[] = { - 0xbfa00001, 0xbfa002a2, - 0xb0804009, 0xb8f8f804, + 0xbfa00001, 0xbfa002b2, + 0xb0804009, 0xb8eef81a, + 0xbf880000, 0xb980081a, + 0x00000000, 0xb8f8f804, + 0x9177ff77, 0x0c000000, + 0x846e9a6e, 0x8c776e77, 0x9178ff78, 0x00008c00, 0xb8fbf811, 0x8b6eff78, 0x00004000, 0xbfa10008, 0x8b6eff7b, 0x00000080, 0xbfa20018, 0x8b6ea07b, - 0xbfa20042, 0xbf830010, + 0xbfa2004a, 0xbf830010, 0xb8fbf811, 0xbfa0fffb, 0x8b6eff7b, 0x00000bd0, 0xbfa20010, 0xb8eef812, @@ -3658,28 +3662,32 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0xf0000000, 0xbfa20005, 0x8b6fff6f, 0x00000200, 0xbfa20002, 0x8b6ea07b, - 0xbfa2002c, 0xbefa4d82, + 0xbfa20034, 0xbefa4d82, 0xbf8a0000, 0x84fa887a, 0xbf0d8f7b, 0xbfa10002, 0x8c7bff7b, 0xffff0000, - 0xf4601bbd, 0xf8000010, - 0xbf8a0000, 0x846e976e, - 0x9177ff77, 0x00800000, - 0x8c776e77, 0xf4603bbd, - 0xf8000000, 0xbf8a0000, - 0xf4603ebd, 0xf8000008, - 0xbf8a0000, 0x8bee6e6e, - 0xbfa10001, 0xbe80486e, - 0x8b6eff6d, 0xf0000000, - 0xbfa20009, 0xb8eef811, - 0x8b6eff6e, 0x00000080, - 0xbfa20007, 0x8c78ff78, - 0x00004000, 0x80ec886c, - 0x82ed806d, 0xbfa00002, - 0x806c846c, 0x826d806d, - 0x8b6dff6d, 0x0000ffff, - 0x8bfe7e7e, 0x8bea6a6a, - 0x85788978, 0xb9783244, + 0x8b6eff77, 0x0c000000, + 0x916dff6d, 0x0c000000, + 0x8c6d6e6d, 0xf4601bbd, + 0xf8000010, 0xbf8a0000, + 0x846e976e, 0x9177ff77, + 0x00800000, 0x8c776e77, + 0xf4603bbd, 0xf8000000, + 0xbf8a0000, 0xf4603ebd, + 0xf8000008, 0xbf8a0000, + 0x8bee6e6e, 0xbfa10001, + 0xbe80486e, 0x8b6eff6d, + 0xf0000000, 0xbfa20009, + 0xb8eef811, 0x8b6eff6e, + 0x00000080, 0xbfa20007, + 0x8c78ff78, 0x00004000, + 0x80ec886c, 0x82ed806d, + 0xbfa00002, 0x806c846c, + 0x826d806d, 0x8b6dff6d, + 0x0000ffff, 0x8bfe7e7e, + 0x8bea6a6a, 0x85788978, + 0x936eff77, 0x0002001a, + 0xb96ef81a, 0xb9783244, 0xbe804a6c, 0xb8faf802, 0xbf0d987a, 0xbfa10001, 0xbfb00000, 0x8b6dff6d, @@ -3977,7 +3985,7 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0x008ce800, 0x00000000, 0x807d817d, 0x8070ff70, 0x00000080, 0xbf0a7b7d, - 0xbfa2fff7, 0xbfa0016e, + 0xbfa2fff7, 0xbfa00171, 0xbef4007e, 0x8b75ff7f, 0x0000ffff, 0x8c75ff75, 0x00040000, 0xbef60080, @@ -4159,10 +4167,12 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0xf8000074, 0xbf8a0000, 0x8b6dff6d, 0x0000ffff, 0x8bfe7e7e, 0x8bea6a6a, - 0xb97af804, 0xbe804ec2, - 0xbf94fffe, 0xbe804a6c, + 0x936eff77, 0x0002001a, + 0xb96ef81a, 0xb97af804, 0xbe804ec2, 0xbf94fffe, - 0xbfb10000, 0xbf9f0000, + 0xbe804a6c, 0xbe804ec2, + 0xbf94fffe, 0xbfb10000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm index 5a1a1b1f897f..07999b4649de 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm @@ -78,9 +78,16 @@ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT + +var SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT = 0 +var SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE = 2 + var BARRIER_STATE_SIGNAL_OFFSET = 16 var BARRIER_STATE_VALID_OFFSET = 0
+var TTMP11_SCHED_MODE_SHIFT = 26 +var TTMP11_SCHED_MODE_SIZE = 2 +var TTMP11_SCHED_MODE_MASK = 0xC000000 var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23 var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
@@ -160,8 +167,19 @@ L_JUMP_TO_RESTORE: s_branch L_RESTORE
L_SKIP_RESTORE: + // Assume most relaxed scheduling mode is set. Save and revert to normal mode. + s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_SCHED_MODE) + s_wait_alu 0 + s_setreg_imm32_b32 hwreg(HW_REG_WAVE_SCHED_MODE, \ + SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT, SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE), 0 + s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATUS since we will change SCC
+ // Save SCHED_MODE[1:0] into ttmp11[27:26]. + s_andn2_b32 ttmp11, ttmp11, TTMP11_SCHED_MODE_MASK + s_lshl_b32 ttmp2, ttmp2, TTMP11_SCHED_MODE_SHIFT + s_or_b32 ttmp11, ttmp11, ttmp2 + // Clear SPI_PRIO: do not save with elevated priority. // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd. s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK @@ -238,6 +256,13 @@ L_FETCH_2ND_TRAP: s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA s_or_b32 ttmp15, ttmp15, 0xFFFF0000 L_NO_SIGN_EXTEND_TMA: +#if ASIC_FAMILY == CHIP_GFX12 + // Move SCHED_MODE[1:0] from ttmp11 to unused bits in ttmp1[27:26] (return PC_HI). + // The second-level trap will restore from ttmp1 for backwards compatibility. + s_and_b32 ttmp2, ttmp11, TTMP11_SCHED_MODE_MASK + s_andn2_b32 ttmp1, ttmp1, TTMP11_SCHED_MODE_MASK + s_or_b32 ttmp1, ttmp1, ttmp2 +#endif
s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag s_wait_idle @@ -287,6 +312,10 @@ L_EXIT_TRAP: // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it. // Only restore fields which the trap handler changes. s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT + + // Assume relaxed scheduling mode after this point. + restore_sched_mode(ttmp2) + s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \ SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
@@ -1043,6 +1072,9 @@ L_SKIP_BARRIER_RESTORE: s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ // Assume relaxed scheduling mode after this point. + restore_sched_mode(s_restore_tmp) + s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv // SCC is included, which is changed by previous salu
// Make barrier and LDS state visible to all waves in the group. @@ -1134,3 +1166,8 @@ function valu_sgpr_hazard end #endif end + +function restore_sched_mode(s_tmp) + s_bfe_u32 s_tmp, ttmp11, (TTMP11_SCHED_MODE_SHIFT | (TTMP11_SCHED_MODE_SIZE << 0x10)) + s_setreg_b32 hwreg(HW_REG_WAVE_SCHED_MODE), s_tmp +end diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c index 94937b824e98..0c6ef2919d87 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -408,6 +408,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv) vgpr_size = 0x80000; else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */ gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */ + gfxv == 110501 || /* GFX_VERSION_GFX1151 */ gfxv == 120000 || /* GFX_VERSION_GFX1200 */ gfxv == 120001) /* GFX_VERSION_GFX1201 */ vgpr_size = 0x60000; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 82da568604b6..7203fb32989b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -509,6 +509,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.num_sdma_queues_per_engine); sysfs_show_32bit_prop(buffer, offs, "num_cp_queues", dev->node_props.num_cp_queues); + sysfs_show_32bit_prop(buffer, offs, "cwsr_size", + dev->node_props.cwsr_size); + sysfs_show_32bit_prop(buffer, offs, "ctl_stack_size", + dev->node_props.ctl_stack_size);
if (dev->gpu) { log_max_watch_addr = diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index f06aaa6f1817..a2a70c1e9afd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -846,28 +846,26 @@ struct dsc_mst_fairness_params { };
#if defined(CONFIG_DRM_AMD_DC_FP) -static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn) +static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) { - uint64_t effective_kbps = (uint64_t)kbps; + u8 link_coding_cap; + uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
- if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps - effective_kbps *= 1006; - effective_kbps = div_u64(effective_kbps, 1000); - } + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); + if (link_coding_cap == DP_128b_132b_ENCODING) + fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
- return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000)); + return fec_overhead_multiplier_x1000; }
-static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin) +static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) { - uint64_t pbn_effective = (uint64_t)pbn; - - if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn - pbn_effective *= (1000000 / PEAK_FACTOR_X1000); - else - pbn_effective *= 1000; + u64 peak_kbps = kbps;
- return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64); + peak_kbps *= 1006; + peak_kbps *= fec_overhead_multiplier_x1000; + peak_kbps = div_u64(peak_kbps, 1000 * 1000); + return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); }
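For reference, the peak-PBN arithmetic introduced above can be exercised in isolation. The sketch below is a standalone userspace program, not kernel code; the 8b/10b multiplier value of 1031 mirrors what PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B is assumed to be, and the example stream rate is invented.

#include <stdio.h>
#include <stdint.h>

/* Standalone sketch of kbps_to_peak_pbn(); constants are assumptions. */
static int sketch_kbps_to_peak_pbn(int kbps, uint16_t fec_x1000)
{
	uint64_t peak = (uint64_t)kbps;

	peak *= 1006;			/* add 0.6% overhead margin */
	peak *= fec_x1000;		/* FEC overhead, scaled by 1000 */
	peak /= 1000ULL * 1000;		/* undo both x1000 scalings */

	/* one PBN unit corresponds to 54/64 MBps, hence 64 / (54 * 8 * 1000) */
	return (int)((peak * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000));
}

int main(void)
{
	/* e.g. a ~5.4 Gbit/s stream on an 8b/10b link */
	printf("%d PBN\n", sketch_kbps_to_peak_pbn(5400000, 1031));
	return 0;
}

The rounding direction matters: DIV64_U64_ROUND_UP in the kernel helper rounds up, so the reserved time slots never undershoot the stream's bandwidth requirement.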
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, @@ -938,7 +936,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
- kbps = pbn_to_kbps(pbn, false); + kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); dc_dsc_compute_config( param.sink->ctx->dc->res_pool->dscs[0], &param.sink->dsc_caps.dsc_dec_caps, @@ -967,11 +965,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, int link_timeslots_used; int fair_pbn_alloc; int ret = 0; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = - kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn; + kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -1067,6 +1066,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, int next_index; int remaining_to_try = 0; int ret; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); int var_pbn;
for (i = 0; i < count; i++) { @@ -1099,7 +1099,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); var_pbn = vars[next_index].pbn; - vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true); + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1159,6 +1159,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int count = 0; int i, k, ret; bool debugfs_overwrite = false; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params)); @@ -1239,7 +1240,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, @@ -1261,7 +1262,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1269,7 +1270,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (ret < 0) return ret; } else { - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1721,6 +1722,18 @@ int pre_validate_dsc(struct drm_atomic_state *state, return ret; }
+static uint32_t kbps_from_pbn(unsigned int pbn) +{ + uint64_t kbps = (uint64_t)pbn; + + kbps *= (1000000 / PEAK_FACTOR_X1000); + kbps *= 8; + kbps *= 54; + kbps /= 64; + + return (uint32_t)kbps; +} + static bool is_dsc_common_config_possible(struct dc_stream_state *stream, struct dc_dsc_bw_range *bw_range) { @@ -1812,7 +1825,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( dc_link_get_highest_encoding_format(stream->link)); cur_link_settings = stream->link->verified_link_cap; root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); - virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true); + virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
/* pick the end to end bw bottleneck */ end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); @@ -1863,7 +1876,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
if (immediate_upstream_port) { - virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true); + virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); } else { /* For topology LCT 1 case - only one mstb*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index ccbb15f1638c..01b065910468 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -93,7 +93,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, struct dc_plane_state *dc_create_plane_state(const struct dc *dc) { struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), - GFP_KERNEL); + GFP_ATOMIC);
if (NULL == plane_state) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index d0c4693c1224..92d5e3252d2d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -203,12 +203,12 @@ enum dcn35_clk_src_array_id { NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\ - REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX2_ ## reg_name + REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX1_ ## reg_name
#define NBIO_SR_ARR(reg_name, id)\ - REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX2_ ## reg_name + REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX1_ ## reg_name
#define bios_regs_init() \ ( \ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 575c0aa12229..6e27226cec28 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -183,12 +183,12 @@ enum dcn351_clk_src_array_id { NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\ - REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX2_ ## reg_name + REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX1_ ## reg_name
#define NBIO_SR_ARR(reg_name, id)\ - REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX2_ ## reg_name + REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX1_ ## reg_name
#define bios_regs_init() \ ( \ diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index 16dea3f2fb11..7debf079c943 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -9,8 +9,17 @@
#include <drm/drm_buddy.h>
+enum drm_buddy_free_tree { + DRM_BUDDY_CLEAR_TREE = 0, + DRM_BUDDY_DIRTY_TREE, + DRM_BUDDY_MAX_FREE_TREES, +}; + static struct kmem_cache *slab_blocks;
+#define for_each_free_tree(tree) \ + for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++) + static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, struct drm_buddy_block *parent, unsigned int order, @@ -28,6 +37,8 @@ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, block->header |= order; block->parent = parent;
+ RB_CLEAR_NODE(&block->rb); + BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED); return block; } @@ -38,23 +49,64 @@ static void drm_block_free(struct drm_buddy *mm, kmem_cache_free(slab_blocks, block); }
-static void list_insert_sorted(struct drm_buddy *mm, - struct drm_buddy_block *block) +static enum drm_buddy_free_tree +get_block_tree(struct drm_buddy_block *block) { - struct drm_buddy_block *node; - struct list_head *head; + return drm_buddy_block_is_clear(block) ? + DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; +}
- head = &mm->free_list[drm_buddy_block_order(block)]; - if (list_empty(head)) { - list_add(&block->link, head); - return; - } +static struct drm_buddy_block * +rbtree_get_free_block(const struct rb_node *node) +{ + return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL; +}
- list_for_each_entry(node, head, link) - if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node)) - break; +static struct drm_buddy_block * +rbtree_last_free_block(struct rb_root *root) +{ + return rbtree_get_free_block(rb_last(root)); +}
- __list_add(&block->link, node->link.prev, &node->link); +static bool rbtree_is_empty(struct rb_root *root) +{ + return RB_EMPTY_ROOT(root); +} + +static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block, + const struct drm_buddy_block *node) +{ + return drm_buddy_block_offset(block) < drm_buddy_block_offset(node); +} + +static bool rbtree_block_offset_less(struct rb_node *block, + const struct rb_node *node) +{ + return drm_buddy_block_offset_less(rbtree_get_free_block(block), + rbtree_get_free_block(node)); +} + +static void rbtree_insert(struct drm_buddy *mm, + struct drm_buddy_block *block, + enum drm_buddy_free_tree tree) +{ + rb_add(&block->rb, + &mm->free_trees[tree][drm_buddy_block_order(block)], + rbtree_block_offset_less); +} + +static void rbtree_remove(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + unsigned int order = drm_buddy_block_order(block); + enum drm_buddy_free_tree tree; + struct rb_root *root; + + tree = get_block_tree(block); + root = &mm->free_trees[tree][order]; + + rb_erase(&block->rb, root); + RB_CLEAR_NODE(&block->rb); }
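The ordering established by the rb_add() comparator above can be pictured with a plain userspace analogy (this is not the kernel rbtree API, and the offsets are invented): free blocks of a given order are kept sorted by start offset, so the maximum element is always the highest-offset block, which is what the top-down allocation path picks via rb_last().

#include <stdio.h>
#include <stdlib.h>

struct fake_block {
	unsigned long long offset;
};

static int by_offset(const void *a, const void *b)
{
	const struct fake_block *x = a, *y = b;

	return (x->offset > y->offset) - (x->offset < y->offset);
}

int main(void)
{
	struct fake_block free_blocks[] = {
		{ 0x40000 }, { 0x00000 }, { 0x80000 }, { 0x20000 },
	};
	size_t n = sizeof(free_blocks) / sizeof(free_blocks[0]);

	/* stand-in for inserting with an offset-ordered "less" comparator */
	qsort(free_blocks, n, sizeof(free_blocks[0]), by_offset);

	/* stand-in for rbtree_last_free_block(): highest offset wins */
	printf("top-down pick: 0x%llx\n", free_blocks[n - 1].offset);
	return 0;
}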
static void clear_reset(struct drm_buddy_block *block) @@ -67,29 +119,34 @@ static void mark_cleared(struct drm_buddy_block *block) block->header |= DRM_BUDDY_HEADER_CLEAR; }
-static void mark_allocated(struct drm_buddy_block *block) +static void mark_allocated(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_ALLOCATED;
- list_del(&block->link); + rbtree_remove(mm, block); }
static void mark_free(struct drm_buddy *mm, struct drm_buddy_block *block) { + enum drm_buddy_free_tree tree; + block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_FREE;
- list_insert_sorted(mm, block); + tree = get_block_tree(block); + rbtree_insert(mm, block, tree); }
-static void mark_split(struct drm_buddy_block *block) +static void mark_split(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_SPLIT;
- list_del(&block->link); + rbtree_remove(mm, block); }
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) @@ -145,7 +202,7 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm, mark_cleared(parent); }
- list_del(&buddy->link); + rbtree_remove(mm, buddy); if (force_merge && drm_buddy_block_is_clear(buddy)) mm->clear_avail -= drm_buddy_block_size(mm, buddy);
@@ -166,7 +223,7 @@ static int __force_merge(struct drm_buddy *mm, u64 end, unsigned int min_order) { - unsigned int order; + unsigned int tree, order; int i;
if (!min_order) @@ -175,44 +232,48 @@ static int __force_merge(struct drm_buddy *mm, if (min_order > mm->max_order) return -EINVAL;
- for (i = min_order - 1; i >= 0; i--) { - struct drm_buddy_block *block, *prev; + for_each_free_tree(tree) { + for (i = min_order - 1; i >= 0; i--) { + struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
- list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) { - struct drm_buddy_block *buddy; - u64 block_start, block_end; + while (iter) { + struct drm_buddy_block *block, *buddy; + u64 block_start, block_end;
- if (!block->parent) - continue; + block = rbtree_get_free_block(iter); + iter = rb_prev(iter);
- block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block) - 1; + if (!block || !block->parent) + continue;
- if (!contains(start, end, block_start, block_end)) - continue; + block_start = drm_buddy_block_offset(block); + block_end = block_start + drm_buddy_block_size(mm, block) - 1;
- buddy = __get_buddy(block); - if (!drm_buddy_block_is_free(buddy)) - continue; + if (!contains(start, end, block_start, block_end)) + continue;
- WARN_ON(drm_buddy_block_is_clear(block) == - drm_buddy_block_is_clear(buddy)); + buddy = __get_buddy(block); + if (!drm_buddy_block_is_free(buddy)) + continue;
- /* - * If the prev block is same as buddy, don't access the - * block in the next iteration as we would free the - * buddy block as part of the free function. - */ - if (prev == buddy) - prev = list_prev_entry(prev, link); + WARN_ON(drm_buddy_block_is_clear(block) == + drm_buddy_block_is_clear(buddy));
- list_del(&block->link); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + /* + * Advance to the next node when the current node is the buddy, + * as freeing the block will also remove its buddy from the tree. + */ + if (iter == &buddy->rb) + iter = rb_prev(iter);
- order = __drm_buddy_free(mm, block, true); - if (order >= min_order) - return 0; + rbtree_remove(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); + + order = __drm_buddy_free(mm, block, true); + if (order >= min_order) + return 0; + } } }
@@ -233,8 +294,8 @@ static int __force_merge(struct drm_buddy *mm, */ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) { - unsigned int i; - u64 offset; + unsigned int i, j, root_count = 0; + u64 offset = 0;
if (size < chunk_size) return -EINVAL; @@ -255,14 +316,22 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
- mm->free_list = kmalloc_array(mm->max_order + 1, - sizeof(struct list_head), - GFP_KERNEL); - if (!mm->free_list) + mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES, + sizeof(*mm->free_trees), + GFP_KERNEL); + if (!mm->free_trees) return -ENOMEM;
- for (i = 0; i <= mm->max_order; ++i) - INIT_LIST_HEAD(&mm->free_list[i]); + for_each_free_tree(i) { + mm->free_trees[i] = kmalloc_array(mm->max_order + 1, + sizeof(struct rb_root), + GFP_KERNEL); + if (!mm->free_trees[i]) + goto out_free_tree; + + for (j = 0; j <= mm->max_order; ++j) + mm->free_trees[i][j] = RB_ROOT; + }
mm->n_roots = hweight64(size);
@@ -270,10 +339,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) sizeof(struct drm_buddy_block *), GFP_KERNEL); if (!mm->roots) - goto out_free_list; - - offset = 0; - i = 0; + goto out_free_tree;
/* * Split into power-of-two blocks, in case we are given a size that is @@ -293,24 +359,26 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
mark_free(mm, root);
- BUG_ON(i > mm->max_order); + BUG_ON(root_count > mm->max_order); BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
- mm->roots[i] = root; + mm->roots[root_count] = root;
offset += root_size; size -= root_size; - i++; + root_count++; } while (size);
return 0;
out_free_roots: - while (i--) - drm_block_free(mm, mm->roots[i]); + while (root_count--) + drm_block_free(mm, mm->roots[root_count]); kfree(mm->roots); -out_free_list: - kfree(mm->free_list); +out_free_tree: + while (i--) + kfree(mm->free_trees[i]); + kfree(mm->free_trees); return -ENOMEM; } EXPORT_SYMBOL(drm_buddy_init); @@ -320,7 +388,7 @@ EXPORT_SYMBOL(drm_buddy_init); * * @mm: DRM buddy manager to free * - * Cleanup memory manager resources and the freelist + * Cleanup memory manager resources and the freetree */ void drm_buddy_fini(struct drm_buddy *mm) { @@ -344,8 +412,9 @@ void drm_buddy_fini(struct drm_buddy *mm)
WARN_ON(mm->avail != mm->size);
+ for_each_free_tree(i) + kfree(mm->free_trees[i]); kfree(mm->roots); - kfree(mm->free_list); } EXPORT_SYMBOL(drm_buddy_fini);
@@ -369,8 +438,7 @@ static int split_block(struct drm_buddy *mm, return -ENOMEM; }
- mark_free(mm, block->left); - mark_free(mm, block->right); + mark_split(mm, block);
if (drm_buddy_block_is_clear(block)) { mark_cleared(block->left); @@ -378,7 +446,8 @@ static int split_block(struct drm_buddy *mm, clear_reset(block); }
- mark_split(block); + mark_free(mm, block->left); + mark_free(mm, block->right);
return 0; } @@ -407,10 +476,11 @@ EXPORT_SYMBOL(drm_get_buddy); * @is_clear: blocks clear state * * Reset the clear state based on @is_clear value for each block - * in the freelist. + * in the freetree. */ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) { + enum drm_buddy_free_tree src_tree, dst_tree; u64 root_size, size, start; unsigned int order; int i; @@ -425,19 +495,24 @@ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) size -= root_size; }
+ src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + for (i = 0; i <= mm->max_order; ++i) { - struct drm_buddy_block *block; - - list_for_each_entry_reverse(block, &mm->free_list[i], link) { - if (is_clear != drm_buddy_block_is_clear(block)) { - if (is_clear) { - mark_cleared(block); - mm->clear_avail += drm_buddy_block_size(mm, block); - } else { - clear_reset(block); - mm->clear_avail -= drm_buddy_block_size(mm, block); - } + struct rb_root *root = &mm->free_trees[src_tree][i]; + struct drm_buddy_block *block, *tmp; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + rbtree_remove(mm, block); + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); } + + rbtree_insert(mm, block, dst_tree); } } } @@ -627,23 +702,17 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm, }
static struct drm_buddy_block * -get_maxblock(struct drm_buddy *mm, unsigned int order, - unsigned long flags) +get_maxblock(struct drm_buddy *mm, + unsigned int order, + enum drm_buddy_free_tree tree) { struct drm_buddy_block *max_block = NULL, *block = NULL; + struct rb_root *root; unsigned int i;
for (i = order; i <= mm->max_order; ++i) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + root = &mm->free_trees[tree][i]; + block = rbtree_last_free_block(root); if (!block) continue;
@@ -662,46 +731,44 @@ get_maxblock(struct drm_buddy *mm, unsigned int order, }
static struct drm_buddy_block * -alloc_from_freelist(struct drm_buddy *mm, +alloc_from_freetree(struct drm_buddy *mm, unsigned int order, unsigned long flags) { struct drm_buddy_block *block = NULL; + struct rb_root *root; + enum drm_buddy_free_tree tree; unsigned int tmp; int err;
+ tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ? + DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { - block = get_maxblock(mm, order, flags); + block = get_maxblock(mm, order, tree); if (block) /* Store the obtained block order */ tmp = drm_buddy_block_order(block); } else { for (tmp = order; tmp <= mm->max_order; ++tmp) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + /* Get RB tree root for this order and tree */ + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); if (block) break; } }
if (!block) { - /* Fallback method */ + /* Try allocating from the other tree */ + tree = (tree == DRM_BUDDY_CLEAR_TREE) ? + DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + for (tmp = order; tmp <= mm->max_order; ++tmp) { - if (!list_empty(&mm->free_list[tmp])) { - block = list_last_entry(&mm->free_list[tmp], - struct drm_buddy_block, - link); - if (block) - break; - } + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); + if (block) + break; }
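A minimal sketch of the tree-selection policy above, assuming DRM_BUDDY_CLEAR_ALLOCATION simply expresses a preference for already-cleared memory: try the matching tree first, then fall back to the other one. Userspace illustration only, not the drm_buddy API.

#include <stdbool.h>
#include <stdio.h>

enum free_tree { CLEAR_TREE, DIRTY_TREE, MAX_TREES };

static enum free_tree pick_tree(bool want_clear)
{
	return want_clear ? CLEAR_TREE : DIRTY_TREE;
}

static enum free_tree other_tree(enum free_tree t)
{
	return t == CLEAR_TREE ? DIRTY_TREE : CLEAR_TREE;
}

int main(void)
{
	/* pretend only the dirty tree has a block of the requested order */
	bool has_block[MAX_TREES] = { [CLEAR_TREE] = false, [DIRTY_TREE] = true };
	enum free_tree t = pick_tree(true); /* caller asked for cleared memory */

	if (!has_block[t])
		t = other_tree(t); /* fallback, mirrors the second loop above */

	printf("allocating from the %s tree\n", t == CLEAR_TREE ? "clear" : "dirty");
	return 0;
}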
if (!block) @@ -766,7 +833,7 @@ static int __alloc_range(struct drm_buddy *mm,
if (contains(start, end, block_start, block_end)) { if (drm_buddy_block_is_free(block)) { - mark_allocated(block); + mark_allocated(mm, block); total_allocated += drm_buddy_block_size(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) @@ -844,10 +911,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, { u64 rhs_offset, lhs_offset, lhs_size, filled; struct drm_buddy_block *block; - struct list_head *list; + unsigned int tree, order; LIST_HEAD(blocks_lhs); unsigned long pages; - unsigned int order; u64 modify_size; int err;
@@ -857,35 +923,45 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, if (order == 0) return -ENOSPC;
- list = &mm->free_list[order]; - if (list_empty(list)) - return -ENOSPC; + for_each_free_tree(tree) { + struct rb_root *root; + struct rb_node *iter; + + root = &mm->free_trees[tree][order]; + if (rbtree_is_empty(root)) + continue;
- list_for_each_entry_reverse(block, list, link) { - /* Allocate blocks traversing RHS */ - rhs_offset = drm_buddy_block_offset(block); - err = __drm_buddy_alloc_range(mm, rhs_offset, size, - &filled, blocks); - if (!err || err != -ENOSPC) - return err; - - lhs_size = max((size - filled), min_block_size); - if (!IS_ALIGNED(lhs_size, min_block_size)) - lhs_size = round_up(lhs_size, min_block_size); - - /* Allocate blocks traversing LHS */ - lhs_offset = drm_buddy_block_offset(block) - lhs_size; - err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, - NULL, &blocks_lhs); - if (!err) { - list_splice(&blocks_lhs, blocks); - return 0; - } else if (err != -ENOSPC) { + iter = rb_last(root); + while (iter) { + block = rbtree_get_free_block(iter); + + /* Allocate blocks traversing RHS */ + rhs_offset = drm_buddy_block_offset(block); + err = __drm_buddy_alloc_range(mm, rhs_offset, size, + &filled, blocks); + if (!err || err != -ENOSPC) + return err; + + lhs_size = max((size - filled), min_block_size); + if (!IS_ALIGNED(lhs_size, min_block_size)) + lhs_size = round_up(lhs_size, min_block_size); + + /* Allocate blocks traversing LHS */ + lhs_offset = drm_buddy_block_offset(block) - lhs_size; + err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, + NULL, &blocks_lhs); + if (!err) { + list_splice(&blocks_lhs, blocks); + return 0; + } else if (err != -ENOSPC) { + drm_buddy_free_list_internal(mm, blocks); + return err; + } + /* Free blocks for the next iteration */ drm_buddy_free_list_internal(mm, blocks); - return err; + + iter = rb_prev(iter); } - /* Free blocks for the next iteration */ - drm_buddy_free_list_internal(mm, blocks); }
return -ENOSPC; @@ -971,7 +1047,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm, list_add(&block->tmp_link, &dfs); err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); if (err) { - mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -994,8 +1070,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, return __drm_buddy_alloc_range_bias(mm, start, end, order, flags); else - /* Allocate from freelist */ - return alloc_from_freelist(mm, order, flags); + /* Allocate from freetree */ + return alloc_from_freetree(mm, order, flags); }
/** @@ -1012,8 +1088,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, * alloc_range_bias() called on range limitations, which traverses * the tree and returns the desired block. * - * alloc_from_freelist() called when *no* range restrictions - * are enforced, which picks the block from the freelist. + * alloc_from_freetree() called when *no* range restrictions + * are enforced, which picks the block from the freetree. * * Returns: * 0 on success, error code on failure. @@ -1115,7 +1191,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, } } while (1);
- mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -1196,12 +1272,18 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
for (order = mm->max_order; order >= 0; order--) { - struct drm_buddy_block *block; + struct drm_buddy_block *block, *tmp; + struct rb_root *root; u64 count = 0, free; + unsigned int tree;
- list_for_each_entry(block, &mm->free_list[order], link) { - BUG_ON(!drm_buddy_block_is_free(block)); - count++; + for_each_free_tree(tree) { + root = &mm->free_trees[tree][order]; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + BUG_ON(!drm_buddy_block_is_free(block)); + count++; + } }
drm_printf(p, "order-%2d ", order); diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c index b4fd43783c50..58d0bb6d2676 100644 --- a/drivers/gpu/drm/drm_displayid.c +++ b/drivers/gpu/drm/drm_displayid.c @@ -9,6 +9,34 @@ #include "drm_crtc_internal.h" #include "drm_displayid_internal.h"
+enum { + QUIRK_IGNORE_CHECKSUM, +}; + +struct displayid_quirk { + const struct drm_edid_ident ident; + u8 quirks; +}; + +static const struct displayid_quirk quirks[] = { + { + .ident = DRM_EDID_IDENT_INIT('C', 'S', 'O', 5142, "MNE007ZA1-5"), + .quirks = BIT(QUIRK_IGNORE_CHECKSUM), + }, +}; + +static u8 get_quirks(const struct drm_edid *drm_edid) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(quirks); i++) { + if (drm_edid_match(drm_edid, &quirks[i].ident)) + return quirks[i].quirks; + } + + return 0; +} + static const struct displayid_header * displayid_get_header(const u8 *displayid, int length, int index) { @@ -23,7 +51,7 @@ displayid_get_header(const u8 *displayid, int length, int index) }
static const struct displayid_header * -validate_displayid(const u8 *displayid, int length, int idx) +validate_displayid(const u8 *displayid, int length, int idx, bool ignore_checksum) { int i, dispid_length; u8 csum = 0; @@ -41,33 +69,35 @@ validate_displayid(const u8 *displayid, int length, int idx) for (i = 0; i < dispid_length; i++) csum += displayid[idx + i]; if (csum) { - DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); - return ERR_PTR(-EINVAL); + DRM_NOTE("DisplayID checksum invalid, remainder is %d%s\n", csum, + ignore_checksum ? " (ignoring)" : ""); + + if (!ignore_checksum) + return ERR_PTR(-EINVAL); }
return base; }
-static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, - int *length, int *idx, - int *ext_index) +static const u8 *find_next_displayid_extension(struct displayid_iter *iter) { const struct displayid_header *base; const u8 *displayid; + bool ignore_checksum = iter->quirks & BIT(QUIRK_IGNORE_CHECKSUM);
- displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index); + displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index); if (!displayid) return NULL;
/* EDID extensions block checksum isn't for us */ - *length = EDID_LENGTH - 1; - *idx = 1; + iter->length = EDID_LENGTH - 1; + iter->idx = 1;
- base = validate_displayid(displayid, *length, *idx); + base = validate_displayid(displayid, iter->length, iter->idx, ignore_checksum); if (IS_ERR(base)) return NULL;
- *length = *idx + sizeof(*base) + base->bytes; + iter->length = iter->idx + sizeof(*base) + base->bytes;
return displayid; } @@ -78,6 +108,7 @@ void displayid_iter_edid_begin(const struct drm_edid *drm_edid, memset(iter, 0, sizeof(*iter));
iter->drm_edid = drm_edid; + iter->quirks = get_quirks(drm_edid); }
static const struct displayid_block * @@ -126,10 +157,7 @@ __displayid_iter_next(struct displayid_iter *iter) /* The first section we encounter is the base section */ bool base_section = !iter->section;
- iter->section = drm_find_displayid_extension(iter->drm_edid, - &iter->length, - &iter->idx, - &iter->ext_index); + iter->section = find_next_displayid_extension(iter); if (!iter->section) { iter->drm_edid = NULL; return NULL; diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h index aee1b86a73c1..01ae9812340c 100644 --- a/drivers/gpu/drm/drm_displayid_internal.h +++ b/drivers/gpu/drm/drm_displayid_internal.h @@ -154,6 +154,8 @@ struct displayid_iter {
u8 version; u8 primary_use; + + u8 quirks; };
void displayid_iter_edid_begin(const struct drm_edid *drm_edid, diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c index 98b44974d42d..8e3128298728 100644 --- a/drivers/gpu/drm/gma500/fbdev.c +++ b/drivers/gpu/drm/gma500/fbdev.c @@ -51,48 +51,6 @@ static const struct vm_operations_struct psb_fbdev_vm_ops = { * struct fb_ops */
-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16) - -static int psb_fbdev_fb_setcolreg(unsigned int regno, - unsigned int red, unsigned int green, - unsigned int blue, unsigned int transp, - struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - uint32_t v; - - if (!fb) - return -ENOMEM; - - if (regno > 255) - return 1; - - red = CMAP_TOHW(red, info->var.red.length); - blue = CMAP_TOHW(blue, info->var.blue.length); - green = CMAP_TOHW(green, info->var.green.length); - transp = CMAP_TOHW(transp, info->var.transp.length); - - v = (red << info->var.red.offset) | - (green << info->var.green.offset) | - (blue << info->var.blue.offset) | - (transp << info->var.transp.offset); - - if (regno < 16) { - switch (fb->format->cpp[0] * 8) { - case 16: - ((uint32_t *) info->pseudo_palette)[regno] = v; - break; - case 24: - case 32: - ((uint32_t *) info->pseudo_palette)[regno] = v; - break; - } - } - - return 0; -} - static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { if (vma->vm_pgoff != 0) @@ -137,7 +95,6 @@ static const struct fb_ops psb_fbdev_fb_ops = { .owner = THIS_MODULE, __FB_DEFAULT_IOMEM_OPS_RDWR, DRM_FB_HELPER_DEFAULT_OPS, - .fb_setcolreg = psb_fbdev_fb_setcolreg, __FB_DEFAULT_IOMEM_OPS_DRAW, .fb_mmap = psb_fbdev_fb_mmap, .fb_destroy = psb_fbdev_fb_destroy, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index a3b83cfe1726..299d760078d4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -951,13 +951,13 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) vma = eb_lookup_vma(eb, eb->exec[i].handle); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto err; + return err; }
err = eb_validate_vma(eb, &eb->exec[i], vma); if (unlikely(err)) { i915_vma_put(vma); - goto err; + return err; }
err = eb_add_vma(eb, ¤t_batch, i, vma); @@ -966,19 +966,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
if (i915_gem_object_is_userptr(vma->obj)) { err = i915_gem_object_userptr_submit_init(vma->obj); - if (err) { - if (i + 1 < eb->buffer_count) { - /* - * Execbuffer code expects last vma entry to be NULL, - * since we already initialized this entry, - * set the next value to NULL or we mess up - * cleanup handling. - */ - eb->vma[i + 1].vma = NULL; - } - + if (err) return err; - }
eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT; eb->args->flags |= __EXEC_USERPTR_USED; @@ -986,10 +975,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) }
return 0; - -err: - eb->vma[i].vma = NULL; - return err; }
static int eb_lock_vmas(struct i915_execbuffer *eb) @@ -3374,7 +3359,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.exec = exec; eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1); - eb.vma[0].vma = NULL; + memset(eb.vma, 0, (args->buffer_count + 1) * sizeof(struct eb_vma)); + eb.batch_pool = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; @@ -3583,7 +3569,18 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, if (err) return err;
- /* Allocate extra slots for use by the command parser */ + /* + * Allocate extra slots for use by the command parser. + * + * Note that this allocation handles two different arrays (the + * exec2_list array, and the eventual eb.vma array introduced in + * i915_gem_do_execbuffer()), that reside in virtually contiguous + * memory. Also note that the allocation intentionally doesn't fill the + * area with zeros, because the exec2_list part doesn't need to be, as + * it's immediately overwritten by user data a few lines below. + * However, the eb.vma part is explicitly zeroed later in + * i915_gem_do_execbuffer(). + */ exec2_list = kvmalloc_array(count + 2, eb_element_size(), __GFP_NOWARN | GFP_KERNEL); if (exec2_list == NULL) { diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 5973b6fe13cf..9039c0744100 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -72,7 +72,7 @@ struct intel_memory_region { u16 instance; enum intel_region_id id; char name[16]; - char uabi_name[16]; + char uabi_name[20]; bool private; /* not for userspace */
struct { diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c index 6a8c81fe8c1e..59e7bbe64a3f 100644 --- a/drivers/gpu/drm/imagination/pvr_gem.c +++ b/drivers/gpu/drm/imagination/pvr_gem.c @@ -27,6 +27,16 @@ static void pvr_gem_object_free(struct drm_gem_object *obj) drm_gem_shmem_object_free(obj); }
+static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags) +{ + struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj); + + if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT) + return ERR_PTR(-EPERM); + + return drm_gem_prime_export(obj, flags); +} + static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma) { struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj); @@ -41,6 +51,7 @@ static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *v static const struct drm_gem_object_funcs pvr_gem_object_funcs = { .free = pvr_gem_object_free, .print_info = drm_gem_shmem_object_print_info, + .export = pvr_gem_export, .pin = drm_gem_shmem_object_pin, .unpin = drm_gem_shmem_object_unpin, .get_sg_table = drm_gem_shmem_object_get_sg_table, diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index ac6620e10262..9672ea1f91a2 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -621,15 +621,27 @@ int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev) return ret; }
-int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, +static void mtk_ddp_comp_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + +static void mtk_ddp_comp_clk_put(void *_clk) +{ + struct clk *clk = _clk; + + clk_put(clk); +} + +int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp, unsigned int comp_id) { struct platform_device *comp_pdev; enum mtk_ddp_comp_type type; struct mtk_ddp_comp_dev *priv; -#if IS_REACHABLE(CONFIG_MTK_CMDQ) int ret; -#endif
if (comp_id >= DDP_COMPONENT_DRM_ID_MAX) return -EINVAL; @@ -651,6 +663,10 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, } comp->dev = &comp_pdev->dev;
+ ret = devm_add_action_or_reset(dev, mtk_ddp_comp_put_device, comp->dev); + if (ret) + return ret; + if (type == MTK_DISP_AAL || type == MTK_DISP_BLS || type == MTK_DISP_CCORR || @@ -666,15 +682,22 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, type == MTK_DSI) return 0;
- priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM;
- priv->regs = of_iomap(node, 0); + priv->regs = devm_of_iomap(dev, node, 0, NULL); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + priv->clk = of_clk_get(node, 0); if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk);
+ ret = devm_add_action_or_reset(dev, mtk_ddp_comp_clk_put, priv->clk); + if (ret) + return ret; + #if IS_REACHABLE(CONFIG_MTK_CMDQ) ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0); if (ret) diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index 7289b3dcf22f..3f3d43f4330d 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -350,7 +350,7 @@ static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp) int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev); -int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp, +int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, struct mtk_ddp_comp *comp, unsigned int comp_id); enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id); void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value, diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index 4979d49ae25a..c39b8f19c5b6 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2067,6 +2067,7 @@ static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp, endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1); len = of_property_count_elems_of_size(endpoint, "data-lanes", sizeof(u32)); + of_node_put(endpoint); if (len < 0 || len > 4 || len == 3) { dev_err(dev, "invalid data lane size: %d\n", len); return -EINVAL; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index f210c729f1b1..e169e67e3cf0 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -874,7 +874,7 @@ static int mtk_drm_probe(struct platform_device *pdev) (void *)private->mmsys_dev, sizeof(*private->mmsys_dev)); private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR].dev = &ovl_adaptor->dev; - mtk_ddp_comp_init(NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR], + mtk_ddp_comp_init(dev, NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR], DDP_COMPONENT_DRM_OVL_ADAPTOR); component_match_add(dev, &match, compare_dev, &ovl_adaptor->dev); } @@ -943,7 +943,7 @@ static int mtk_drm_probe(struct platform_device *pdev) node); }
- ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id); + ret = mtk_ddp_comp_init(dev, node, &private->ddp_comp[comp_id], comp_id); if (ret) { of_node_put(node); goto err_node; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 6067d08aeee3..35090442cc47 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -175,6 +175,30 @@ static void mgag200_set_startadd(struct mga_device *mdev, WREG_ECRT(0x00, crtcext0); }
+/* + * Set the opmode for the hardware swapper for Big-Endian processor + * support for the frame buffer aperture and DMAWIN space. + */ +static void mgag200_set_datasiz(struct mga_device *mdev, u32 format) +{ +#if defined(__BIG_ENDIAN) + u32 opmode = RREG32(MGAREG_OPMODE); + + opmode &= ~(GENMASK(17, 16) | GENMASK(9, 8) | GENMASK(3, 2)); + + /* Big-endian byte-swapping */ + switch (format) { + case DRM_FORMAT_RGB565: + opmode |= 0x10100; + break; + case DRM_FORMAT_XRGB8888: + opmode |= 0x20200; + break; + } + WREG32(MGAREG_OPMODE, opmode); +#endif +} + void mgag200_init_registers(struct mga_device *mdev) { u8 crtc11, misc; @@ -510,6 +534,7 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_helper_damage_iter iter; struct drm_rect damage;
+ mgag200_set_datasiz(mdev, fb->format->format); drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); drm_atomic_for_each_plane_damage(&iter, &damage) { mgag200_handle_damage(mdev, shadow_plane_state->data, fb, &damage); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 159665cb6b14..5d7d2f5a2a1f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -1231,7 +1231,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, return;
/* Set the fence to ALLOW mode so we can access the registers */ - gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2], &a6xx_state->gmu_registers[2], false); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 4597fdb65358..a5beaeec4f55 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -243,14 +243,12 @@ static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc) if (hw_cdm) intf_cfg.cdm = hw_cdm->idx;
- if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode) - phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, - mode_3d); + if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode) + hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d, mode_3d);
/* setup which pp blk will connect to this wb */ - if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk) - phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, - phys_enc->hw_pp->idx); + if (hw_pp && hw_wb->ops.bind_pingpong_blk) + hw_wb->ops.bind_pingpong_blk(hw_wb, hw_pp->idx);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h index 93f8f4f64578..b43c4f9bbcdf 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/atom.h +++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h @@ -152,8 +152,21 @@ static inline struct nv50_head_atom * nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(statec)) return (void *)statec; + + return nv50_head_atom(statec); +} + +static inline struct nv50_head_atom * +nv50_head_atom_get_new(struct drm_atomic_state *state, struct drm_crtc *crtc) +{ + struct drm_crtc_state *statec = drm_atomic_get_new_crtc_state(state, crtc); + + if (!statec) + return NULL; + return nv50_head_atom(statec); }
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 1199dfc1194c..527fabc5b1ba 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -567,7 +567,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) { - asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc); + asyh = nv50_head_atom_get_new(asyw->state.state, asyw->state.crtc); if (IS_ERR(asyh)) return PTR_ERR(asyh);
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c index 472195d4bbbe..9ac3e0759efc 100644 --- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c +++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c @@ -274,6 +274,8 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi) if (ret) return dev_err_probe(dev, ret, "Failed to get backlight\n");
+ ctx->panel.prepare_prev_first = true; + drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi); diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index 0438b80a6434..09b318fb8e7c 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -214,6 +214,23 @@ panthor_gem_create_with_handle(struct drm_file *file, bo->base.base.resv = bo->exclusive_vm_root_gem->resv; }
+ /* If this is a write-combine mapping, we query the sgt to force a CPU + * cache flush (dma_map_sgtable() is called when the sgt is created). + * This ensures the zero-ing is visible to any uncached mapping created + * by vmap/mmap. + * FIXME: Ideally this should be done when pages are allocated, not at + * BO creation time. + */ + if (shmem->map_wc) { + struct sg_table *sgt; + + sgt = drm_gem_shmem_get_pages_sgt(shmem); + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + goto out_put_gem; + } + } + /* * Allocate an id of idr table where the obj is registered * and handle has the id what user can see. @@ -222,6 +239,7 @@ panthor_gem_create_with_handle(struct drm_file *file, if (!ret) *size = bo->base.base.size;
+out_put_gem: /* drop reference from allocate - handle holds it now. */ drm_gem_object_put(&shmem->base);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 4212b8c91dd4..d69d71ed7dd9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -421,6 +421,11 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, if (ret) return ret;
+ if (!bo->resource) { + ret = -ENODATA; + goto unlock; + } + switch (bo->resource->mem_type) { case TTM_PL_SYSTEM: fallthrough; @@ -435,6 +440,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, ret = -EIO; }
+unlock: ttm_bo_unreserve(bo);
return ret; diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index b71156e9976a..b02b40e68297 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -1041,7 +1041,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo) * always succeed here, as long as we hold the lru lock. */ spin_lock(&ttm_bo->bdev->lru_lock); - locked = dma_resv_trylock(ttm_bo->base.resv); + locked = dma_resv_trylock(&ttm_bo->base._resv); spin_unlock(&ttm_bo->bdev->lru_lock); xe_assert(xe, locked);
@@ -1061,13 +1061,6 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo) bo = ttm_to_xe_bo(ttm_bo); xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
- /* - * Corner case where TTM fails to allocate memory and this BOs resv - * still points the VMs resv - */ - if (ttm_bo->base.resv != &ttm_bo->base._resv) - return; - if (!xe_ttm_bo_lock_in_destructor(ttm_bo)) return;
@@ -1077,14 +1070,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo) * TODO: Don't do this for external bos once we scrub them after * unbind. */ - dma_resv_for_each_fence(&cursor, ttm_bo->base.resv, + dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, fence) { if (xe_fence_is_xe_preempt(fence) && !dma_fence_is_signaled(fence)) { if (!replacement) replacement = dma_fence_get_stub();
- dma_resv_replace_fences(ttm_bo->base.resv, + dma_resv_replace_fences(&ttm_bo->base._resv, fence->context, replacement, DMA_RESV_USAGE_BOOKKEEP); @@ -1092,7 +1085,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo) } dma_fence_put(replacement);
- dma_resv_unlock(ttm_bo->base.resv); + dma_resv_unlock(&ttm_bo->base._resv); }
static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo) diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index a41f453bab59..ac8738da4a64 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -111,7 +111,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach, case XE_PL_TT: sgt = drm_prime_pages_to_sg(obj->dev, bo->ttm.ttm->pages, - bo->ttm.ttm->num_pages); + obj->size >> PAGE_SHIFT); if (IS_ERR(sgt)) return sgt;
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 31cca938956f..886d03ccf744 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -125,7 +125,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || - XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) + XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]) || + XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS)) return -EINVAL;
q = xe_exec_queue_lookup(xef, args->exec_queue_id); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index de011f5629fd..292947e44a8a 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -721,9 +721,6 @@ static int do_gt_restart(struct xe_gt *gt) xe_gt_sriov_pf_init_hw(gt);
xe_mocs_init(gt); - err = xe_uc_start(&gt->uc); - if (err) - return err;
for_each_hw_engine(hwe, gt, id) { xe_reg_sr_apply_mmio(&hwe->reg_sr, gt); @@ -733,6 +730,10 @@ static int do_gt_restart(struct xe_gt *gt) /* Get CCS mode in sync between sw/hw */ xe_gt_apply_ccs_mode(gt);
+ err = xe_uc_start(&gt->uc); + if (err) + return err; + /* Restore GT freq to expected values */ xe_gt_sanitize_freq(gt);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 03d674e9e807..f316be1e9645 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -578,6 +578,24 @@ static u32 wq_space_until_wrap(struct xe_exec_queue *q) return (WQ_SIZE - q->guc->wqi_tail); }
+static inline void relaxed_ms_sleep(unsigned int delay_ms) +{ + unsigned long min_us, max_us; + + if (!delay_ms) + return; + + if (delay_ms > 20) { + msleep(delay_ms); + return; + } + + min_us = mul_u32_u32(delay_ms, 1000); + max_us = min_us + 500; + + usleep_range(min_us, max_us); +} + static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size) { struct xe_guc *guc = exec_queue_to_guc(q); @@ -1356,7 +1374,7 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg) since_resume_ms;
if (wait_ms > 0 && q->guc->resume_time) - msleep(wait_ms); + relaxed_ms_sleep(wait_ms);
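The new helper's split between usleep_range() and msleep() follows the usual kernel timer guidance: jiffies-based msleep() can overshoot short delays by a full tick, while hrtimer-backed usleep_range() is more precise for sleeps in the low-millisecond range. A userspace sketch of the selection logic (the 20 ms threshold and 500 us slack are copied from the helper above; the print statements are illustrative only):

#include <stdio.h>

static void show_sleep_choice(unsigned int delay_ms)
{
	if (!delay_ms) {
		printf("%u ms: no sleep\n", delay_ms);
	} else if (delay_ms > 20) {
		printf("%u ms: msleep(%u)\n", delay_ms, delay_ms);
	} else {
		unsigned long min_us = delay_ms * 1000UL;

		printf("%u ms: usleep_range(%lu, %lu)\n",
		       delay_ms, min_us, min_us + 500);
	}
}

int main(void)
{
	show_sleep_choice(0);	/* nothing to do */
	show_sleep_choice(5);	/* short delay: usleep_range(5000, 5500) */
	show_sleep_choice(50);	/* long delay: plain msleep(50) */
	return 0;
}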
set_exec_queue_suspended(q); disable_scheduling(q, false); diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c index 65b2e147c4b9..894a6bd33285 100644 --- a/drivers/gpu/drm/xe/xe_heci_gsc.c +++ b/drivers/gpu/drm/xe/xe_heci_gsc.c @@ -230,7 +230,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir) if (xe->heci_gsc.irq < 0) return;
- ret = generic_handle_irq(xe->heci_gsc.irq); + ret = generic_handle_irq_safe(xe->heci_gsc.irq); if (ret) drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret); } @@ -250,7 +250,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir) if (xe->heci_gsc.irq < 0) return;
- ret = generic_handle_irq(xe->heci_gsc.irq); + ret = generic_handle_irq_safe(xe->heci_gsc.irq); if (ret) drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret); } diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index d306ed0a0443..3f142f95e5d4 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -1200,6 +1200,9 @@ static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value, static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value, struct xe_oa_open_param *param) { + if (XE_IOCTL_DBG(oa->xe, value > DRM_XE_MAX_SYNCS)) + return -EINVAL; + param->num_syncs = value; return 0; } @@ -1263,7 +1266,7 @@ static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_fr ARRAY_SIZE(xe_oa_set_property_funcs_config));
if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) || - XE_IOCTL_DBG(oa->xe, ext.pad)) + XE_IOCTL_DBG(oa->xe, !ext.property) || XE_IOCTL_DBG(oa->xe, ext.pad)) return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open)); @@ -2375,11 +2378,13 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi goto sysfs_err; }
- mutex_unlock(&oa->metrics_lock); + id = oa_config->id;
- drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, oa_config->id); + drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, id); + + mutex_unlock(&oa->metrics_lock);
- return oa_config->id; + return id;
sysfs_err: mutex_unlock(&oa->metrics_lock); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 30625ce691fa..02f2b7ad8a85 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -1468,7 +1468,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
INIT_LIST_HEAD(&vm->preempt.exec_queues); - vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */ + if (flags & XE_VM_FLAG_FAULT_MODE) + vm->preempt.min_run_period_ms = 0; + else + vm->preempt.min_run_period_ms = 5;
for_each_tile(tile, xe, id) xe_range_fence_tree_init(&vm->rftree[id]); @@ -2829,6 +2832,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, if (XE_IOCTL_DBG(xe, args->extensions)) return -EINVAL;
+ if (XE_IOCTL_DBG(xe, args->num_syncs > DRM_XE_MAX_SYNCS)) + return -EINVAL; + if (args->num_binds > 1) { u64 __user *bind_user = u64_to_user_ptr(args->vector_of_binds); diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index a4b4091cfd0d..4f95308a61b8 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -243,7 +243,7 @@ struct xe_vm { * @min_run_period_ms: The minimum run period before preempting * an engine again */ - s64 min_run_period_ms; + unsigned int min_run_period_ms; /** @exec_queues: list of exec queues attached to this VM */ struct list_head exec_queues; /** @num_exec_queues: number exec queues attached to this VM */ diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index fa3efe9701c9..c1708caa6b6c 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -864,7 +864,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
switch (usage->hid) { /* These usage IDs map directly to the usage codes. */ - case HID_GD_X: case HID_GD_Y: case HID_GD_Z: + case HID_GD_X: case HID_GD_Y: case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ: if (field->flags & HID_MAIN_ITEM_RELATIVE) map_rel(usage->hid & 0xf); @@ -872,6 +872,22 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel map_abs_clear(usage->hid & 0xf); break;
+ case HID_GD_Z: + /* HID_GD_Z is mapped to ABS_DISTANCE for stylus/pen */ + if (field->flags & HID_MAIN_ITEM_RELATIVE) { + map_rel(usage->hid & 0xf); + } else { + if (field->application == HID_DG_PEN || + field->physical == HID_DG_PEN || + field->logical == HID_DG_STYLUS || + field->physical == HID_DG_STYLUS || + field->application == HID_DG_DIGITIZER) + map_abs_clear(ABS_DISTANCE); + else + map_abs_clear(usage->hid & 0xf); + } + break; + case HID_GD_WHEEL: if (field->flags & HID_MAIN_ITEM_RELATIVE) { set_bit(REL_WHEEL, input->relbit); diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index cce54dd9884a..3b5412541c92 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -805,7 +805,6 @@ static void delayedwork_callback(struct work_struct *work) struct dj_workitem workitem; unsigned long flags; int count; - int retval;
dbg_hid("%s\n", __func__);
@@ -842,11 +841,7 @@ static void delayedwork_callback(struct work_struct *work) logi_dj_recv_destroy_djhid_device(djrcv_dev, &workitem); break; case WORKITEM_TYPE_UNKNOWN: - retval = logi_dj_recv_query_paired_devices(djrcv_dev); - if (retval) { - hid_err(djrcv_dev->hidpp, "%s: logi_dj_recv_query_paired_devices error: %d\n", - __func__, retval); - } + logi_dj_recv_query_paired_devices(djrcv_dev); break; case WORKITEM_TYPE_EMPTY: dbg_hid("%s: device list is empty\n", __func__); @@ -1239,8 +1234,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
djrcv_dev->last_query = jiffies;
- if (djrcv_dev->type != recvr_type_dj) - return logi_dj_recv_query_hidpp_devices(djrcv_dev); + if (djrcv_dev->type != recvr_type_dj) { + retval = logi_dj_recv_query_hidpp_devices(djrcv_dev); + goto out; + }
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) @@ -1250,6 +1247,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES; retval = logi_dj_recv_send_report(djrcv_dev, dj_report); kfree(dj_report); +out: + if (retval < 0) + hid_err(djrcv_dev->hidpp, "%s error:%d\n", __func__, retval); + return retval; }
@@ -1275,6 +1276,8 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, (u8)timeout;
retval = logi_dj_recv_send_report(djrcv_dev, dj_report); + if (retval) + goto out;
/* * Ugly sleep to work around a USB 3.0 bug when the receiver is @@ -1283,11 +1286,6 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, * 50 msec should gives enough time to the receiver to be ready. */ msleep(50); - - if (retval) { - kfree(dj_report); - return retval; - } }
/* @@ -1313,7 +1311,12 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+out: kfree(dj_report); + + if (retval < 0) + hid_err(hdev, "%s error:%d\n", __func__, retval); + return retval; }
@@ -1835,11 +1838,8 @@ static int logi_dj_probe(struct hid_device *hdev,
if (has_hidpp) { retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0); - if (retval < 0) { - hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n", - __func__, retval); + if (retval < 0) goto switch_to_dj_mode_fail; - } }
/* This is enabling the polling urb on the IN endpoint */ @@ -1857,15 +1857,11 @@ static int logi_dj_probe(struct hid_device *hdev, spin_lock_irqsave(&djrcv_dev->lock, flags); djrcv_dev->ready = true; spin_unlock_irqrestore(&djrcv_dev->lock, flags); - retval = logi_dj_recv_query_paired_devices(djrcv_dev); - if (retval < 0) { - hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n", - __func__, retval); - /* - * This can happen with a KVM, let the probe succeed, - * logi_dj_recv_queue_unknown_work will retry later. - */ - } + /* + * This can fail with a KVM. Ignore errors to let the probe + * succeed, logi_dj_recv_queue_unknown_work will retry later. + */ + logi_dj_recv_query_paired_devices(djrcv_dev); }
return 0; @@ -1882,18 +1878,12 @@ static int logi_dj_probe(struct hid_device *hdev, #ifdef CONFIG_PM static int logi_dj_reset_resume(struct hid_device *hdev) { - int retval; struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
if (!djrcv_dev || djrcv_dev->hidpp != hdev) return 0;
- retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0); - if (retval < 0) { - hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n", - __func__, retval); - } - + logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0); return 0; } #endif diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index f73f46193748..9df78861f5f8 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -75,6 +75,9 @@ #define DELL_SMM_NO_TEMP 10 #define DELL_SMM_NO_FANS 4
+/* limit fan multiplier to avoid overflow */ +#define DELL_SMM_MAX_FAN_MULT (INT_MAX / U16_MAX) + struct smm_regs { unsigned int eax; unsigned int ebx; @@ -1203,6 +1206,12 @@ static int dell_smm_init_data(struct device *dev, const struct dell_smm_ops *ops data->ops = ops; /* All options must not be 0 */ data->i8k_fan_mult = fan_mult ? : I8K_FAN_MULT; + if (data->i8k_fan_mult > DELL_SMM_MAX_FAN_MULT) { + dev_err(dev, + "fan multiplier %u is too large (max %u)\n", + data->i8k_fan_mult, DELL_SMM_MAX_FAN_MULT); + return -EINVAL; + } data->i8k_fan_max = fan_max ? : I8K_FAN_HIGH; data->i8k_pwm_mult = DIV_ROUND_UP(255, data->i8k_fan_max);
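The new DELL_SMM_MAX_FAN_MULT guard caps the fan multiplier so that multiplying a raw 16-bit fan reading by it cannot overflow a signed int: with a 32-bit int, INT_MAX / U16_MAX is 32768, and 65535 * 32768 = 2147450880 still fits below INT_MAX. A minimal standalone sketch of the same bound (hypothetical helper in plain C, not part of the patch):

#include <limits.h>
#include <stdint.h>

/* Hypothetical check mirroring the patch's guard: a raw fan reading is
 * at most UINT16_MAX, so any multiplier above INT_MAX / UINT16_MAX
 * could overflow when reading * multiplier is computed as an int.
 */
static int fan_mult_is_safe(unsigned int mult)
{
	return mult <= INT_MAX / UINT16_MAX;
}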
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c index 228c5f6c6f38..129f3a9e8fe9 100644 --- a/drivers/hwmon/ibmpex.c +++ b/drivers/hwmon/ibmpex.c @@ -277,6 +277,9 @@ static ssize_t ibmpex_high_low_store(struct device *dev, { struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
+ if (!data) + return -ENODEV; + ibmpex_reset_high_low_data(data);
return count; @@ -508,6 +511,9 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data) { int i, j;
+ hwmon_device_unregister(data->hwmon_dev); + dev_set_drvdata(data->bmc_device, NULL); + device_remove_file(data->bmc_device, &sensor_dev_attr_reset_high_low.dev_attr); device_remove_file(data->bmc_device, &dev_attr_name.attr); @@ -521,8 +527,7 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data) }
list_del(&data->list); - dev_set_drvdata(data->bmc_device, NULL); - hwmon_device_unregister(data->hwmon_dev); + ipmi_destroy_user(data->user); kfree(data->sensors); kfree(data); diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c index 953dfe2bd166..d98c57918ce3 100644 --- a/drivers/hwmon/ltc4282.c +++ b/drivers/hwmon/ltc4282.c @@ -1016,8 +1016,9 @@ static umode_t ltc4282_in_is_visible(const struct ltc4282_state *st, u32 attr) case hwmon_in_max: case hwmon_in_min: case hwmon_in_enable: - case hwmon_in_reset_history: return 0644; + case hwmon_in_reset_history: + return 0200; default: return 0; } @@ -1036,8 +1037,9 @@ static umode_t ltc4282_curr_is_visible(u32 attr) return 0444; case hwmon_curr_max: case hwmon_curr_min: - case hwmon_curr_reset_history: return 0644; + case hwmon_curr_reset_history: + return 0200; default: return 0; } @@ -1055,8 +1057,9 @@ static umode_t ltc4282_power_is_visible(u32 attr) return 0444; case hwmon_power_max: case hwmon_power_min: - case hwmon_power_reset_history: return 0644; + case hwmon_power_reset_history: + return 0200; default: return 0; } diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c index 0ccb5eb596fc..4c9e7892a73c 100644 --- a/drivers/hwmon/max16065.c +++ b/drivers/hwmon/max16065.c @@ -216,12 +216,13 @@ static ssize_t max16065_current_show(struct device *dev, struct device_attribute *da, char *buf) { struct max16065_data *data = max16065_update_device(dev); + int curr_sense = data->curr_sense;
- if (unlikely(data->curr_sense < 0)) - return data->curr_sense; + if (unlikely(curr_sense < 0)) + return curr_sense;
return sysfs_emit(buf, "%d\n", - ADC_TO_CURR(data->curr_sense, data->curr_gain)); + ADC_TO_CURR(curr_sense, data->curr_gain)); }
static ssize_t max16065_limit_store(struct device *dev, diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c index 0735a1d2c20f..6926d787b5ad 100644 --- a/drivers/hwmon/max6697.c +++ b/drivers/hwmon/max6697.c @@ -548,7 +548,7 @@ static int max6697_probe(struct i2c_client *client) struct regmap *regmap; int err;
- regmap = regmap_init_i2c(client, &max6697_regmap_config); + regmap = devm_regmap_init_i2c(client, &max6697_regmap_config); if (IS_ERR(regmap)) return PTR_ERR(regmap);
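Switching to devm_regmap_init_i2c() hands regmap teardown to the driver core, so no explicit regmap_exit() is needed on error or remove paths. A hedged sketch of that pattern (hypothetical driver names, assuming a simple 8-bit register map, not part of the patch):

#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

/* Hypothetical probe: the device-managed regmap is released
 * automatically when the device is unbound, so early returns
 * do not leak it.
 */
static int example_probe(struct i2c_client *client)
{
	struct regmap *map = devm_regmap_init_i2c(client, &example_regmap_config);

	if (IS_ERR(map))
		return PTR_ERR(map);

	return 0;
}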
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index 02c5a3bb1071..84aaf817144c 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -401,7 +401,7 @@ static int tmp401_chip_read(struct device *dev, u32 attr, int channel, long *val ret = regmap_read(data->regmap, TMP401_CONVERSION_RATE, &regval); if (ret < 0) return ret; - *val = (1 << (7 - regval)) * 125; + *val = (1 << (7 - min(regval, 7))) * 125; break; case hwmon_chip_temp_reset_history: *val = 0; diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index ace854b370a0..996e36951f9d 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -218,9 +218,14 @@ static u8 fan_to_reg(long rpm, int div) return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); }
-#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \ - ((val) == 255 ? 0 : \ - 1350000 / ((val) * (div)))) +static int fan_from_reg(int val, int div) +{ + if (val == 0) + return -1; + if (val == 255) + return 0; + return 1350000 / (val * div); +}
/* for temp1 which is 8-bit resolution, LSB = 1 degree Celsius */ #define TEMP1_FROM_REG(val) ((val) * 1000) @@ -521,7 +526,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ struct w83791d_data *data = w83791d_update_device(dev); \ int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", \ - FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \ + fan_from_reg(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \ }
show_fan_reg(fan); @@ -585,10 +590,10 @@ static ssize_t store_fan_div(struct device *dev, struct device_attribute *attr, if (err) return err;
+ mutex_lock(&data->update_lock); /* Save fan_min */ - min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); + min = fan_from_reg(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
- mutex_lock(&data->update_lock); data->fan_div[nr] = div_to_reg(nr, val);
switch (nr) { diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c index 9b81bd406e05..1d9109ca1585 100644 --- a/drivers/hwmon/w83l786ng.c +++ b/drivers/hwmon/w83l786ng.c @@ -76,15 +76,25 @@ FAN_TO_REG(long rpm, int div) return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254); }
-#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \ - ((val) == 255 ? 0 : \ - 1350000 / ((val) * (div)))) +static int fan_from_reg(int val, int div) +{ + if (val == 0) + return -1; + if (val == 255) + return 0; + return 1350000 / (val * div); +}
/* for temp */ #define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \ : (val)) / 1000, 0, 0xff)) -#define TEMP_FROM_REG(val) (((val) & 0x80 ? \ - (val) - 0x100 : (val)) * 1000) + +static int temp_from_reg(int val) +{ + if (val & 0x80) + return (val - 0x100) * 1000; + return val * 1000; +}
/* * The analog voltage inputs have 8mV LSB. Since the sysfs output is @@ -280,7 +290,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ int nr = to_sensor_dev_attr(attr)->index; \ struct w83l786ng_data *data = w83l786ng_update_device(dev); \ return sprintf(buf, "%d\n", \ - FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \ + fan_from_reg(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \ }
show_fan_reg(fan); @@ -347,7 +357,7 @@ store_fan_div(struct device *dev, struct device_attribute *attr,
/* Save fan_min */ mutex_lock(&data->update_lock); - min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])); + min = fan_from_reg(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
data->fan_div[nr] = DIV_TO_REG(val);
@@ -409,7 +419,7 @@ show_temp(struct device *dev, struct device_attribute *attr, char *buf) int nr = sensor_attr->nr; int index = sensor_attr->index; struct w83l786ng_data *data = w83l786ng_update_device(dev); - return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr][index])); + return sprintf(buf, "%d\n", temp_from_reg(data->temp[nr][index])); }
static ssize_t diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index d72993355473..d0973dc5fdd4 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -810,13 +810,17 @@ static int intel_th_output_open(struct inode *inode, struct file *file) int err;
dev = bus_find_device_by_devt(&intel_th_bus, inode->i_rdev); - if (!dev || !dev->driver) - return -ENODEV; + if (!dev || !dev->driver) { + err = -ENODEV; + goto out_no_device; + }
thdrv = to_intel_th_driver(dev->driver); fops = fops_get(thdrv->fops); - if (!fops) - return -ENODEV; + if (!fops) { + err = -ENODEV; + goto out_put_device; + }
replace_fops(file, fops);
@@ -824,10 +828,16 @@ static int intel_th_output_open(struct inode *inode, struct file *file)
if (file->f_op->open) { err = file->f_op->open(inode, file); - return err; + if (err) + goto out_put_device; }
return 0; + +out_put_device: + put_device(dev); +out_no_device: + return err; }
static const struct file_operations intel_th_output_fops = { diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c index 143165300949..73bab21aefc8 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-pci.c +++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c @@ -461,13 +461,16 @@ struct amd_mp2_dev *amd_mp2_find_device(void) { struct device *dev; struct pci_dev *pci_dev; + struct amd_mp2_dev *mp2_dev;
dev = driver_find_next_device(&amd_mp2_pci_driver.driver, NULL); if (!dev) return NULL;
pci_dev = to_pci_dev(dev); - return (struct amd_mp2_dev *)pci_get_drvdata(pci_dev); + mp2_dev = (struct amd_mp2_dev *)pci_get_drvdata(pci_dev); + put_device(dev); + return mp2_dev; } EXPORT_SYMBOL_GPL(amd_mp2_find_device);
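The i2c-amd-mp2 fix copies the driver data while the device reference returned by the lookup is still held, and only then drops it with put_device(). A minimal hedged sketch of that get/use/put pattern (hypothetical helper, not driver code):

#include <linux/device.h>

/* Hypothetical illustration of the pattern used above: read what you
 * need from the device while the lookup's reference is held, then
 * release that reference exactly once.
 */
static void *example_take_drvdata(struct device *dev)
{
	void *data;

	if (!dev)
		return NULL;

	data = dev_get_drvdata(dev);	/* copy while the reference is held */
	put_device(dev);		/* balance the reference from the lookup */

	return data;
}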
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 2d32896d0673..e3d76e423842 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -78,6 +78,7 @@ #define DW_IC_TX_ABRT_SOURCE 0x80 #define DW_IC_ENABLE_STATUS 0x9c #define DW_IC_CLR_RESTART_DET 0xa8 +#define DW_IC_SMBUS_INTR_MASK 0xcc #define DW_IC_COMP_PARAM_1 0xf4 #define DW_IC_COMP_VERSION 0xf8 #define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A /* "111*" == v1.11* */ diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 52dc666c3ef4..196bb073c6bc 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -203,6 +203,13 @@ static int i2c_dw_init_master(struct dw_i2c_dev *dev) /* Disable the adapter */ __i2c_dw_disable(dev);
+ /* + * Mask SMBus interrupts to block storms from broken + * firmware that leaves IC_SMBUS=1; the handler never + * services them. + */ + regmap_write(dev->map, DW_IC_SMBUS_INTR_MASK, 0); + /* Write standard speed timing parameters */ regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt); regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt); diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 426e3c9f88a1..205d1f103b3c 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -123,7 +123,7 @@ static void tiadc_step_config(struct iio_dev *indio_dev)
chan = adc_dev->channel_line[i];
- if (adc_dev->step_avg[i]) + if (adc_dev->step_avg[i] && adc_dev->step_avg[i] <= STEPCONFIG_AVG_16) stepconfig = STEPCONFIG_AVG(ffs(adc_dev->step_avg[i]) - 1) | STEPCONFIG_FIFO1; else diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 929e89841c12..27eac10638cb 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -80,37 +80,25 @@ static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = { .min = sizeof(struct rdma_nla_ls_gid)}, };
-static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh) +static void ib_nl_process_ip_rsep(const struct nlmsghdr *nlh) { struct nlattr *tb[LS_NLA_TYPE_MAX] = {}; + union ib_gid gid; + struct addr_req *req; + int found = 0; int ret;
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) - return false; + return;
ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), nlmsg_len(nlh), ib_nl_addr_policy, NULL); if (ret) - return false; - - return true; -} - -static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh) -{ - const struct nlattr *head, *curr; - union ib_gid gid; - struct addr_req *req; - int len, rem; - int found = 0; - - head = (const struct nlattr *)nlmsg_data(nlh); - len = nlmsg_len(nlh); + return;
- nla_for_each_attr(curr, head, len, rem) { - if (curr->nla_type == LS_NLA_TYPE_DGID) - memcpy(&gid, nla_data(curr), nla_len(curr)); - } + if (!tb[LS_NLA_TYPE_DGID]) + return; + memcpy(&gid, nla_data(tb[LS_NLA_TYPE_DGID]), sizeof(gid));
spin_lock_bh(&lock); list_for_each_entry(req, &req_list, list) { @@ -137,8 +125,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb, !(NETLINK_CB(skb).sk)) return -EPERM;
- if (ib_nl_is_good_ip_resp(nlh)) - ib_nl_process_good_ip_rsep(nlh); + ib_nl_process_ip_rsep(nlh);
return 0; } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 81bc24a346d3..52d70eeb0c52 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2009,6 +2009,7 @@ static void destroy_mc(struct rdma_id_private *id_priv, ib_sa_free_multicast(mc->sa_mc);
if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { + struct rdma_cm_event *event = &mc->iboe_join.event; struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct net_device *ndev = NULL; @@ -2031,6 +2032,8 @@ static void destroy_mc(struct rdma_id_private *id_priv, dev_put(ndev);
cancel_work_sync(&mc->iboe_join.work); + if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) + rdma_destroy_ah_attr(&event->param.ud.ah_attr); } kfree(mc); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index df2aa15a5bc9..bbc131737378 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2823,8 +2823,10 @@ int ib_del_sub_device_and_put(struct ib_device *sub) { struct ib_device *parent = sub->parent;
- if (!parent) + if (!parent) { + ib_device_put(sub); return -EOPNOTSUPP; + }
mutex_lock(&parent->subdev_lock); list_del(&sub->subdev_list); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index dc40001072a5..8dd96dc98fd3 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -735,7 +735,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, (struct in6_addr *)dgid); return 0; } else if (net_type == RDMA_NETWORK_IPV6 || - net_type == RDMA_NETWORK_IB || RDMA_NETWORK_ROCE_V1) { + net_type == RDMA_NETWORK_IB || net_type == RDMA_NETWORK_ROCE_V1) { *dgid = hdr->ibgrh.dgid; *sgid = hdr->ibgrh.sgid; return 0; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index c2abf2bb8026..c1587845f280 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2823,14 +2823,9 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr, wqe.rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC; } - switch (wr->send_flags) { - case IB_SEND_IP_CSUM: + if (wr->send_flags & IB_SEND_IP_CSUM) wqe.rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM; - break; - default: - break; - } fallthrough; case IB_WR_SEND_WITH_INV: rc = bnxt_re_build_send_wqe(qp, wr, &wqe); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 7a099580ca8b..38ded4687122 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -1117,7 +1117,7 @@ static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt) creq_db->dbinfo.flags = 0; creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION; creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id); - if (!creq_db->reg.bar_id) + if (!creq_db->reg.bar_base) dev_err(&pdev->dev, "QPLIB: CREQ BAR region %d resc start is 0!", creq_db->reg.bar_id); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index b785d9e7774c..dfb72a5adc91 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -70,9 +70,7 @@ static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl, for (i = 0; i < pbl->pg_count; i++) { if (pbl->pg_arr[i]) dma_free_coherent(&pdev->dev, pbl->pg_size, - (void *)((unsigned long) - pbl->pg_arr[i] & - PAGE_MASK), + pbl->pg_arr[i], pbl->pg_map_arr[i]); else dev_warn(&pdev->dev, @@ -243,7 +241,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, if (npbl % BIT(MAX_PDL_LVL_SHIFT)) npde++; /* Alloc PDE pages */ - sginfo.pgsize = npde * pg_size; + sginfo.pgsize = npde * ROCE_PG_SIZE_4K; sginfo.npages = 1; rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo); if (rc) @@ -251,7 +249,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
/* Alloc PBL pages */ sginfo.npages = npbl; - sginfo.pgsize = PAGE_SIZE; + sginfo.pgsize = ROCE_PG_SIZE_4K; rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo); if (rc) goto fail; diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index cc13415ff7e7..46eddef7a1cc 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1241,13 +1241,9 @@ static int umem_to_page_list(struct efa_dev *dev, u32 hp_cnt, u8 hp_shift) { - u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT); struct ib_block_iter biter; unsigned int hp_idx = 0;
- ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n", - hp_cnt, pages_in_hp); - rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift)) page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index 0422787592d8..87a6d58663de 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -251,7 +251,7 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct neighbour *neigh = ptr; - struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev; + struct net_device *real_dev, *netdev; struct irdma_device *iwdev; struct ib_device *ibdev; __be32 *p; @@ -260,6 +260,7 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
switch (event) { case NETEVENT_NEIGH_UPDATE: + netdev = neigh->dev; real_dev = rdma_vlan_dev_real_dev(netdev); if (!real_dev) real_dev = netdev; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 71387811b281..2b397a544cb9 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -1464,6 +1464,7 @@ static void query_fast_reg_mode(struct rtrs_clt_path *clt_path) mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1); max_pages_per_mr = ib_dev->attrs.max_mr_size; do_div(max_pages_per_mr, (1ull << mr_page_shift)); + max_pages_per_mr = min_not_zero((u32)max_pages_per_mr, U32_MAX); clt_path->max_pages_per_mr = min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr, ib_dev->attrs.max_fast_reg_page_list_len); diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c index c035216dd27c..2f130f819363 100644 --- a/drivers/input/keyboard/lkkbd.c +++ b/drivers/input/keyboard/lkkbd.c @@ -670,7 +670,8 @@ static int lkkbd_connect(struct serio *serio, struct serio_driver *drv)
return 0;
- fail3: serio_close(serio); + fail3: disable_work_sync(&lk->tq); + serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(lk); @@ -684,6 +685,8 @@ static void lkkbd_disconnect(struct serio *serio) { struct lkkbd *lk = serio_get_drvdata(serio);
+ disable_work_sync(&lk->tq); + input_get_device(lk->dev); input_unregister_device(lk->dev); serio_close(serio); diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 4e37fc3f1a9e..873471f4420f 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -2977,6 +2977,7 @@ static void alps_disconnect(struct psmouse *psmouse)
psmouse_reset(psmouse); timer_shutdown_sync(&priv->timer); + disable_delayed_work_sync(&priv->dev3_register_work); if (priv->dev2) input_unregister_device(priv->dev2); if (!IS_ERR_OR_NULL(priv->dev3)) diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index 630cdd5a1328..b24127e923db 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -1169,6 +1169,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "X5KK45xS_X5SP45xS"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + }, /* * A lot of modern Clevo barebones have touchpad and/or keyboard issues * after suspend fixable with the forcenorestore quirk. diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 294b7ceded27..e54d0feeb9f1 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -85,7 +85,7 @@ static int titsc_config_wires(struct titsc *ts_dev) wire_order[i] = ts_dev->config_inp[i] & 0x0F; if (WARN_ON(analog_line[i] > 7)) return -EINVAL; - if (WARN_ON(wire_order[i] > ARRAY_SIZE(config_pins))) + if (WARN_ON(wire_order[i] >= ARRAY_SIZE(config_pins))) return -EINVAL; }
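The ti_am335x_tsc change tightens the bounds check from '>' to '>=': valid indices into an N-element array are 0..N-1, so an index equal to ARRAY_SIZE() must also be rejected. A tiny standalone illustration (hypothetical array and helper, plain C):

#define N_ELEMS(a)	(sizeof(a) / sizeof((a)[0]))

static const int config_pins_example[4] = { 1, 2, 3, 4 };

/* An index i is in range only when i < N_ELEMS(); testing the error
 * case with "i > N_ELEMS()" would wrongly accept i == 4 here.
 */
static int index_is_valid(unsigned int i)
{
	return i < N_ELEMS(config_pins_example);
}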
diff --git a/drivers/interconnect/qcom/sdx75.c b/drivers/interconnect/qcom/sdx75.c index 7f422c27488d..589b6b30d4ab 100644 --- a/drivers/interconnect/qcom/sdx75.c +++ b/drivers/interconnect/qcom/sdx75.c @@ -16,15 +16,6 @@ #include "icc-rpmh.h" #include "sdx75.h"
-static struct qcom_icc_node qpic_core_master = { - .name = "qpic_core_master", - .id = SDX75_MASTER_QPIC_CORE, - .channels = 1, - .buswidth = 4, - .num_links = 1, - .links = { SDX75_SLAVE_QPIC_CORE }, -}; - static struct qcom_icc_node qup0_core_master = { .name = "qup0_core_master", .id = SDX75_MASTER_QUP_CORE_0, @@ -375,14 +366,6 @@ static struct qcom_icc_node xm_usb3 = { .links = { SDX75_SLAVE_A1NOC_CFG }, };
-static struct qcom_icc_node qpic_core_slave = { - .name = "qpic_core_slave", - .id = SDX75_SLAVE_QPIC_CORE, - .channels = 1, - .buswidth = 4, - .num_links = 0, -}; - static struct qcom_icc_node qup0_core_slave = { .name = "qup0_core_slave", .id = SDX75_SLAVE_QUP_CORE_0, @@ -831,12 +814,6 @@ static struct qcom_icc_bcm bcm_mc0 = { .nodes = { &ebi }, };
-static struct qcom_icc_bcm bcm_qp0 = { - .name = "QP0", - .num_nodes = 1, - .nodes = { &qpic_core_slave }, -}; - static struct qcom_icc_bcm bcm_qup0 = { .name = "QUP0", .keepalive = true, @@ -898,14 +875,11 @@ static struct qcom_icc_bcm bcm_sn4 = { };
static struct qcom_icc_bcm * const clk_virt_bcms[] = { - &bcm_qp0, &bcm_qup0, };
static struct qcom_icc_node * const clk_virt_nodes[] = { - [MASTER_QPIC_CORE] = &qpic_core_master, [MASTER_QUP_CORE_0] = &qup0_core_master, - [SLAVE_QPIC_CORE] = &qpic_core_slave, [SLAVE_QUP_CORE_0] = &qup0_core_slave, };
diff --git a/drivers/interconnect/qcom/sdx75.h b/drivers/interconnect/qcom/sdx75.h index 24e887159920..34f51add59dc 100644 --- a/drivers/interconnect/qcom/sdx75.h +++ b/drivers/interconnect/qcom/sdx75.h @@ -33,7 +33,6 @@ #define SDX75_MASTER_QDSS_ETR 24 #define SDX75_MASTER_QDSS_ETR_1 25 #define SDX75_MASTER_QPIC 26 -#define SDX75_MASTER_QPIC_CORE 27 #define SDX75_MASTER_QUP_0 28 #define SDX75_MASTER_QUP_CORE_0 29 #define SDX75_MASTER_SDCC_1 30 @@ -76,7 +75,6 @@ #define SDX75_SLAVE_QDSS_CFG 67 #define SDX75_SLAVE_QDSS_STM 68 #define SDX75_SLAVE_QPIC 69 -#define SDX75_SLAVE_QPIC_CORE 70 #define SDX75_SLAVE_QUP_0 71 #define SDX75_SLAVE_QUP_CORE_0 72 #define SDX75_SLAVE_SDCC_1 73 diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index af147d279a29..8659cb0bc7e6 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -1602,13 +1602,22 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id, list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
if (alloc_dev_table(pci_seg)) - return NULL; + goto err_free_pci_seg; if (alloc_alias_table(pci_seg)) - return NULL; + goto err_free_dev_table; if (alloc_rlookup_table(pci_seg)) - return NULL; + goto err_free_alias_table;
return pci_seg; + +err_free_alias_table: + free_alias_table(pci_seg); +err_free_dev_table: + free_dev_table(pci_seg); +err_free_pci_seg: + list_del(&pci_seg->list); + kfree(pci_seg); + return NULL; }
static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id, diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 6a019670efc7..5fb7bb7a84f4 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3172,7 +3172,7 @@ static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, struct irte_ga *irte) { - bool ret; + int ret;
ret = __modify_irte_ga(iommu, devid, index, irte); if (ret) diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index e8d7bcbee1a2..05fad2c50c5a 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -790,6 +790,8 @@ static int apple_dart_of_xlate(struct device *dev, struct apple_dart *cfg_dart; int i, sid;
+ put_device(&iommu_pdev->dev); + if (args->args_count != 1) return -EINVAL; sid = args->args[0]; diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index b98a7a598b89..8a76b4f46a32 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -566,14 +566,14 @@ static int qcom_iommu_of_xlate(struct device *dev,
qcom_iommu = platform_get_drvdata(iommu_pdev);
+ put_device(&iommu_pdev->dev); + /* make sure the asid specified in dt is valid, so we don't have * to sanity check this elsewhere: */ if (WARN_ON(asid > qcom_iommu->max_asid) || - WARN_ON(qcom_iommu->ctxs[asid] == NULL)) { - put_device(&iommu_pdev->dev); + WARN_ON(qcom_iommu->ctxs[asid] == NULL)) return -EINVAL; - }
if (!dev_iommu_priv_get(dev)) { dev_iommu_priv_set(dev, qcom_iommu); @@ -582,10 +582,8 @@ static int qcom_iommu_of_xlate(struct device *dev, * multiple different iommu devices. Multiple context * banks are ok, but multiple devices are not: */ - if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) { - put_device(&iommu_pdev->dev); + if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) return -EINVAL; - } }
return iommu_fwspec_add_ids(dev, &asid, 1); diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7465dbb6fa80..cb006a0387d8 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1443,17 +1443,14 @@ static int exynos_iommu_of_xlate(struct device *dev, return -ENODEV;
data = platform_get_drvdata(sysmmu); - if (!data) { - put_device(&sysmmu->dev); + put_device(&sysmmu->dev); + if (!data) return -ENODEV; - }
if (!owner) { owner = kzalloc(sizeof(*owner), GFP_KERNEL); - if (!owner) { - put_device(&sysmmu->dev); + if (!owner) return -ENOMEM; - }
INIT_LIST_HEAD(&owner->controllers); mutex_init(&owner->rpm_lock); diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 71b3383b7115..066e4cd6eea5 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -1309,17 +1309,17 @@ static struct irq_chip intel_ir_chip = { * irq_enter(); * handle_edge_irq() * irq_chip_ack_parent() - * irq_move_irq(); // No EOI + * intel_ack_posted_msi_irq(); // No EOI * handle_irq_event() * driver_handler() * handle_edge_irq() * irq_chip_ack_parent() - * irq_move_irq(); // No EOI + * intel_ack_posted_msi_irq(); // No EOI * handle_irq_event() * driver_handler() * handle_edge_irq() * irq_chip_ack_parent() - * irq_move_irq(); // No EOI + * intel_ack_posted_msi_irq(); // No EOI * handle_irq_event() * driver_handler() * apic_eoi() @@ -1328,7 +1328,7 @@ static struct irq_chip intel_ir_chip = { */ static struct irq_chip intel_ir_chip_post_msi = { .name = "INTEL-IR-POST", - .irq_ack = irq_move_irq, + .irq_ack = intel_ack_posted_msi_irq, .irq_set_affinity = intel_ir_set_affinity, .irq_compose_msi_msg = intel_ir_compose_msi_msg, .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity, diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index 503c5d23c1ea..aadcd42cc8b8 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -80,6 +80,9 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm if (!group) return ERR_PTR(-ENODEV);
+ if (IS_ENABLED(CONFIG_X86)) + return ERR_PTR(-EOPNOTSUPP); + mutex_lock(&iommu_sva_lock);
/* Allocate mm->pasid if necessary. */ diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 540437be168a..aed260d4a93c 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -836,14 +836,20 @@ static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, unsigned long start, size_t length) { + unsigned long last; struct iommufd_ioas *ioas; int rc;
+ if (!length) + return -EINVAL; + if (check_add_overflow(start, length - 1, &last)) + return -EOVERFLOW; + ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id); if (IS_ERR(ioas)) return PTR_ERR(ioas); down_write(&ioas->iopt.iova_rwsem); - rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL); + rc = iopt_reserve_iova(&ioas->iopt, start, last, NULL); up_write(&ioas->iopt.iova_rwsem); iommufd_put_object(ucmd->ictx, &ioas->obj); return rc; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ae69691471e9..e2b41d322e3e 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -719,6 +719,8 @@ static int ipmmu_init_platform_device(struct device *dev,
dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
+ put_device(&ipmmu_pdev->dev); + return 0; }
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 32deab732209..5ebf31f50c06 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -975,6 +975,8 @@ static int mtk_iommu_of_xlate(struct device *dev, return -EINVAL;
dev_iommu_priv_set(dev, platform_get_drvdata(m4updev)); + + put_device(&m4updev->dev); }
return iommu_fwspec_add_ids(dev, args->args, 1); @@ -1213,16 +1215,19 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m }
component_match_add(dev, match, component_compare_dev, &plarbdev->dev); - platform_device_put(plarbdev); }
- if (!frst_avail_smicomm_node) - return -EINVAL; + if (!frst_avail_smicomm_node) { + ret = -EINVAL; + goto err_larbdev_put; + }
pcommdev = of_find_device_by_node(frst_avail_smicomm_node); of_node_put(frst_avail_smicomm_node); - if (!pcommdev) - return -ENODEV; + if (!pcommdev) { + ret = -ENODEV; + goto err_larbdev_put; + } data->smicomm_dev = &pcommdev->dev;
link = device_link_add(data->smicomm_dev, dev, @@ -1230,7 +1235,8 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m platform_device_put(pcommdev); if (!link) { dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev)); - return -EINVAL; + ret = -EINVAL; + goto err_larbdev_put; } return 0;
@@ -1402,8 +1408,12 @@ static int mtk_iommu_probe(struct platform_device *pdev) iommu_device_sysfs_remove(&data->iommu); out_list_del: list_del(&data->list); - if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { device_link_remove(data->smicomm_dev, dev); + + for (i = 0; i < MTK_LARB_NR_MAX; i++) + put_device(data->larb_imu[i].dev); + } out_runtime_disable: pm_runtime_disable(dev); return ret; @@ -1423,6 +1433,9 @@ static void mtk_iommu_remove(struct platform_device *pdev) if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { device_link_remove(data->smicomm_dev, &pdev->dev); component_master_del(&pdev->dev, &mtk_iommu_com_ops); + + for (i = 0; i < MTK_LARB_NR_MAX; i++) + put_device(data->larb_imu[i].dev); } pm_runtime_disable(&pdev->dev); for (i = 0; i < data->plat_data->banks_num; i++) { diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index ee4e55b6b190..5a3eb7b587b4 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -423,6 +423,8 @@ static int mtk_iommu_v1_create_mapping(struct device *dev, return -EINVAL;
dev_iommu_priv_set(dev, platform_get_drvdata(m4updev)); + + put_device(&m4updev->dev); }
ret = iommu_fwspec_add_ids(dev, args->args, 1); @@ -645,8 +647,10 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev) struct platform_device *plarbdev;
larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); - if (!larbnode) - return -EINVAL; + if (!larbnode) { + ret = -EINVAL; + goto out_put_larbs; + }
if (!of_device_is_available(larbnode)) { of_node_put(larbnode); @@ -656,11 +660,14 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev) plarbdev = of_find_device_by_node(larbnode); if (!plarbdev) { of_node_put(larbnode); - return -ENODEV; + ret = -ENODEV; + goto out_put_larbs; } if (!plarbdev->dev.driver) { of_node_put(larbnode); - return -EPROBE_DEFER; + put_device(&plarbdev->dev); + ret = -EPROBE_DEFER; + goto out_put_larbs; } data->larb_imu[i].dev = &plarbdev->dev;
@@ -672,7 +679,7 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
ret = mtk_iommu_v1_hw_init(data); if (ret) - return ret; + goto out_put_larbs;
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, dev_name(&pdev->dev)); @@ -694,12 +701,17 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev) iommu_device_sysfs_remove(&data->iommu); out_clk_unprepare: clk_disable_unprepare(data->bclk); +out_put_larbs: + for (i = 0; i < MTK_LARB_NR_MAX; i++) + put_device(data->larb_imu[i].dev); + return ret; }
static void mtk_iommu_v1_remove(struct platform_device *pdev) { struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev); + int i;
iommu_device_sysfs_remove(&data->iommu); iommu_device_unregister(&data->iommu); @@ -707,6 +719,9 @@ static void mtk_iommu_v1_remove(struct platform_device *pdev) clk_disable_unprepare(data->bclk); devm_free_irq(&pdev->dev, data->irq, data); component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops); + + for (i = 0; i < MTK_LARB_NR_MAX; i++) + put_device(data->larb_imu[i].dev); }
static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev) diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index c9528065a59a..29f675aa7ea0 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1688,6 +1688,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) }
oiommu = platform_get_drvdata(pdev); + put_device(&pdev->dev); if (!oiommu) { of_node_put(np); kfree(arch_data); @@ -1695,7 +1696,6 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) }
tmp->iommu_dev = oiommu; - tmp->dev = &pdev->dev;
of_node_put(np); } diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 27697109ec79..50b39be61abc 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -88,7 +88,6 @@ struct omap_iommu { /** * struct omap_iommu_arch_data - omap iommu private data * @iommu_dev: handle of the OMAP iommu device - * @dev: handle of the iommu device * * This is an omap iommu private data object, which binds an iommu user * to its iommu device. This object should be placed at the iommu user's @@ -97,7 +96,6 @@ struct omap_iommu { */ struct omap_iommu_arch_data { struct omap_iommu *iommu_dev; - struct device *dev; };
struct cr_regs { diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index 8d8f11854676..a27cbc761e26 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -837,6 +837,8 @@ static int sun50i_iommu_of_xlate(struct device *dev,
dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));
+ put_device(&iommu_pdev->dev); + return iommu_fwspec_add_ids(dev, &id, 1); }
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 7f633bb5efef..23dad5134317 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -823,10 +823,9 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) return NULL;
mc = platform_get_drvdata(pdev); - if (!mc) { - put_device(&pdev->dev); + put_device(&pdev->dev); + if (!mc) return NULL; - }
return mc->smmu; } diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 70dee9ad4bae..78e6e7748fb9 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -306,15 +306,9 @@ static void capincci_alloc_minor(struct capidev *cdev, struct capincci *np) static void capincci_free_minor(struct capincci *np) { struct capiminor *mp = np->minorp; - struct tty_struct *tty;
if (mp) { - tty = tty_port_tty_get(&mp->port); - if (tty) { - tty_vhangup(tty); - tty_kref_put(tty); - } - + tty_port_tty_vhangup(&mp->port); capiminor_free(mp); } } diff --git a/drivers/leds/leds-cros_ec.c b/drivers/leds/leds-cros_ec.c index 275522b81ea5..d06e1cc71821 100644 --- a/drivers/leds/leds-cros_ec.c +++ b/drivers/leds/leds-cros_ec.c @@ -155,9 +155,6 @@ static int cros_ec_led_count_subleds(struct device *dev, } }
- if (!num_subleds) - return -EINVAL; - *max_brightness = common_range; return num_subleds; } @@ -202,6 +199,8 @@ static int cros_ec_led_probe_one(struct device *dev, struct cros_ec_device *cros &priv->led_mc_cdev.led_cdev.max_brightness); if (num_subleds < 0) return num_subleds; + if (num_subleds == 0) + return 0; /* LED without any colors, skip */
priv->cros_ec = cros_ec; priv->led_id = id; diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c index e9eb0ad6751d..2037f4f90c76 100644 --- a/drivers/leds/leds-lp50xx.c +++ b/drivers/leds/leds-lp50xx.c @@ -52,11 +52,17 @@
#define LP50XX_SW_RESET 0xff #define LP50XX_CHIP_EN BIT(6) +#define LP50XX_CHIP_DISABLE 0x00 +#define LP50XX_START_TIME_US 500 +#define LP50XX_RESET_TIME_US 3 + +#define LP50XX_EN_GPIO_LOW 0 +#define LP50XX_EN_GPIO_HIGH 1
/* There are 3 LED outputs per bank */ #define LP50XX_LEDS_PER_MODULE 3
-#define LP5009_MAX_LED_MODULES 2 +#define LP5009_MAX_LED_MODULES 3 #define LP5012_MAX_LED_MODULES 4 #define LP5018_MAX_LED_MODULES 6 #define LP5024_MAX_LED_MODULES 8 @@ -343,17 +349,15 @@ static int lp50xx_brightness_set(struct led_classdev *cdev, return ret; }
-static int lp50xx_set_banks(struct lp50xx *priv, u32 led_banks[]) +static int lp50xx_set_banks(struct lp50xx *priv, u32 led_banks[], int num_leds) { u8 led_config_lo, led_config_hi; u32 bank_enable_mask = 0; int ret; int i;
- for (i = 0; i < priv->chip_info->max_modules; i++) { - if (led_banks[i]) - bank_enable_mask |= (1 << led_banks[i]); - } + for (i = 0; i < num_leds; i++) + bank_enable_mask |= (1 << led_banks[i]);
led_config_lo = bank_enable_mask; led_config_hi = bank_enable_mask >> 8; @@ -373,19 +377,42 @@ static int lp50xx_reset(struct lp50xx *priv) return regmap_write(priv->regmap, priv->chip_info->reset_reg, LP50XX_SW_RESET); }
-static int lp50xx_enable_disable(struct lp50xx *priv, int enable_disable) +static int lp50xx_enable(struct lp50xx *priv) { int ret;
- ret = gpiod_direction_output(priv->enable_gpio, enable_disable); + if (priv->enable_gpio) { + ret = gpiod_direction_output(priv->enable_gpio, LP50XX_EN_GPIO_HIGH); + if (ret) + return ret; + + udelay(LP50XX_START_TIME_US); + } + + ret = lp50xx_reset(priv); if (ret) return ret;
- if (enable_disable) - return regmap_write(priv->regmap, LP50XX_DEV_CFG0, LP50XX_CHIP_EN); - else - return regmap_write(priv->regmap, LP50XX_DEV_CFG0, 0); + return regmap_write(priv->regmap, LP50XX_DEV_CFG0, LP50XX_CHIP_EN); +} + +static int lp50xx_disable(struct lp50xx *priv) +{ + int ret; + + ret = regmap_write(priv->regmap, LP50XX_DEV_CFG0, LP50XX_CHIP_DISABLE); + if (ret) + return ret;
+ if (priv->enable_gpio) { + ret = gpiod_direction_output(priv->enable_gpio, LP50XX_EN_GPIO_LOW); + if (ret) + return ret; + + udelay(LP50XX_RESET_TIME_US); + } + + return 0; }
static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv, @@ -407,7 +434,7 @@ static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv, return ret; }
- ret = lp50xx_set_banks(priv, led_banks); + ret = lp50xx_set_banks(priv, led_banks, num_leds); if (ret) { dev_err(priv->dev, "Cannot setup banked LEDs\n"); return ret; @@ -450,6 +477,10 @@ static int lp50xx_probe_dt(struct lp50xx *priv) return dev_err_probe(priv->dev, PTR_ERR(priv->enable_gpio), "Failed to get enable GPIO\n");
+ ret = lp50xx_enable(priv); + if (ret) + return ret; + priv->regulator = devm_regulator_get(priv->dev, "vled"); if (IS_ERR(priv->regulator)) priv->regulator = NULL; @@ -556,14 +587,6 @@ static int lp50xx_probe(struct i2c_client *client) return ret; }
- ret = lp50xx_reset(led); - if (ret) - return ret; - - ret = lp50xx_enable_disable(led, 1); - if (ret) - return ret; - return lp50xx_probe_dt(led); }
@@ -572,7 +595,7 @@ static void lp50xx_remove(struct i2c_client *client) struct lp50xx *led = i2c_get_clientdata(client); int ret;
- ret = lp50xx_enable_disable(led, 0); + ret = lp50xx_disable(led); if (ret) dev_err(led->dev, "Failed to disable chip\n");
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index aaa21fe295f2..c3252fdc75fd 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1375,7 +1375,7 @@ static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio { unsigned int n_sectors; sector_t sector; - unsigned int offset, end; + unsigned int offset, end, align;
b->end_io = end_io;
@@ -1389,9 +1389,11 @@ static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio b->c->write_callback(b); offset = b->write_start; end = b->write_end; - offset &= -DM_BUFIO_WRITE_ALIGN; - end += DM_BUFIO_WRITE_ALIGN - 1; - end &= -DM_BUFIO_WRITE_ALIGN; + align = max(DM_BUFIO_WRITE_ALIGN, + bdev_physical_block_size(b->c->bdev)); + offset &= -align; + end += align - 1; + end &= -align; if (unlikely(end > b->c->block_size)) end = b->c->block_size;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index b19b0142a690..bb8dcfd56e4b 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -103,7 +103,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv, } else { flush_dcache_page(bv->bv_page); memcpy(ba, pa, cur_len); - dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len); + dm_bufio_mark_buffer_dirty(b); }
dm_bufio_release(b); diff --git a/drivers/md/md.c b/drivers/md/md.c index 5c39246c467e..26056d53f40c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3729,7 +3729,6 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
static int analyze_sbs(struct mddev *mddev) { - int i; struct md_rdev *rdev, *freshest, *tmp;
freshest = NULL; @@ -3756,11 +3755,9 @@ static int analyze_sbs(struct mddev *mddev) super_types[mddev->major_version]. validate_super(mddev, NULL/*freshest*/, freshest);
- i = 0; rdev_for_each_safe(rdev, tmp, mddev) { if (mddev->max_disks && - (rdev->desc_nr >= mddev->max_disks || - i > mddev->max_disks)) { + rdev->desc_nr >= mddev->max_disks) { pr_warn("md: %s: %pg: only %d devices permitted\n", mdname(mddev), rdev->bdev, mddev->max_disks); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index b0062ad9b1d9..a91911a9fc03 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1626,11 +1626,10 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio) if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) return -EAGAIN;
- if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) { + if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) { bio_wouldblock_error(bio); return 0; } - wait_barrier(conf, false);
/* * Check reshape again to avoid reshape happens after checking diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8e5ccca3b68b..7262b77a8e02 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7181,12 +7181,14 @@ raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) err = mddev_suspend_and_lock(mddev); if (err) return err; + conf = mddev->private; + if (!conf) { + mddev_unlock_and_resume(mddev); + return -ENODEV; + } raid5_quiesce(mddev, true);
- conf = mddev->private; - if (!conf) - err = -ENODEV; - else if (new != conf->worker_cnt_per_group) { + if (new != conf->worker_cnt_per_group) { old_groups = conf->worker_groups; if (old_groups) flush_workqueue(raid5_wq); diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c index 48282d272fe6..865d86f34add 100644 --- a/drivers/media/cec/core/cec-core.c +++ b/drivers/media/cec/core/cec-core.c @@ -420,6 +420,7 @@ static int __init cec_devnode_init(void)
ret = bus_register(&cec_bus_type); if (ret < 0) { + debugfs_remove_recursive(top_cec_dir); unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES); pr_warn("cec: bus_register failed\n"); return -EIO; diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index bb0b7fa67b53..faad72f79545 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -258,6 +258,7 @@ static void *vb2_dc_alloc(struct vb2_buffer *vb,
if (ret) { dev_err(dev, "dma alloc of size %lu failed\n", size); + put_device(buf->dev); kfree(buf); return ERR_PTR(-ENOMEM); } diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index a3f4b4ad35aa..11b6d13cfb96 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -3620,7 +3620,7 @@ static int adv76xx_probe(struct i2c_client *client) err = media_entity_pads_init(&sd->entity, state->source_pad + 1, state->pads); if (err) - goto err_work_queues; + goto err_i2c;
/* Configure regmaps */ err = configure_regmaps(state); @@ -3661,8 +3661,6 @@ static int adv76xx_probe(struct i2c_client *client)
err_entity: media_entity_cleanup(&sd->entity); -err_work_queues: - cancel_delayed_work(&state->delayed_work_enable_hotplug); err_i2c: adv76xx_unregister_clients(state); err_hdl: diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 61ea7393066d..5864477d9355 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -2689,6 +2689,7 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd) /* CP block */ struct adv7842_state *state = to_state(sd); struct v4l2_dv_timings timings; + int temp; u8 reg_io_0x02 = io_read(sd, 0x02); u8 reg_io_0x21 = io_read(sd, 0x21); u8 reg_rep_0x77 = rep_read(sd, 0x77); @@ -2811,8 +2812,9 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd) (((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ? "(16-235)" : "(0-255)", (reg_io_0x02 & 0x08) ? "enabled" : "disabled"); + temp = cp_read(sd, 0xf4) >> 4; v4l2_info(sd, "Color space conversion: %s\n", - csc_coeff_sel_rb[cp_read(sd, 0xf4) >> 4]); + temp < 0 ? "" : csc_coeff_sel_rb[temp]);
if (!is_digital_input(sd)) return 0; @@ -2842,8 +2844,9 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd) hdmi_read(sd, 0x5f)); v4l2_info(sd, "AV Mute: %s\n", (hdmi_read(sd, 0x04) & 0x40) ? "on" : "off"); + temp = hdmi_read(sd, 0x0b) >> 6; v4l2_info(sd, "Deep color mode: %s\n", - deep_color_mode_txt[hdmi_read(sd, 0x0b) >> 6]); + temp < 0 ? "" : deep_color_mode_txt[temp]);
adv7842_log_infoframes(sd);
@@ -3570,7 +3573,7 @@ static int adv7842_probe(struct i2c_client *client) err = media_entity_pads_init(&sd->entity, ADV7842_PAD_SOURCE + 1, state->pads); if (err) - goto err_work_queues; + goto err_i2c;
err = adv7842_core_init(sd); if (err) @@ -3591,8 +3594,6 @@ static int adv7842_probe(struct i2c_client *client)
err_entity: media_entity_cleanup(&sd->entity); -err_work_queues: - cancel_delayed_work(&state->delayed_work_enable_hotplug); err_i2c: adv7842_unregister_clients(sd); err_hdl: diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c index eaa1496c71bb..e0714abe8540 100644 --- a/drivers/media/i2c/imx219.c +++ b/drivers/media/i2c/imx219.c @@ -843,7 +843,7 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd, const struct imx219_mode *mode; struct v4l2_mbus_framefmt *format; struct v4l2_rect *crop; - unsigned int bin_h, bin_v; + unsigned int bin_h, bin_v, binning;
mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes), @@ -862,9 +862,12 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd, bin_h = min(IMX219_PIXEL_ARRAY_WIDTH / format->width, 2U); bin_v = min(IMX219_PIXEL_ARRAY_HEIGHT / format->height, 2U);
+ /* Ensure bin_h and bin_v are same to avoid 1:2 or 2:1 stretching */ + binning = min(bin_h, bin_v); + crop = v4l2_subdev_state_get_crop(state, 0); - crop->width = format->width * bin_h; - crop->height = format->height * bin_v; + crop->width = format->width * binning; + crop->height = format->height * binning; crop->left = (IMX219_NATIVE_WIDTH - crop->width) / 2; crop->top = (IMX219_NATIVE_HEIGHT - crop->height) / 2;
diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c index ecabc0e1d32e..1d9f41dd7c21 100644 --- a/drivers/media/i2c/msp3400-kthreads.c +++ b/drivers/media/i2c/msp3400-kthreads.c @@ -596,6 +596,8 @@ int msp3400c_thread(void *data) "carrier2 val: %5d / %s\n", val, cd[i].name); }
+ if (max1 < 0 || max1 > 3) + goto restart; /* program the msp3400 according to the results */ state->main = msp3400c_carrier_detect_main[max1].cdo; switch (max1) { diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c index 3b7e5ff5b010..910e0c90c494 100644 --- a/drivers/media/i2c/tda1997x.c +++ b/drivers/media/i2c/tda1997x.c @@ -2798,7 +2798,6 @@ static int tda1997x_probe(struct i2c_client *client) err_free_handler: v4l2_ctrl_handler_free(&state->hdl); err_free_mutex: - cancel_delayed_work(&state->delayed_work_enable_hpd); mutex_destroy(&state->page_lock); mutex_destroy(&state->lock); tda1997x_set_power(state, 0); diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c index 4769c053c6c2..7e87681ac6e3 100644 --- a/drivers/media/platform/amphion/vpu_malone.c +++ b/drivers/media/platform/amphion/vpu_malone.c @@ -25,6 +25,10 @@ #include "vpu_imx8q.h" #include "vpu_malone.h"
+static bool low_latency; +module_param(low_latency, bool, 0644); +MODULE_PARM_DESC(low_latency, "Set low latency frame flush mode: 0 (disable) or 1 (enable)"); + #define CMD_SIZE 25600 #define MSG_SIZE 25600 #define CODEC_SIZE 0x1000 @@ -1311,22 +1315,18 @@ static int vpu_malone_insert_scode_vc1_g_seq(struct malone_scode_t *scode) { if (!scode->inst->total_input_count) return 0; - if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb))) - scode->need_data = 0; return 0; }
static int vpu_malone_insert_scode_vc1_g_pic(struct malone_scode_t *scode) { - struct vb2_v4l2_buffer *vbuf; u8 nal_hdr[MALONE_VC1_NAL_HEADER_LEN]; u32 *data = NULL; int ret;
- vbuf = to_vb2_v4l2_buffer(scode->vb); data = vb2_plane_vaddr(scode->vb, 0);
- if (scode->inst->total_input_count == 0 || vpu_vb_is_codecconfig(vbuf)) + if (scode->inst->total_input_count == 0) return 0; if (MALONE_VC1_CONTAIN_NAL(*data)) return 0; @@ -1347,8 +1347,6 @@ static int vpu_malone_insert_scode_vc1_l_seq(struct malone_scode_t *scode) int size = 0; u8 rcv_seqhdr[MALONE_VC1_RCV_SEQ_HEADER_LEN];
- if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb))) - scode->need_data = 0; if (scode->inst->total_input_count) return 0; scode->need_data = 0; @@ -1534,7 +1532,7 @@ static int vpu_malone_input_frame_data(struct vpu_malone_str_buffer __iomem *str scode.vb = vb; scode.wptr = wptr; scode.need_data = 1; - if (vbuf->sequence == 0 || vpu_vb_is_codecconfig(vbuf)) + if (vbuf->sequence == 0) ret = vpu_malone_insert_scode(&scode, SCODE_SEQUENCE);
if (ret < 0) @@ -1562,7 +1560,15 @@ static int vpu_malone_input_frame_data(struct vpu_malone_str_buffer __iomem *str
vpu_malone_update_wptr(str_buf, wptr);
- if (disp_imm && !vpu_vb_is_codecconfig(vbuf)) { + /* + * Enable the low latency flush mode if display delay is set to 0 + * or the low latency frame flush mode if it is set to 1. + * The low latency flush mode requires some padding data to be appended to each frame, + * but there must not be any padding data between the sequence header and the frame. + * This module is currently only supported for the H264 and HEVC formats, + * for other formats, vpu_malone_add_scode() will return 0. + */ + if (disp_imm || low_latency) { ret = vpu_malone_add_scode(inst->core->iface, inst->id, &inst->stream_buffer, @@ -1609,7 +1615,6 @@ int vpu_malone_input_frame(struct vpu_shared_addr *shared, struct vpu_inst *inst, struct vb2_buffer *vb) { struct vpu_dec_ctrl *hc = shared->priv; - struct vb2_v4l2_buffer *vbuf; struct vpu_malone_str_buffer __iomem *str_buf = hc->str_buf[inst->id]; u32 disp_imm = hc->codec_param[inst->id].disp_imm; u32 size; @@ -1623,16 +1628,6 @@ int vpu_malone_input_frame(struct vpu_shared_addr *shared, return ret; size = ret;
- /* - * if buffer only contain codec data, and the timestamp is invalid, - * don't put the invalid timestamp to resync - * merge the data to next frame - */ - vbuf = to_vb2_v4l2_buffer(vb); - if (vpu_vb_is_codecconfig(vbuf)) { - inst->extra_size += size; - return 0; - } if (inst->extra_size) { size += inst->extra_size; inst->extra_size = 0; diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c index 7f66bfef2abb..87f5b1d892ca 100644 --- a/drivers/media/platform/amphion/vpu_v4l2.c +++ b/drivers/media/platform/amphion/vpu_v4l2.c @@ -24,6 +24,11 @@ #include "vpu_msgs.h" #include "vpu_helpers.h"
+static char *vpu_type_name(u32 type) +{ + return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture"; +} + void vpu_inst_lock(struct vpu_inst *inst) { mutex_lock(&inst->lock); @@ -42,7 +47,7 @@ dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no) vb->planes[plane_no].data_offset; }
-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no) +static unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no) { if (plane_no >= vb->num_planes) return 0; @@ -81,7 +86,7 @@ void vpu_v4l2_set_error(struct vpu_inst *inst) vpu_inst_unlock(inst); }
-int vpu_notify_eos(struct vpu_inst *inst) +static int vpu_notify_eos(struct vpu_inst *inst) { static const struct v4l2_event ev = { .id = 0, @@ -344,16 +349,6 @@ struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst) if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE) return NULL;
- while (vpu_vb_is_codecconfig(src_buf)) { - v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx); - vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE); - v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); - - src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx); - if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE) - return NULL; - } - return src_buf; }
@@ -562,7 +557,8 @@ static void vpu_vb2_buf_finish(struct vb2_buffer *vb) call_void_vop(inst, on_queue_empty, q->type); }
-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state) +static void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, + enum vb2_buffer_state state) { struct vb2_v4l2_buffer *buf;
@@ -698,15 +694,15 @@ static int vpu_v4l2_release(struct vpu_inst *inst) { vpu_trace(inst->vpu->dev, "%p\n", inst);
- vpu_release_core(inst->core); - put_device(inst->dev); - if (inst->workqueue) { cancel_work_sync(&inst->msg_work); destroy_workqueue(inst->workqueue); inst->workqueue = NULL; }
+ vpu_release_core(inst->core); + put_device(inst->dev); + v4l2_ctrl_handler_free(&inst->ctrl_handler); mutex_destroy(&inst->lock);
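
The reordering above follows the usual teardown rule: work that can still dereference an object must be flushed before the last reference to that object is dropped. A plain-C analogy of that ordering, with a pthread standing in for the instance's message workqueue (all names here are made up for illustration; this is not the driver's API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct inst {
        pthread_t msg_worker;
        int stop;               /* written by main, read by the worker */
        unsigned long work_done;
};

static void *msg_work(void *arg)
{
        struct inst *inst = arg;

        while (!__atomic_load_n(&inst->stop, __ATOMIC_ACQUIRE))
                inst->work_done++;      /* still touching *inst */
        return NULL;
}

int main(void)
{
        struct inst *inst = calloc(1, sizeof(*inst));

        if (!inst || pthread_create(&inst->msg_worker, NULL, msg_work, inst))
                return 1;

        /* Flush the worker first (cancel_work_sync()/destroy_workqueue() in
         * the driver), and only then drop the object it was using. The
         * reverse order opens a use-after-free window. */
        __atomic_store_n(&inst->stop, 1, __ATOMIC_RELEASE);
        pthread_join(inst->msg_worker, NULL);

        printf("worker iterations: %lu\n", inst->work_done);
        free(inst);
        return 0;
}
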
diff --git a/drivers/media/platform/amphion/vpu_v4l2.h b/drivers/media/platform/amphion/vpu_v4l2.h index 56f2939fa84d..da9945f25e32 100644 --- a/drivers/media/platform/amphion/vpu_v4l2.h +++ b/drivers/media/platform/amphion/vpu_v4l2.h @@ -26,15 +26,12 @@ void vpu_skip_frame(struct vpu_inst *inst, int count); struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence); struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx); void vpu_v4l2_set_error(struct vpu_inst *inst); -int vpu_notify_eos(struct vpu_inst *inst); int vpu_notify_source_change(struct vpu_inst *inst); int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos); -void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state); int vpu_get_num_buffers(struct vpu_inst *inst, u32 type); bool vpu_is_source_empty(struct vpu_inst *inst);
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no); -unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no); static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type) { if (V4L2_TYPE_IS_OUTPUT(type)) @@ -42,19 +39,4 @@ static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type) else return &inst->cap_format; } - -static inline char *vpu_type_name(u32 type) -{ - return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture"; -} - -static inline int vpu_vb_is_codecconfig(struct vb2_v4l2_buffer *vbuf) -{ -#ifdef V4L2_BUF_FLAG_CODECCONFIG - return (vbuf->flags & V4L2_BUF_FLAG_CODECCONFIG) ? 1 : 0; -#else - return 0; -#endif -} - #endif diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c index 37e7b985d52c..afa47649dd4c 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c @@ -176,10 +176,18 @@ void mdp_video_device_release(struct video_device *vdev) kfree(mdp); }
+static void mdp_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + static int mdp_mm_subsys_deploy(struct mdp_dev *mdp, enum mdp_infra_id id) { struct platform_device *mm_pdev = NULL; struct device **dev; + int ret; int i;
if (!mdp) @@ -213,6 +221,11 @@ static int mdp_mm_subsys_deploy(struct mdp_dev *mdp, enum mdp_infra_id id) if (WARN_ON(!mm_pdev)) return -ENODEV;
+ ret = devm_add_action_or_reset(&mdp->pdev->dev, mdp_put_device, + &mm_pdev->dev); + if (ret) + return ret; + *dev = &mm_pdev->dev; }
@@ -298,6 +311,7 @@ static int mdp_probe(struct platform_device *pdev) goto err_destroy_clock_wq; } mdp->scp = platform_get_drvdata(mm_pdev); + put_device(&mm_pdev->dev); }
mdp->rproc_handle = scp_get_rproc(mdp->scp); diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c index d7027d600208..3632037f78f5 100644 --- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c +++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c @@ -47,30 +47,32 @@ static void mtk_vcodec_vpu_reset_dec_handler(void *priv) { struct mtk_vcodec_dec_dev *dev = priv; struct mtk_vcodec_dec_ctx *ctx; + unsigned long flags;
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_ctx_lock); + spin_lock_irqsave(&dev->dev_ctx_lock, flags); list_for_each_entry(ctx, &dev->ctx_list, list) { ctx->state = MTK_STATE_ABORT; mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id); } - mutex_unlock(&dev->dev_ctx_lock); + spin_unlock_irqrestore(&dev->dev_ctx_lock, flags); }
static void mtk_vcodec_vpu_reset_enc_handler(void *priv) { struct mtk_vcodec_enc_dev *dev = priv; struct mtk_vcodec_enc_ctx *ctx; + unsigned long flags;
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_ctx_lock); + spin_lock_irqsave(&dev->dev_ctx_lock, flags); list_for_each_entry(ctx, &dev->ctx_list, list) { ctx->state = MTK_STATE_ABORT; mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id); } - mutex_unlock(&dev->dev_ctx_lock); + spin_unlock_irqrestore(&dev->dev_ctx_lock, flags); }
static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = { @@ -117,8 +119,10 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(void *priv, enum mtk_vcodec_fw_use vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_enc_handler, priv, rst_id);
fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL); - if (!fw) + if (!fw) { + put_device(&fw_pdev->dev); return ERR_PTR(-ENOMEM); + } fw->type = VPU; fw->ops = &mtk_vcodec_vpu_msg; fw->pdev = fw_pdev; diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c index 2073781ccadb..fc920c4d6181 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c @@ -198,6 +198,7 @@ static int fops_vcodec_open(struct file *file) struct mtk_vcodec_dec_ctx *ctx = NULL; int ret = 0, i, hw_count; struct vb2_queue *src_vq; + unsigned long flags;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -268,9 +269,9 @@ static int fops_vcodec_open(struct file *file)
ctx->dev->vdec_pdata->init_vdec_params(ctx);
- mutex_lock(&dev->dev_ctx_lock); + spin_lock_irqsave(&dev->dev_ctx_lock, flags); list_add(&ctx->list, &dev->ctx_list); - mutex_unlock(&dev->dev_ctx_lock); + spin_unlock_irqrestore(&dev->dev_ctx_lock, flags); mtk_vcodec_dbgfs_create(ctx);
mutex_unlock(&dev->dev_mutex); @@ -295,6 +296,7 @@ static int fops_vcodec_release(struct file *file) { struct mtk_vcodec_dec_dev *dev = video_drvdata(file); struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(file->private_data); + unsigned long flags;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] decoder", ctx->id); mutex_lock(&dev->dev_mutex); @@ -313,9 +315,9 @@ static int fops_vcodec_release(struct file *file) v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
mtk_vcodec_dbgfs_remove(dev, ctx->id); - mutex_lock(&dev->dev_ctx_lock); + spin_lock_irqsave(&dev->dev_ctx_lock, flags); list_del_init(&ctx->list); - mutex_unlock(&dev->dev_ctx_lock); + spin_unlock_irqrestore(&dev->dev_ctx_lock, flags); kfree(ctx); mutex_unlock(&dev->dev_mutex); return 0; @@ -408,7 +410,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev) for (i = 0; i < MTK_VDEC_HW_MAX; i++) mutex_init(&dev->dec_mutex[i]); mutex_init(&dev->dev_mutex); - mutex_init(&dev->dev_ctx_lock); + spin_lock_init(&dev->dev_ctx_lock); spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s", diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h index ac568ed14fa2..309f0e5c9c6c 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h +++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h @@ -283,7 +283,7 @@ struct mtk_vcodec_dec_dev { /* decoder hardware mutex lock */ struct mutex dec_mutex[MTK_VDEC_HW_MAX]; struct mutex dev_mutex; - struct mutex dev_ctx_lock; + spinlock_t dev_ctx_lock; struct workqueue_struct *decode_workqueue;
spinlock_t irqlock; diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c index 145958206e38..40b97f114cf6 100644 --- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c +++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c @@ -75,16 +75,17 @@ static void handle_get_param_msg_ack(const struct vdec_vpu_ipi_get_param_ack *ms static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vdec_vpu_inst *vpu) { struct mtk_vcodec_dec_ctx *ctx; + unsigned long flags; int ret = false;
- mutex_lock(&dec_dev->dev_ctx_lock); + spin_lock_irqsave(&dec_dev->dev_ctx_lock, flags); list_for_each_entry(ctx, &dec_dev->ctx_list, list) { if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) { ret = true; break; } } - mutex_unlock(&dec_dev->dev_ctx_lock); + spin_unlock_irqrestore(&dec_dev->dev_ctx_lock, flags);
return ret; } diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c index 3cb8a1622222..6dcd6e904732 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c +++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c @@ -117,6 +117,7 @@ static int fops_vcodec_open(struct file *file) struct mtk_vcodec_enc_ctx *ctx = NULL; int ret = 0; struct vb2_queue *src_vq; + unsigned long flags;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -177,9 +178,9 @@ static int fops_vcodec_open(struct file *file) mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ", ctx->id, ctx, ctx->m2m_ctx);
- mutex_lock(&dev->dev_ctx_lock); + spin_lock_irqsave(&dev->dev_ctx_lock, flags); list_add(&ctx->list, &dev->ctx_list); - mutex_unlock(&dev->dev_ctx_lock); + spin_unlock_irqrestore(&dev->dev_ctx_lock, flags);
mutex_unlock(&dev->dev_mutex); mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev), @@ -204,6 +205,7 @@ static int fops_vcodec_release(struct file *file) { struct mtk_vcodec_enc_dev *dev = video_drvdata(file); struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(file->private_data); + unsigned long flags;
mtk_v4l2_venc_dbg(1, ctx, "[%d] encoder", ctx->id); mutex_lock(&dev->dev_mutex); @@ -214,9 +216,9 @@ static int fops_vcodec_release(struct file *file) v4l2_fh_exit(&ctx->fh); v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
- mutex_lock(&dev->dev_ctx_lock); + spin_lock_irqsave(&dev->dev_ctx_lock, flags); list_del_init(&ctx->list); - mutex_unlock(&dev->dev_ctx_lock); + spin_unlock_irqrestore(&dev->dev_ctx_lock, flags); kfree(ctx); mutex_unlock(&dev->dev_mutex); return 0; @@ -298,7 +300,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mutex_init(&dev->enc_mutex); mutex_init(&dev->dev_mutex); - mutex_init(&dev->dev_ctx_lock); + spin_lock_init(&dev->dev_ctx_lock); spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s", diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h index 0bd85d0fb379..90b4f5ea0d6b 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h +++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h @@ -206,7 +206,7 @@ struct mtk_vcodec_enc_dev { /* encoder hardware mutex lock */ struct mutex enc_mutex; struct mutex dev_mutex; - struct mutex dev_ctx_lock; + spinlock_t dev_ctx_lock; struct workqueue_struct *encode_workqueue;
int enc_irq; diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c index 51bb7ee141b9..3c229b1f6b21 100644 --- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c +++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c @@ -45,16 +45,17 @@ static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, const void *data) static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct venc_vpu_inst *vpu) { struct mtk_vcodec_enc_ctx *ctx; + unsigned long flags; int ret = false;
- mutex_lock(&enc_dev->dev_ctx_lock); + spin_lock_irqsave(&enc_dev->dev_ctx_lock, flags); list_for_each_entry(ctx, &enc_dev->ctx_list, list) { if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) { ret = true; break; } } - mutex_unlock(&enc_dev->dev_ctx_lock); + spin_unlock_irqrestore(&enc_dev->dev_ctx_lock, flags);
return ret; } diff --git a/drivers/media/platform/renesas/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c index f21d05054341..6b762c43b495 100644 --- a/drivers/media/platform/renesas/rcar_drif.c +++ b/drivers/media/platform/renesas/rcar_drif.c @@ -1249,6 +1249,7 @@ static struct device_node *rcar_drif_bond_enabled(struct platform_device *p) if (np && of_device_is_available(np)) return np;
+ of_node_put(np); return NULL; }
diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c index 5f10bb4eb4f7..83f0fa078089 100644 --- a/drivers/media/platform/samsung/exynos4-is/media-dev.c +++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c @@ -1410,12 +1410,14 @@ static int subdev_notifier_complete(struct v4l2_async_notifier *notifier) mutex_lock(&fmd->media_dev.graph_mutex);
ret = fimc_md_create_links(fmd); - if (ret < 0) - goto unlock; + if (ret < 0) { + mutex_unlock(&fmd->media_dev.graph_mutex); + return ret; + }
- ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev); -unlock: mutex_unlock(&fmd->media_dev.graph_mutex); + + ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev); if (ret < 0) return ret;
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c index 16326437767f..d8beebd1b090 100644 --- a/drivers/media/platform/ti/davinci/vpif_capture.c +++ b/drivers/media/platform/ti/davinci/vpif_capture.c @@ -1602,7 +1602,7 @@ vpif_capture_get_pdata(struct platform_device *pdev, * This creates device entries by register itself to the V4L2 driver and * initializes fields of each channel objects */ -static __init int vpif_probe(struct platform_device *pdev) +static int vpif_probe(struct platform_device *pdev) { struct vpif_subdev_info *subdevdata; struct i2c_adapter *i2c_adap; @@ -1809,7 +1809,7 @@ static int vpif_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);
-static __refdata struct platform_driver vpif_driver = { +static struct platform_driver vpif_driver = { .driver = { .name = VPIF_DRIVER_NAME, .pm = &vpif_pm_ops, diff --git a/drivers/media/platform/ti/davinci/vpif_display.c b/drivers/media/platform/ti/davinci/vpif_display.c index 76d8fa8ad088..1fe05a02a6e7 100644 --- a/drivers/media/platform/ti/davinci/vpif_display.c +++ b/drivers/media/platform/ti/davinci/vpif_display.c @@ -1216,7 +1216,7 @@ static int vpif_probe_complete(void) * vpif_probe: This function creates device entries by register itself to the * V4L2 driver and initializes fields of each channel objects */ -static __init int vpif_probe(struct platform_device *pdev) +static int vpif_probe(struct platform_device *pdev) { struct vpif_subdev_info *subdevdata; struct i2c_adapter *i2c_adap; @@ -1392,7 +1392,7 @@ static int vpif_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);
-static __refdata struct platform_driver vpif_driver = { +static struct platform_driver vpif_driver = { .driver = { .name = VPIF_DRIVER_NAME, .pm = &vpif_pm_ops, diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c index 5c1d799d8618..022d16294ae0 100644 --- a/drivers/media/platform/verisilicon/hantro_g2.c +++ b/drivers/media/platform/verisilicon/hantro_g2.c @@ -5,43 +5,93 @@ * Copyright (C) 2021 Collabora Ltd, Andrzej Pietrasiewicz andrzej.p@collabora.com */
+#include <linux/delay.h> #include "hantro_hw.h" #include "hantro_g2_regs.h"
#define G2_ALIGN 16
-void hantro_g2_check_idle(struct hantro_dev *vpu) +static bool hantro_g2_active(struct hantro_ctx *ctx) { - int i; - - for (i = 0; i < 3; i++) { - u32 status; - - /* Make sure the VPU is idle */ - status = vdpu_read(vpu, G2_REG_INTERRUPT); - if (status & G2_REG_INTERRUPT_DEC_E) { - dev_warn(vpu->dev, "device still running, aborting"); - status |= G2_REG_INTERRUPT_DEC_ABORT_E | G2_REG_INTERRUPT_DEC_IRQ_DIS; - vdpu_write(vpu, status, G2_REG_INTERRUPT); - } + struct hantro_dev *vpu = ctx->dev; + u32 status; + + status = vdpu_read(vpu, G2_REG_INTERRUPT); + + return (status & G2_REG_INTERRUPT_DEC_E); +} + +/** + * hantro_g2_reset: + * @ctx: the hantro context + * + * Emulates a reset using the Hantro abort function. Failing this procedure would + * result in programming a running IP, which leads to a CPU hang. + * + * Using a hard reset procedure instead is preferred. + */ +void hantro_g2_reset(struct hantro_ctx *ctx) +{ + struct hantro_dev *vpu = ctx->dev; + u32 status; + + status = vdpu_read(vpu, G2_REG_INTERRUPT); + if (status & G2_REG_INTERRUPT_DEC_E) { + dev_warn_ratelimited(vpu->dev, "device still running, aborting"); + status |= G2_REG_INTERRUPT_DEC_ABORT_E | G2_REG_INTERRUPT_DEC_IRQ_DIS; + vdpu_write(vpu, status, G2_REG_INTERRUPT); + + do { + mdelay(1); + } while (hantro_g2_active(ctx)); } }
irqreturn_t hantro_g2_irq(int irq, void *dev_id) { struct hantro_dev *vpu = dev_id; - enum vb2_buffer_state state; u32 status;
status = vdpu_read(vpu, G2_REG_INTERRUPT); - state = (status & G2_REG_INTERRUPT_DEC_RDY_INT) ? - VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
- vdpu_write(vpu, 0, G2_REG_INTERRUPT); - vdpu_write(vpu, G2_REG_CONFIG_DEC_CLK_GATE_E, G2_REG_CONFIG); + if (!(status & G2_REG_INTERRUPT_DEC_IRQ)) + return IRQ_NONE; + + hantro_reg_write(vpu, &g2_dec_irq, 0); + hantro_reg_write(vpu, &g2_dec_int_stat, 0); + hantro_reg_write(vpu, &g2_clk_gate_e, 1); + + if (status & G2_REG_INTERRUPT_DEC_RDY_INT) { + hantro_irq_done(vpu, VB2_BUF_STATE_DONE); + return IRQ_HANDLED; + } + + if (status & G2_REG_INTERRUPT_DEC_ABORT_INT) { + /* disabled on abort, though lets be safe and handle it */ + dev_warn_ratelimited(vpu->dev, "decode operation aborted."); + return IRQ_HANDLED; + } + + if (status & G2_REG_INTERRUPT_DEC_LAST_SLICE_INT) + dev_warn_ratelimited(vpu->dev, "not all macroblocks were decoded."); + + if (status & G2_REG_INTERRUPT_DEC_BUS_INT) + dev_warn_ratelimited(vpu->dev, "bus error detected."); + + if (status & G2_REG_INTERRUPT_DEC_ERROR_INT) + dev_warn_ratelimited(vpu->dev, "decode error detected."); + + if (status & G2_REG_INTERRUPT_DEC_TIMEOUT) + dev_warn_ratelimited(vpu->dev, "frame decode timed out.");
- hantro_irq_done(vpu, state); + /** + * If the decoding hasn't stopped, let it continue. The hardware timeout + * will trigger if it is truly stuck. + */ + if (status & G2_REG_INTERRUPT_DEC_E) + return IRQ_HANDLED;
+ hantro_irq_done(vpu, VB2_BUF_STATE_ERROR); return IRQ_HANDLED; }
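
The rewritten handler spreads its decisions across several branches; the stand-alone sketch below condenses the order in which the status word is interpreted. It only mirrors the decision logic, using the bit positions this patch adds to hantro_g2_regs.h, not the driver's register helpers.

#include <stdio.h>

/* Interrupt status bits as introduced in hantro_g2_regs.h by this patch. */
#define DEC_TIMEOUT   (1u << 18)
#define DEC_ERROR_INT (1u << 16)
#define DEC_RDY_INT   (1u << 12)
#define DEC_ABORT_INT (1u << 11)
#define DEC_IRQ       (1u << 8)
#define DEC_E         (1u << 0)

static const char *classify(unsigned int status)
{
        if (!(status & DEC_IRQ))
                return "IRQ_NONE (not this device)";
        if (status & DEC_RDY_INT)
                return "frame done (VB2_BUF_STATE_DONE)";
        if (status & DEC_ABORT_INT)
                return "abort acknowledged, no buffer completion";
        if (status & DEC_E)
                return "core still running despite warnings: let it continue";
        return "failed decode (VB2_BUF_STATE_ERROR)";
}

int main(void)
{
        unsigned int samples[] = { 0x0, DEC_IRQ | DEC_RDY_INT,
                                   DEC_IRQ | DEC_TIMEOUT,
                                   DEC_IRQ | DEC_ERROR_INT | DEC_E };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("status=0x%05x -> %s\n", samples[i], classify(samples[i]));
        return 0;
}

Returning IRQ_NONE when the DEC_IRQ bit is clear is the usual contract for an interrupt line that may be shared; the previous handler acknowledged the interrupt and completed a buffer on every invocation.
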
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c index 0e212198dd65..e8c2e83379de 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c @@ -283,6 +283,15 @@ static void set_params(struct hantro_ctx *ctx) hantro_reg_write(vpu, &g2_apf_threshold, 8); }
+static u32 get_dpb_index(const struct v4l2_ctrl_hevc_decode_params *decode_params, + const u32 index) +{ + if (index > decode_params->num_active_dpb_entries) + return 0; + + return index; +} + static void set_ref_pic_list(struct hantro_ctx *ctx) { const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls; @@ -355,8 +364,10 @@ static void set_ref_pic_list(struct hantro_ctx *ctx) list1[j++] = list1[i++];
for (i = 0; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) { - hantro_reg_write(vpu, &ref_pic_regs0[i], list0[i]); - hantro_reg_write(vpu, &ref_pic_regs1[i], list1[i]); + hantro_reg_write(vpu, &ref_pic_regs0[i], + get_dpb_index(decode_params, list0[i])); + hantro_reg_write(vpu, &ref_pic_regs1[i], + get_dpb_index(decode_params, list1[i])); } }
@@ -582,8 +593,6 @@ int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx) struct hantro_dev *vpu = ctx->dev; int ret;
- hantro_g2_check_idle(vpu); - /* Prepare HEVC decoder context. */ ret = hantro_hevc_dec_prepare_run(ctx); if (ret) diff --git a/drivers/media/platform/verisilicon/hantro_g2_regs.h b/drivers/media/platform/verisilicon/hantro_g2_regs.h index b943b1816db7..c614951121c7 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_regs.h +++ b/drivers/media/platform/verisilicon/hantro_g2_regs.h @@ -22,7 +22,14 @@ #define G2_REG_VERSION G2_SWREG(0)
#define G2_REG_INTERRUPT G2_SWREG(1) +#define G2_REG_INTERRUPT_DEC_LAST_SLICE_INT BIT(19) +#define G2_REG_INTERRUPT_DEC_TIMEOUT BIT(18) +#define G2_REG_INTERRUPT_DEC_ERROR_INT BIT(16) +#define G2_REG_INTERRUPT_DEC_BUF_INT BIT(14) +#define G2_REG_INTERRUPT_DEC_BUS_INT BIT(13) #define G2_REG_INTERRUPT_DEC_RDY_INT BIT(12) +#define G2_REG_INTERRUPT_DEC_ABORT_INT BIT(11) +#define G2_REG_INTERRUPT_DEC_IRQ BIT(8) #define G2_REG_INTERRUPT_DEC_ABORT_E BIT(5) #define G2_REG_INTERRUPT_DEC_IRQ_DIS BIT(4) #define G2_REG_INTERRUPT_DEC_E BIT(0) @@ -35,6 +42,9 @@ #define BUS_WIDTH_128 2 #define BUS_WIDTH_256 3
+#define g2_dec_int_stat G2_DEC_REG(1, 11, 0xf) +#define g2_dec_irq G2_DEC_REG(1, 8, 0x1) + #define g2_strm_swap G2_DEC_REG(2, 28, 0xf) #define g2_strm_swap_old G2_DEC_REG(2, 27, 0x1f) #define g2_pic_swap G2_DEC_REG(2, 22, 0x1f) @@ -225,6 +235,9 @@ #define vp9_filt_level_seg5 G2_DEC_REG(19, 8, 0x3f) #define vp9_quant_seg5 G2_DEC_REG(19, 0, 0xff)
+#define g2_timemout_override_e G2_DEC_REG(45, 31, 0x1) +#define g2_timemout_cycles G2_DEC_REG(45, 0, 0x7fffffff) + #define hevc_cur_poc_00 G2_DEC_REG(46, 24, 0xff) #define hevc_cur_poc_01 G2_DEC_REG(46, 16, 0xff) #define hevc_cur_poc_02 G2_DEC_REG(46, 8, 0xff) diff --git a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c index 342e543dee4c..70e7cedc33b8 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c @@ -893,8 +893,6 @@ int hantro_g2_vp9_dec_run(struct hantro_ctx *ctx) struct vb2_v4l2_buffer *dst; int ret;
- hantro_g2_check_idle(ctx->dev); - ret = start_prepare_run(ctx, &decode_params); if (ret) { hantro_end_prepare_run(ctx); diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h index c9b6556f8b2b..5f2011529f02 100644 --- a/drivers/media/platform/verisilicon/hantro_hw.h +++ b/drivers/media/platform/verisilicon/hantro_hw.h @@ -583,6 +583,7 @@ void hantro_g2_vp9_dec_done(struct hantro_ctx *ctx); int hantro_vp9_dec_init(struct hantro_ctx *ctx); void hantro_vp9_dec_exit(struct hantro_ctx *ctx); void hantro_g2_check_idle(struct hantro_dev *vpu); +void hantro_g2_reset(struct hantro_ctx *ctx); irqreturn_t hantro_g2_irq(int irq, void *dev_id);
#endif /* HANTRO_HW_H_ */ diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c index f850d8bddef6..74fd985a8aad 100644 --- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c +++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c @@ -312,11 +312,13 @@ static const struct hantro_codec_ops imx8mq_vpu_g1_codec_ops[] = { static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = { [HANTRO_MODE_HEVC_DEC] = { .run = hantro_g2_hevc_dec_run, + .reset = hantro_g2_reset, .init = hantro_hevc_dec_init, .exit = hantro_hevc_dec_exit, }, [HANTRO_MODE_VP9_DEC] = { .run = hantro_g2_vp9_dec_run, + .reset = hantro_g2_reset, .done = hantro_g2_vp9_dec_done, .init = hantro_vp9_dec_init, .exit = hantro_vp9_dec_exit, diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c index 988b09191c4c..fd2f056f287b 100644 --- a/drivers/media/rc/st_rc.c +++ b/drivers/media/rc/st_rc.c @@ -284,7 +284,7 @@ static int st_rc_probe(struct platform_device *pdev) else rc_dev->rx_base = rc_dev->base;
- rc_dev->rstc = reset_control_get_optional_exclusive(dev, NULL); + rc_dev->rstc = devm_reset_control_get_optional_exclusive(dev, NULL); if (IS_ERR(rc_dev->rstc)) { ret = PTR_ERR(rc_dev->rstc); goto err; diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c index f3023e91b3eb..3541155c6fc6 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_channel.c +++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c @@ -461,12 +461,15 @@ int vidtv_channel_si_init(struct vidtv_mux *m)
/* assemble all programs and assign to PAT */ vidtv_psi_pat_program_assign(m->si.pat, programs); + programs = NULL;
/* assemble all services and assign to SDT */ vidtv_psi_sdt_service_assign(m->si.sdt, services); + services = NULL;
/* assemble all events and assign to EIT */ vidtv_psi_eit_event_assign(m->si.eit, events); + events = NULL;
m->si.pmt_secs = vidtv_psi_pmt_create_sec_for_each_pat_entry(m->si.pat, m->pcr_pid); diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c index 56c9d521a34a..ee8a76fb5cc1 100644 --- a/drivers/media/usb/dvb-usb/dtv5100.c +++ b/drivers/media/usb/dvb-usb/dtv5100.c @@ -55,6 +55,11 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, } index = (addr << 8) + wbuf[0];
+ if (rlen > sizeof(st->data)) { + warn("rlen = %x is too big!\n", rlen); + return -EINVAL; + } + memcpy(st->data, rbuf, rlen); msleep(1); /* avoid I2C errors */ return usb_control_msg(d->udev, pipe, request, diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index 29cc207194b9..2de104736f87 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -3622,7 +3622,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw, pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Attempted to execute %d byte control-read transfer (limit=%d)", - write_len,PVR2_CTL_BUFFSIZE); + read_len, PVR2_CTL_BUFFSIZE); return -EINVAL; } if ((!write_len) && (!read_len)) { diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c index fb5f988e61f3..90c6902d537d 100644 --- a/drivers/mfd/altera-sysmgr.c +++ b/drivers/mfd/altera-sysmgr.c @@ -117,6 +117,8 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
sysmgr = dev_get_drvdata(dev);
+ put_device(dev); + return sysmgr->regmap; } EXPORT_SYMBOL_GPL(altr_sysmgr_regmap_lookup_by_phandle); diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c index 89b30ef91f4f..99ea0f0c7e43 100644 --- a/drivers/mfd/max77620.c +++ b/drivers/mfd/max77620.c @@ -253,7 +253,7 @@ static int max77620_irq_global_unmask(void *irq_drv_data) return ret; }
-static struct regmap_irq_chip max77620_top_irq_chip = { +static const struct regmap_irq_chip max77620_top_irq_chip = { .name = "max77620-top", .irqs = max77620_top_irqs, .num_irqs = ARRAY_SIZE(max77620_top_irqs), @@ -497,6 +497,7 @@ static int max77620_probe(struct i2c_client *client) const struct i2c_device_id *id = i2c_client_get_device_id(client); const struct regmap_config *rmap_config; struct max77620_chip *chip; + struct regmap_irq_chip *chip_desc; const struct mfd_cell *mfd_cells; int n_mfd_cells; bool pm_off; @@ -507,6 +508,14 @@ static int max77620_probe(struct i2c_client *client) return -ENOMEM;
i2c_set_clientdata(client, chip); + + chip_desc = devm_kmemdup(&client->dev, &max77620_top_irq_chip, + sizeof(max77620_top_irq_chip), + GFP_KERNEL); + if (!chip_desc) + return -ENOMEM; + chip_desc->irq_drv_data = chip; + chip->dev = &client->dev; chip->chip_irq = client->irq; chip->chip_id = (enum max77620_chip_id)id->driver_data; @@ -543,11 +552,9 @@ static int max77620_probe(struct i2c_client *client) if (ret < 0) return ret;
- max77620_top_irq_chip.irq_drv_data = chip; ret = devm_regmap_add_irq_chip(chip->dev, chip->rmap, client->irq, IRQF_ONESHOT | IRQF_SHARED, 0, - &max77620_top_irq_chip, - &chip->top_irq_data); + chip_desc, &chip->top_irq_data); if (ret < 0) { dev_err(chip->dev, "Failed to add regmap irq: %d\n", ret); return ret; diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index 67d9391f1855..903da9aa428f 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -49,7 +49,7 @@ config INTEL_MEI_TXE config INTEL_MEI_GSC tristate "Intel MEI GSC embedded device" depends on INTEL_MEI_ME - depends on DRM_I915 + depends on DRM_I915 || DRM_XE help Intel auxiliary driver for GSC devices embedded in Intel graphics devices.
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index c817d8c21641..6653fc53c951 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -1778,8 +1778,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info, * @pages_lock . We keep holding @comm_lock since we will need it in a * second. */ - balloon_page_delete(page); - + balloon_page_finalize(page); put_page(page);
/* Inflate */ diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 7199cb0bd0b9..fb0a018d02cf 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -291,14 +291,14 @@ config MMC_SDHCI_ESDHC_MCF
config MMC_SDHCI_ESDHC_IMX tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller" - depends on ARCH_MXC || COMPILE_TEST + depends on ARCH_MXC || ARCH_S32 || COMPILE_TEST depends on MMC_SDHCI_PLTFM depends on OF select MMC_SDHCI_IO_ACCESSORS select MMC_CQHCI help This selects the Freescale eSDHC/uSDHC controller support - found on i.MX25, i.MX35 i.MX5x and i.MX6x. + found on i.MX25, i.MX35, i.MX5x, i.MX6x, and S32G.
If you have a controller with this interface, say Y or M here.
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 74234ee5f608..0589bbbc064b 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -344,41 +344,43 @@ static void sdhci_msm_v5_variant_writel_relaxed(u32 val, writel_relaxed(val, host->ioaddr + offset); }
-static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host) +static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host, + unsigned int clock, + unsigned int timing) { - struct mmc_ios ios = host->mmc->ios; /* * The SDHC requires internal clock frequency to be double the * actual clock that will be set for DDR mode. The controller * uses the faster clock(100/400MHz) for some of its parts and * send the actual required clock (50/200MHz) to the card. */ - if (ios.timing == MMC_TIMING_UHS_DDR50 || - ios.timing == MMC_TIMING_MMC_DDR52 || - ios.timing == MMC_TIMING_MMC_HS400 || + if (timing == MMC_TIMING_UHS_DDR50 || + timing == MMC_TIMING_MMC_DDR52 || + (timing == MMC_TIMING_MMC_HS400 && + clock == MMC_HS200_MAX_DTR) || host->flags & SDHCI_HS400_TUNING) return 2; return 1; }
static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host, - unsigned int clock) + unsigned int clock, + unsigned int timing) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); - struct mmc_ios curr_ios = host->mmc->ios; struct clk *core_clk = msm_host->bulk_clks[0].clk; unsigned long achieved_rate; unsigned int desired_rate; unsigned int mult; int rc;
- mult = msm_get_clock_mult_for_bus_mode(host); + mult = msm_get_clock_mult_for_bus_mode(host, clock, timing); desired_rate = clock * mult; rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate); if (rc) { pr_err("%s: Failed to set clock at rate %u at timing %d\n", - mmc_hostname(host->mmc), desired_rate, curr_ios.timing); + mmc_hostname(host->mmc), desired_rate, timing); return; }
@@ -397,7 +399,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host, msm_host->clk_rate = desired_rate;
pr_debug("%s: Setting clock at rate %lu at timing %d\n", - mmc_hostname(host->mmc), achieved_rate, curr_ios.timing); + mmc_hostname(host->mmc), achieved_rate, timing); }
/* Platform specific tuning */ @@ -1239,7 +1241,7 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) */ if (host->flags & SDHCI_HS400_TUNING) { sdhci_msm_hc_select_mode(host); - msm_set_clock_rate_for_bus_mode(host, ios.clock); + msm_set_clock_rate_for_bus_mode(host, ios.clock, ios.timing); host->flags &= ~SDHCI_HS400_TUNING; }
@@ -1864,6 +1866,7 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct mmc_ios ios = host->mmc->ios;
if (!clock) { host->mmc->actual_clock = msm_host->clk_rate = 0; @@ -1872,7 +1875,7 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_msm_hc_select_mode(host);
- msm_set_clock_rate_for_bus_mode(host, clock); + msm_set_clock_rate_for_bus_mode(host, ios.clock, ios.timing); out: __sdhci_msm_set_clock(host, clock); } diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 30daa2db80b1..e20266950254 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -99,7 +99,7 @@ #define HIWORD_UPDATE(val, mask, shift) \ ((val) << (shift) | (mask) << ((shift) + 16))
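
The new multiplier rule is worth spelling out with numbers. Below is a stand-alone sketch; the timing names and the 200 MHz HS200 ceiling stand in for the MMC definitions, and the SDHCI_HS400_TUNING special case is left out for brevity.

#include <stdio.h>

/* Simplified stand-ins for the MMC timing modes; values are illustrative. */
enum timing { LEGACY, DDR50, DDR52, HS200, HS400 };

#define HS200_MAX_DTR 200000000u        /* assumed 200 MHz, as in MMC_HS200_MAX_DTR */

/* Mirror of the new rule: DDR modes, and HS400 only at its full 200 MHz rate,
 * need the controller clock to run at twice the card clock. */
static unsigned int clock_mult(enum timing t, unsigned int clock)
{
        if (t == DDR50 || t == DDR52 || (t == HS400 && clock == HS200_MAX_DTR))
                return 2;
        return 1;
}

int main(void)
{
        printf("DDR52 @ 52MHz  -> core clock %u Hz\n",
               52000000u * clock_mult(DDR52, 52000000u));
        printf("HS400 @ 200MHz -> core clock %u Hz\n",
               200000000u * clock_mult(HS400, 200000000u));
        printf("HS400 @ 400kHz -> core clock %u Hz\n",
               400000u * clock_mult(HS400, 400000u));
        return 0;
}

Passing clock and timing explicitly matters in the last case: at 400 kHz with HS400 timing the doubled rate is no longer requested, which the old lookup based on the current ios could get wrong.
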
-#define CD_STABLE_TIMEOUT_US 1000000 +#define CD_STABLE_TIMEOUT_US 2000000 #define CD_STABLE_MAX_SLEEP_US 10
/** diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 6811a714349d..208532d86cc9 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -425,9 +425,12 @@ int add_mtd_partitions(struct mtd_info *parent,
mtd_add_partition_attrs(child);
- /* Look for subpartitions */ + /* Look for subpartitions (skip if no maching parser found) */ ret = parse_mtd_partitions(child, parts[i].types, NULL); - if (ret < 0) { + if (ret < 0 && ret == -ENOENT) { + pr_debug("Skip parsing subpartitions: %d\n", ret); + continue; + } else if (ret < 0) { pr_err("Failed to parse subpartitions: %d\n", ret); goto err_del_partitions; } diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index 9f7ce5763e71..9d9b8ce4d5aa 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -254,6 +254,30 @@ static const struct flash_info winbond_nor_parts[] = { .id = SNOR_ID(0xef, 0x80, 0x20), .name = "w25q512nwm", .otp = SNOR_OTP(256, 3, 0x1000, 0x1000), + }, { + /* W25Q01NWxxIQ */ + .id = SNOR_ID(0xef, 0x60, 0x21), + .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP, + }, { + /* W25Q01NWxxIM */ + .id = SNOR_ID(0xef, 0x80, 0x21), + .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP, + }, { + /* W25Q02NWxxIM */ + .id = SNOR_ID(0xef, 0x80, 0x22), + .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP, + }, { + /* W25H512NWxxAM */ + .id = SNOR_ID(0xef, 0xa0, 0x20), + .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP, + }, { + /* W25H01NWxxAM */ + .id = SNOR_ID(0xef, 0xa0, 0x21), + .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP, + }, { + /* W25H02NWxxAM */ + .id = SNOR_ID(0xef, 0xa0, 0x22), + .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6 | SPI_NOR_4BIT_BP, }, };
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 36540a254458..4926c00b7879 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -1071,7 +1071,7 @@ static int gs_can_open(struct net_device *netdev) usb_free_urb(urb); out_usb_kill_anchored_urbs: if (!parent->active_channels) { - usb_kill_anchored_urbs(&dev->tx_submitted); + usb_kill_anchored_urbs(&parent->rx_submitted);
if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) gs_usb_timestamp_stop(parent); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 01eb62706412..0b666a77ea97 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1972,6 +1972,9 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, if (!ent->is_valid) return 0;
+ if (is_multicast_ether_addr(ent->mac)) + return 0; + if (port != ent->port) return 0;
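
The added check relies on the standard Ethernet convention that an address is multicast when the I/G bit, the least-significant bit of the first octet, is set; that is what is_multicast_ether_addr() tests. A stand-alone rendering of the test, independent of the kernel helper:

#include <stdio.h>

/* An Ethernet address is multicast iff the I/G bit (bit 0 of byte 0) is set. */
static int mac_is_multicast(const unsigned char mac[6])
{
        return mac[0] & 0x01;
}

int main(void)
{
        const unsigned char ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("%02x:...:%02x multicast=%d\n", ucast[0], ucast[5], mac_is_multicast(ucast));
        printf("%02x:...:%02x multicast=%d\n", mcast[0], mcast[5], mac_is_multicast(mcast));
        return 0;
}

Skipping such entries in b53_fdb_copy() keeps multicast ARL entries out of the unicast FDB dump.
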
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 32e633d11348..6d2c401bb246 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -2036,6 +2036,7 @@ static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata, { if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) { pdata->rx_adapt_retries = 0; + pdata->mode_set = false; return; }
@@ -2082,6 +2083,7 @@ static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata) */ netif_dbg(pdata, link, pdata->netdev, "Block_lock done"); pdata->rx_adapt_done = true; + pdata->rx_adapt_retries = 0; pdata->mode_set = false; return; } diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index e5809ad5eb82..29f0de9e3154 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1789,6 +1789,9 @@ static int b44_nway_reset(struct net_device *dev) u32 bmcr; int r;
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY) + return phy_ethtool_nway_reset(dev); + spin_lock_irq(&bp->lock); b44_readphy(bp, MII_BMCR, &bmcr); b44_readphy(bp, MII_BMCR, &bmcr); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 844812bd6536..fa3c6515cc4d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -268,13 +268,11 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, case XDP_TX: rx_buf = &rxr->rx_buf_ring[cons]; mapping = rx_buf->mapping - bp->rx_dma_offset; - *event &= BNXT_TX_CMP_EVENT;
if (unlikely(xdp_buff_has_frags(xdp))) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
tx_needed += sinfo->nr_frags; - *event = BNXT_AGG_EVENT; }
if (tx_avail < tx_needed) { @@ -287,6 +285,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+ *event &= ~BNXT_RX_EVENT; *event |= BNXT_TX_EVENT; __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, NEXT_RX(rxr->rx_prod), xdp); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 8a53c9538b84..095c1cfcc9a1 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -759,7 +759,6 @@ static void macb_mac_link_up(struct phylink_config *config, /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down * cleared the pipeline and control registers. */ - bp->macbgem_ops.mog_init_rings(bp); macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) @@ -2985,6 +2984,8 @@ static int macb_open(struct net_device *dev) goto pm_exit; }
+ bp->macbgem_ops.mog_init_rings(bp); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { napi_enable(&queue->napi_rx); napi_enable(&queue->napi_tx); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 749b65aab14a..c58e44144c2f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1429,7 +1429,8 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames, int xdp_tx_bd_cnt, i, k; int xdp_tx_frm_cnt = 0;
- if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) + if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags) || + !netif_carrier_ok(ndev))) return -ENETDOWN;
enetc_lock_mdio(); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d1800868c2e0..9018a7d3864f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3934,7 +3934,12 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, txq->bd.cur = bdp;
/* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); + if (!(fep->quirks & FEC_QUIRK_ERR007885) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active);
return 0; } diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 497a19ca198d..43d0c40de5fc 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -500,7 +500,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv) block->priv = priv; err = request_irq(priv->msix_vectors[msix_idx].vector, gve_is_gqi(priv) ? gve_intr : gve_intr_dqo, - 0, block->name, block); + IRQF_NO_AUTOEN, block->name, block); if (err) { dev_err(&priv->pdev->dev, "Failed to receive msix vector %d\n", i); diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c index 2349750075a5..90805ab65f92 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.c +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -111,11 +111,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx, struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
netif_napi_add(priv->dev, &block->napi, gve_poll); + enable_irq(block->irq); }
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) { struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ disable_irq(block->irq); netif_napi_del(&block->napi); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f5eafd1ded41..8dd970ef02ac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -10572,6 +10572,9 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, bool writen_to_tbl = false; int ret = 0;
+ if (vlan_id >= VLAN_N_VID) + return -EINVAL; + /* When device is resetting or reset failed, firmware is unable to * handle mailbox. Just record the vlan id, and remove it after * reset finished. diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 59c863306657..9eab095d784b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -193,10 +193,10 @@ static int hclge_get_ring_chain_from_mbx( return -EINVAL;
for (i = 0; i < ring_num; i++) { - if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { + if (req->msg.param[i].tqp_index >= vport->nic.kinfo.num_tqps) { dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n", req->msg.param[i].tqp_index, - vport->nic.kinfo.rss_size - 1U); + vport->nic.kinfo.num_tqps - 1U); return -EINVAL; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index e8573358309c..0bf8fc7e6b3a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -370,12 +370,12 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) new_tqps = kinfo->rss_size * num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
- kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) return -ENOMEM;
- for (i = 0; i < kinfo->num_tqps; i++) { + for (i = 0; i < hdev->num_tqps; i++) { hdev->htqp[i].q.handle = &hdev->nic; hdev->htqp[i].q.tqp_index = i; kinfo->tqp[i] = &hdev->htqp[i].q; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index ab7ae418d294..67d7651b6411 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -4088,7 +4088,15 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, u32 length, const u8 *data) { struct e1000_hw *hw = &adapter->hw; - u8 last_byte = *(data + length - 1); + u8 last_byte; + + /* Guard against OOB on data[length - 1] */ + if (unlikely(!length)) + return false; + /* Upper bound: length must not exceed rx_buffer_len */ + if (unlikely(length > adapter->rx_buffer_len)) + return false; + last_byte = *(data + length - 1);
if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { unsigned long irq_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b8de97343ad3..de3d5e5b8306 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1415,4 +1415,15 @@ static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf) return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL; }
+static inline u32 i40e_get_max_num_descriptors(const struct i40e_pf *pf) +{ + const struct i40e_hw *hw = &pf->hw; + + switch (hw->mac.type) { + case I40E_MAC_XL710: + return I40E_MAX_NUM_DESCRIPTORS_XL710; + default: + return I40E_MAX_NUM_DESCRIPTORS; + } +} #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index bce5b76f1e7a..9a96f67fb648 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2010,18 +2010,6 @@ static void i40e_get_drvinfo(struct net_device *netdev, drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN; }
-static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf) -{ - struct i40e_hw *hw = &pf->hw; - - switch (hw->mac.type) { - case I40E_MAC_XL710: - return I40E_MAX_NUM_DESCRIPTORS_XL710; - default: - return I40E_MAX_NUM_DESCRIPTORS; - } -} - static void i40e_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index eae5923104f7..2dc737c7e3fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2256,6 +2256,7 @@ static void i40e_set_rx_mode(struct net_device *netdev) vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state); } + i40e_service_event_schedule(vsi->back); }
/** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 646e394f5190..3251ffa7d994 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -656,7 +656,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 8 */ if (!IS_ALIGNED(info->ring_len, 8) || - info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) { + info->ring_len > i40e_get_max_num_descriptors(pf)) { ret = -EINVAL; goto error_context; } @@ -726,7 +726,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 32 */ if (!IS_ALIGNED(info->ring_len, 32) || - info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) { + info->ring_len > i40e_get_max_num_descriptors(pf)) { ret = -EINVAL; goto error_param; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 5516795cc250..422af897d933 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1718,11 +1718,11 @@ static int iavf_config_rss_reg(struct iavf_adapter *adapter) u16 i;
dw = (u32 *)adapter->rss_key; - for (i = 0; i <= adapter->rss_key_size / 4; i++) + for (i = 0; i < adapter->rss_key_size / 4; i++) wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
dw = (u32 *)adapter->rss_lut; - for (i = 0; i <= adapter->rss_lut_size / 4; i++) + for (i = 0; i < adapter->rss_lut_size / 4; i++) wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
iavf_flush(hw); diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c index 6c913a703df6..41e4bd49402a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -101,6 +101,9 @@ static int idpf_intr_reg_init(struct idpf_vport *vport) intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S; intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S; intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M; + intr->dyn_ctl_swint_trig_m = PF_GLINT_DYN_CTL_SWINT_TRIG_M; + intr->dyn_ctl_sw_itridx_ena_m = + PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, IDPF_PF_ITR_IDX_SPACING); diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 371fc5052420..173ddc248867 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1214,7 +1214,7 @@ void idpf_mbx_task(struct work_struct *work) idpf_mb_irq_enable(adapter); else queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, - msecs_to_jiffies(300)); + usecs_to_jiffies(300));
idpf_recv_mb_msg(adapter); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index a986dd572555..ea0eec59a072 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -179,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb, return 1; }
+/** + * idpf_tx_singleq_dma_map_error - handle TX DMA map errors + * @txq: queue to send buffer on + * @skb: send buffer + * @first: original first buffer info buffer for packet + * @idx: starting point on ring to unwind + */ +static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq, + struct sk_buff *skb, + struct idpf_tx_buf *first, u16 idx) +{ + struct libeth_sq_napi_stats ss = { }; + struct libeth_cq_pp cp = { + .dev = txq->dev, + .ss = &ss, + }; + + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.dma_map_errs); + u64_stats_update_end(&txq->stats_sync); + + /* clear dma mappings for failed tx_buf map */ + for (;;) { + struct idpf_tx_buf *tx_buf; + + tx_buf = &txq->tx_buf[idx]; + libeth_tx_complete(tx_buf, &cp); + if (tx_buf == first) + break; + if (idx == 0) + idx = txq->desc_count; + idx--; + } + + if (skb_is_gso(skb)) { + union idpf_tx_flex_desc *tx_desc; + + /* If we failed a DMA mapping for a TSO packet, we will have + * used one additional descriptor for a context + * descriptor. Reset that here. + */ + tx_desc = &txq->flex_tx[idx]; + memset(tx_desc, 0, sizeof(*tx_desc)); + if (idx == 0) + idx = txq->desc_count; + idx--; + } + + /* Update tail in case netdev_xmit_more was previously true */ + idpf_tx_buf_hw_update(txq, idx, false); +} + /** * idpf_tx_singleq_map - Build the Tx base descriptor * @tx_q: queue to send buffer on @@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, for (frag = &skb_shinfo(skb)->frags[0];; frag++) { unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma)) - return idpf_tx_dma_map_error(tx_q, skb, first, i); + if (unlikely(dma_mapping_error(tx_q->dev, dma))) + return idpf_tx_singleq_dma_map_error(tx_q, skb, + first, i);
/* record length, and DMA address */ dma_unmap_len_set(tx_buf, len, size); @@ -362,11 +415,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, { struct idpf_tx_offload_params offload = { }; struct idpf_tx_buf *first; + u32 count, buf_count = 1; int csum, tso, needed; - unsigned int count; __be16 protocol;
- count = idpf_tx_desc_count_required(tx_q, skb); + count = idpf_tx_res_count_required(tx_q, skb, &buf_count); if (unlikely(!count)) return idpf_tx_drop_skb(tx_q, skb);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 11bb61c12375..d03fb063a1ef 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -7,12 +7,7 @@ #include "idpf.h" #include "idpf_virtchnl.h"
-struct idpf_tx_stash { - struct hlist_node hlist; - struct libeth_sqe buf; -}; - -#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv) +#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv) LIBETH_SQE_CHECK_PRIV(u32);
/** @@ -38,36 +33,6 @@ static bool idpf_chk_linearize(const struct sk_buff *skb, return true; }
-/** - * idpf_buf_lifo_push - push a buffer pointer onto stack - * @stack: pointer to stack struct - * @buf: pointer to buf to push - * - * Returns 0 on success, negative on failure - **/ -static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack, - struct idpf_tx_stash *buf) -{ - if (unlikely(stack->top == stack->size)) - return -ENOSPC; - - stack->bufs[stack->top++] = buf; - - return 0; -} - -/** - * idpf_buf_lifo_pop - pop a buffer pointer from stack - * @stack: pointer to stack struct - **/ -static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack) -{ - if (unlikely(!stack->top)) - return NULL; - - return stack->bufs[--stack->top]; -} - /** * idpf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure @@ -96,52 +61,22 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) { struct libeth_sq_napi_stats ss = { }; - struct idpf_buf_lifo *buf_stack; - struct idpf_tx_stash *stash; struct libeth_cq_pp cp = { .dev = txq->dev, .ss = &ss, }; - struct hlist_node *tmp; - u32 i, tag; + u32 i;
/* Buffers already cleared, nothing to do */ if (!txq->tx_buf) return;
/* Free all the Tx buffer sk_buffs */ - for (i = 0; i < txq->desc_count; i++) + for (i = 0; i < txq->buf_pool_size; i++) libeth_tx_complete(&txq->tx_buf[i], &cp);
kfree(txq->tx_buf); txq->tx_buf = NULL; - - if (!idpf_queue_has(FLOW_SCH_EN, txq)) - return; - - buf_stack = &txq->stash->buf_stack; - if (!buf_stack->bufs) - return; - - /* - * If a Tx timeout occurred, there are potentially still bufs in the - * hash table, free them here. - */ - hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash, - hlist) { - if (!stash) - continue; - - libeth_tx_complete(&stash->buf, &cp); - hash_del(&stash->hlist); - idpf_buf_lifo_push(buf_stack, stash); - } - - for (i = 0; i < buf_stack->size; i++) - kfree(buf_stack->bufs[i]); - - kfree(buf_stack->bufs); - buf_stack->bufs = NULL; }
/** @@ -158,6 +93,9 @@ static void idpf_tx_desc_rel(struct idpf_tx_queue *txq) if (!txq->desc_ring) return;
+ if (txq->refillq) + kfree(txq->refillq->ring); + dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); txq->desc_ring = NULL; txq->next_to_use = 0; @@ -214,41 +152,18 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport) */ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q) { - struct idpf_buf_lifo *buf_stack; - int buf_size; - int i; - /* Allocate book keeping buffers only. Buffers to be supplied to HW * are allocated by kernel network stack and received as part of skb */ - buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count; - tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL); + if (idpf_queue_has(FLOW_SCH_EN, tx_q)) + tx_q->buf_pool_size = U16_MAX; + else + tx_q->buf_pool_size = tx_q->desc_count; + tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf), + GFP_KERNEL); if (!tx_q->tx_buf) return -ENOMEM;
- if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) - return 0; - - buf_stack = &tx_q->stash->buf_stack; - - /* Initialize tx buf stack for out-of-order completions if - * flow scheduling offload is enabled - */ - buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs), - GFP_KERNEL); - if (!buf_stack->bufs) - return -ENOMEM; - - buf_stack->size = tx_q->desc_count; - buf_stack->top = tx_q->desc_count; - - for (i = 0; i < tx_q->desc_count; i++) { - buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]), - GFP_KERNEL); - if (!buf_stack->bufs[i]) - return -ENOMEM; - } - return 0; }
@@ -263,6 +178,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport, struct idpf_tx_queue *tx_q) { struct device *dev = tx_q->dev; + struct idpf_sw_queue *refillq; int err;
err = idpf_tx_buf_alloc_all(tx_q); @@ -286,6 +202,31 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport, tx_q->next_to_clean = 0; idpf_queue_set(GEN_CHK, tx_q);
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) + return 0; + + refillq = tx_q->refillq; + refillq->desc_count = tx_q->buf_pool_size; + refillq->ring = kcalloc(refillq->desc_count, sizeof(u32), + GFP_KERNEL); + if (!refillq->ring) { + err = -ENOMEM; + goto err_alloc; + } + + for (unsigned int i = 0; i < refillq->desc_count; i++) + refillq->ring[i] = + FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) | + FIELD_PREP(IDPF_RFL_BI_GEN_M, + idpf_queue_has(GEN_CHK, refillq)); + + /* Go ahead and flip the GEN bit since this counts as filling + * up the ring, i.e. we already ring wrapped. + */ + idpf_queue_change(GEN_CHK, refillq); + + tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP; + return 0;
err_alloc: @@ -336,8 +277,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) for (i = 0; i < vport->num_txq_grp; i++) { for (j = 0; j < vport->txq_grps[i].num_txq; j++) { struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; - u8 gen_bits = 0; - u16 bufidx_mask;
err = idpf_tx_desc_alloc(vport, txq); if (err) { @@ -346,34 +285,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) i); goto err_out; } - - if (!idpf_is_queue_model_split(vport->txq_model)) - continue; - - txq->compl_tag_cur_gen = 0; - - /* Determine the number of bits in the bufid - * mask and add one to get the start of the - * generation bits - */ - bufidx_mask = txq->desc_count - 1; - while (bufidx_mask >> 1) { - txq->compl_tag_gen_s++; - bufidx_mask = bufidx_mask >> 1; - } - txq->compl_tag_gen_s++; - - gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH - - txq->compl_tag_gen_s; - txq->compl_tag_gen_max = GETMAXVAL(gen_bits); - - /* Set bufid mask based on location of first - * gen bit; it cannot simply be the descriptor - * ring size-1 since we can have size values - * where not all of those bits are set. - */ - txq->compl_tag_bufid_m = - GETMAXVAL(txq->compl_tag_gen_s); }
if (!idpf_is_queue_model_split(vport->txq_model)) @@ -622,18 +533,18 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq) }
/** - * idpf_rx_post_buf_refill - Post buffer id to refill queue + * idpf_post_buf_refill - Post buffer id to refill queue * @refillq: refill queue to post to * @buf_id: buffer id to post */ -static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) +static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) { u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */ refillq->ring[nta] = - FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) | - FIELD_PREP(IDPF_RX_BI_GEN_M, + FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) | + FIELD_PREP(IDPF_RFL_BI_GEN_M, idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) { @@ -1014,6 +925,11 @@ static void idpf_txq_group_rel(struct idpf_vport *vport) struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++) { + if (flow_sch_en) { + kfree(txq_grp->txqs[j]->refillq); + txq_grp->txqs[j]->refillq = NULL; + } + kfree(txq_grp->txqs[j]); txq_grp->txqs[j] = NULL; } @@ -1023,9 +939,6 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq); txq_grp->complq = NULL; - - if (flow_sch_en) - kfree(txq_grp->stashes); } kfree(vport->txq_grps); vport->txq_grps = NULL; @@ -1378,7 +1291,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_adapter *adapter = vport->adapter; - struct idpf_txq_stash *stashes; int j;
tx_qgrp->vport = vport; @@ -1391,15 +1303,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) goto err_alloc; }
- if (split && flow_sch_en) { - stashes = kcalloc(num_txq, sizeof(*stashes), - GFP_KERNEL); - if (!stashes) - goto err_alloc; - - tx_qgrp->stashes = stashes; - } - for (j = 0; j < tx_qgrp->num_txq; j++) { struct idpf_tx_queue *q = tx_qgrp->txqs[j];
@@ -1419,12 +1322,14 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) if (!flow_sch_en) continue;
- if (split) { - q->stash = &stashes[j]; - hash_init(q->stash->sched_buf_hash); - } - idpf_queue_set(FLOW_SCH_EN, q); + + q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL); + if (!q->refillq) + goto err_alloc; + + idpf_queue_set(GEN_CHK, q->refillq); + idpf_queue_set(RFL_GEN_CHK, q->refillq); }
if (!split) @@ -1674,82 +1579,6 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) wake_up(&vport->sw_marker_wq); }
-/** - * idpf_tx_clean_stashed_bufs - clean bufs that were stored for - * out of order completions - * @txq: queue to clean - * @compl_tag: completion tag of packet to clean (from completion descriptor) - * @cleaned: pointer to stats struct to track cleaned packets/bytes - * @budget: Used to determine if we are in netpoll - */ -static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, - u16 compl_tag, - struct libeth_sq_napi_stats *cleaned, - int budget) -{ - struct idpf_tx_stash *stash; - struct hlist_node *tmp_buf; - struct libeth_cq_pp cp = { - .dev = txq->dev, - .ss = cleaned, - .napi = budget, - }; - - /* Buffer completion */ - hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf, - hlist, compl_tag) { - if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag)) - continue; - - hash_del(&stash->hlist); - libeth_tx_complete(&stash->buf, &cp); - - /* Push shadow buf back onto stack */ - idpf_buf_lifo_push(&txq->stash->buf_stack, stash); - } -} - -/** - * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a - * later time (only relevant for flow scheduling mode) - * @txq: Tx queue to clean - * @tx_buf: buffer to store - */ -static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq, - struct idpf_tx_buf *tx_buf) -{ - struct idpf_tx_stash *stash; - - if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) - return 0; - - stash = idpf_buf_lifo_pop(&txq->stash->buf_stack); - if (unlikely(!stash)) { - net_err_ratelimited("%s: No out-of-order TX buffers left!\n", - netdev_name(txq->netdev)); - - return -ENOMEM; - } - - /* Store buffer params in shadow buffer */ - stash->buf.skb = tx_buf->skb; - stash->buf.bytes = tx_buf->bytes; - stash->buf.packets = tx_buf->packets; - stash->buf.type = tx_buf->type; - stash->buf.nr_frags = tx_buf->nr_frags; - dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma)); - dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len)); - idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf); - - /* Add buffer to buf_hash table to be freed later */ - hash_add(txq->stash->sched_buf_hash, &stash->hlist, - idpf_tx_buf_compl_tag(&stash->buf)); - - tx_buf->type = LIBETH_SQE_EMPTY; - - return 0; -} - #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \ do { \ if (unlikely(++(ntc) == (txq)->desc_count)) { \ @@ -1777,14 +1606,8 @@ do { \ * Separate packet completion events will be reported on the completion queue, * and the buffers will be cleaned separately. The stats are not updated from * this function when using flow-based scheduling. - * - * Furthermore, in flow scheduling mode, check to make sure there are enough - * reserve buffers to stash the packet. If there are not, return early, which - * will leave next_to_clean pointing to the packet that failed to be stashed. - * - * Return: false in the scenario above, true otherwise. */ -static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, +static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, int napi_budget, struct libeth_sq_napi_stats *cleaned, bool descs_only) @@ -1798,7 +1621,12 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, .napi = napi_budget, }; struct idpf_tx_buf *tx_buf; - bool clean_complete = true; + + if (descs_only) { + /* Bump ring index to mark as cleaned. */ + tx_q->next_to_clean = end; + return; + }
tx_desc = &tx_q->flex_tx[ntc]; next_pending_desc = &tx_q->flex_tx[end]; @@ -1818,132 +1646,58 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, break;
eop_idx = tx_buf->rs_idx; + libeth_tx_complete(tx_buf, &cp);
- if (descs_only) { - if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) { - clean_complete = false; - goto tx_splitq_clean_out; - } - - idpf_stash_flow_sch_buffers(tx_q, tx_buf); + /* unmap remaining buffers */ + while (ntc != eop_idx) { + idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, + tx_desc, tx_buf);
- while (ntc != eop_idx) { - idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, - tx_desc, tx_buf); - idpf_stash_flow_sch_buffers(tx_q, tx_buf); - } - } else { + /* unmap any remaining paged data */ libeth_tx_complete(tx_buf, &cp); - - /* unmap remaining buffers */ - while (ntc != eop_idx) { - idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, - tx_desc, tx_buf); - - /* unmap any remaining paged data */ - libeth_tx_complete(tx_buf, &cp); - } }
fetch_next_txq_desc: idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf); }
-tx_splitq_clean_out: tx_q->next_to_clean = ntc; - - return clean_complete; }
-#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \ -do { \ - (buf)++; \ - (ntc)++; \ - if (unlikely((ntc) == (txq)->desc_count)) { \ - buf = (txq)->tx_buf; \ - ntc = 0; \ - } \ -} while (0) - /** - * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers + * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers * @txq: queue to clean - * @compl_tag: completion tag of packet to clean (from completion descriptor) + * @buf_id: packet's starting buffer ID, from completion descriptor * @cleaned: pointer to stats struct to track cleaned packets/bytes * @budget: Used to determine if we are in netpoll * - * Cleans all buffers associated with the input completion tag either from the - * TX buffer ring or from the hash table if the buffers were previously - * stashed. Returns the byte/segment count for the cleaned packet associated - * this completion tag. + * Clean all buffers associated with the packet starting at buf_id. Returns the + * byte/segment count for the cleaned packet. */ -static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, - struct libeth_sq_napi_stats *cleaned, - int budget) +static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id, + struct libeth_sq_napi_stats *cleaned, + int budget) { - u16 idx = compl_tag & txq->compl_tag_bufid_m; struct idpf_tx_buf *tx_buf = NULL; struct libeth_cq_pp cp = { .dev = txq->dev, .ss = cleaned, .napi = budget, }; - u16 ntc, orig_idx = idx; - - tx_buf = &txq->tx_buf[idx];
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX || - idpf_tx_buf_compl_tag(tx_buf) != compl_tag)) - return false; - - if (tx_buf->type == LIBETH_SQE_SKB) + tx_buf = &txq->tx_buf[buf_id]; + if (tx_buf->type == LIBETH_SQE_SKB) { libeth_tx_complete(tx_buf, &cp); + idpf_post_buf_refill(txq->refillq, buf_id); + }
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); + while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) { + buf_id = idpf_tx_buf_next(tx_buf);
- while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) { + tx_buf = &txq->tx_buf[buf_id]; libeth_tx_complete(tx_buf, &cp); - idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); + idpf_post_buf_refill(txq->refillq, buf_id); } - - /* - * It's possible the packet we just cleaned was an out of order - * completion, which means we can stash the buffers starting from - * the original next_to_clean and reuse the descriptors. We need - * to compare the descriptor ring next_to_clean packet's "first" buffer - * to the "first" buffer of the packet we just cleaned to determine if - * this is the case. Howevever, next_to_clean can point to either a - * reserved buffer that corresponds to a context descriptor used for the - * next_to_clean packet (TSO packet) or the "first" buffer (single - * packet). The orig_idx from the packet we just cleaned will always - * point to the "first" buffer. If next_to_clean points to a reserved - * buffer, let's bump ntc once and start the comparison from there. - */ - ntc = txq->next_to_clean; - tx_buf = &txq->tx_buf[ntc]; - - if (tx_buf->type == LIBETH_SQE_CTX) - idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf); - - /* - * If ntc still points to a different "first" buffer, clean the - * descriptor ring and stash all of the buffers for later cleaning. If - * we cannot stash all of the buffers, next_to_clean will point to the - * "first" buffer of the packet that could not be stashed and cleaning - * will start there next time. - */ - if (unlikely(tx_buf != &txq->tx_buf[orig_idx] && - !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned, - true))) - return true; - - /* - * Otherwise, update next_to_clean to reflect the cleaning that was - * done above. - */ - txq->next_to_clean = idx; - - return true; }
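With the stash/hash-table removal above, a packet's buffers are now linked through idpf_tx_buf_next() (stored in the buffer's priv field) and terminated by IDPF_TXBUF_NULL, so idpf_tx_clean_bufs() just follows the chain and reposts each ID to the refill queue. A simplified standalone sketch of that chain walk, using toy types (toy_buf, BUF_NULL) rather than the real idpf_tx_buf, and ignoring the SKB/FRAG type distinction:

	#include <stdio.h>
	#include <stdint.h>

	#define BUF_NULL UINT32_MAX

	struct toy_buf {
		uint32_t next;	/* next buffer ID in the packet, or BUF_NULL */
	};

	/* Complete and repost every buffer of one packet, starting at buf_id. */
	static void clean_chain(struct toy_buf *pool, uint32_t buf_id)
	{
		for (;;) {
			printf("complete + repost buf_id %u\n", buf_id);
			if (pool[buf_id].next == BUF_NULL)
				break;
			buf_id = pool[buf_id].next;
		}
	}

	int main(void)
	{
		struct toy_buf pool[8] = { 0 };

		/* A three-buffer packet using IDs 5 -> 2 -> 7. */
		pool[5].next = 2;
		pool[2].next = 7;
		pool[7].next = BUF_NULL;

		clean_chain(pool, 5);
		return 0;
	}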
/** @@ -1962,22 +1716,17 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq, struct libeth_sq_napi_stats *cleaned, int budget) { - u16 compl_tag; + /* RS completion contains queue head for queue based scheduling or + * completion tag for flow based scheduling. + */ + u16 rs_compl_val = le16_to_cpu(desc->q_head_compl_tag.q_head);
if (!idpf_queue_has(FLOW_SCH_EN, txq)) { - u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); - - idpf_tx_splitq_clean(txq, head, budget, cleaned, false); + idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false); return; }
- compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag); - - /* If we didn't clean anything on the ring, this packet must be - * in the hash table. Go clean it there. - */ - if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget)) - idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget); + idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget); }
/** @@ -2094,8 +1843,7 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget, /* Update BQL */ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || - np->state != __IDPF_VPORT_UP || + dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP || !netif_carrier_ok(tx_q->netdev); /* Check if the TXQ needs to and can be restarted */ __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, @@ -2152,15 +1900,21 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); }
-/* Global conditions to tell whether the txq (and related resources) - * has room to allow the use of "size" descriptors. +/** + * idpf_tx_splitq_has_room - check if enough Tx splitq resources are available + * @tx_q: the queue to be checked + * @descs_needed: number of descriptors required for this packet + * @bufs_needed: number of Tx buffers required for this packet + * + * Return: 0 if no room available, 1 otherwise */ -static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size) +static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed, + u32 bufs_needed) { - if (IDPF_DESC_UNUSED(tx_q) < size || + if (IDPF_DESC_UNUSED(tx_q) < descs_needed || IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) || - IDPF_TX_BUF_RSV_LOW(tx_q)) + idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed) return 0; return 1; } @@ -2169,14 +1923,21 @@ static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size) * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions * @tx_q: the queue to be checked * @descs_needed: number of descriptors required for this packet + * @bufs_needed: number of buffers needed for this packet * - * Returns 0 if stop is not needed + * Return: 0 if stop is not needed */ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, - unsigned int descs_needed) + u32 descs_needed, + u32 bufs_needed) { + /* Since we have multiple resources to check for splitq, our + * start,stop_thrs becomes a boolean check instead of a count + * threshold. + */ if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, - idpf_txq_has_room(tx_q, descs_needed), + idpf_txq_has_room(tx_q, descs_needed, + bufs_needed), 1, 1)) return 0;
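idpf_txq_has_room() now gates transmit on three resources: free descriptors, completion-queue headroom, and free buffer IDs in the refill queue; the ID count comes from idpf_tx_splitq_get_free_bufs() (added later in idpf_txrx.h), which uses the usual ring arithmetic with one slot held in reserve. A worked userspace example of that formula, with hypothetical ring sizes and indices:

	#include <stdio.h>

	/* Buffer IDs available between next_to_clean and next_to_use in a
	 * 'count'-entry ring, keeping one slot in reserve, as in
	 * idpf_tx_splitq_get_free_bufs():
	 *   (ntu > ntc ? 0 : count) + ntu - ntc - 1
	 */
	static unsigned int ring_free(unsigned int count, unsigned int ntu,
				      unsigned int ntc)
	{
		return (ntu > ntc ? 0 : count) + ntu - ntc - 1;
	}

	int main(void)
	{
		/* 256-entry refillq, ntu = 10, ntc = 200:
		 * 256 + 10 - 200 - 1 = 65 IDs still available.
		 */
		printf("%u\n", ring_free(256, 10, 200));

		/* ntu ahead of ntc: ntu = 200, ntc = 10 -> 189 available. */
		printf("%u\n", ring_free(256, 200, 10));
		return 0;
	}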
@@ -2218,14 +1979,16 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, }
/** - * idpf_tx_desc_count_required - calculate number of Tx descriptors needed + * idpf_tx_res_count_required - get number of Tx resources needed for this pkt * @txq: queue to send buffer on * @skb: send buffer + * @bufs_needed: (output) number of buffers needed for this skb. * - * Returns number of data descriptors needed for this skb. + * Return: number of data descriptors and buffers needed for this skb. */ -unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, - struct sk_buff *skb) +unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq, + struct sk_buff *skb, + u32 *bufs_needed) { const struct skb_shared_info *shinfo; unsigned int count = 0, i; @@ -2236,6 +1999,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, return count;
shinfo = skb_shinfo(skb); + *bufs_needed += shinfo->nr_frags; for (i = 0; i < shinfo->nr_frags; i++) { unsigned int size;
@@ -2265,71 +2029,89 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, }
/** - * idpf_tx_dma_map_error - handle TX DMA map errors - * @txq: queue to send buffer on - * @skb: send buffer - * @first: original first buffer info buffer for packet - * @idx: starting point on ring to unwind + * idpf_tx_splitq_bump_ntu - adjust NTU and generation + * @txq: the tx ring to wrap + * @ntu: ring index to bump */ -void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, - struct idpf_tx_buf *first, u16 idx) +static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu) { - struct libeth_sq_napi_stats ss = { }; - struct libeth_cq_pp cp = { - .dev = txq->dev, - .ss = &ss, - }; + ntu++;
- u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.dma_map_errs); - u64_stats_update_end(&txq->stats_sync); + if (ntu == txq->desc_count) + ntu = 0;
- /* clear dma mappings for failed tx_buf map */ - for (;;) { - struct idpf_tx_buf *tx_buf; + return ntu; +}
- tx_buf = &txq->tx_buf[idx]; - libeth_tx_complete(tx_buf, &cp); - if (tx_buf == first) - break; - if (idx == 0) - idx = txq->desc_count; - idx--; - } +/** + * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue + * @refillq: refill queue to get buffer ID from + * @buf_id: return buffer ID + * + * Return: true if a buffer ID was found, false if not + */ +static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq, + u32 *buf_id) +{ + u32 ntc = refillq->next_to_clean; + u32 refill_desc;
- if (skb_is_gso(skb)) { - union idpf_tx_flex_desc *tx_desc; + refill_desc = refillq->ring[ntc];
- /* If we failed a DMA mapping for a TSO packet, we will have - * used one additional descriptor for a context - * descriptor. Reset that here. - */ - tx_desc = &txq->flex_tx[idx]; - memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); - if (idx == 0) - idx = txq->desc_count; - idx--; + if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) != + !!(refill_desc & IDPF_RFL_BI_GEN_M))) + return false; + + *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc); + + if (unlikely(++ntc == refillq->desc_count)) { + idpf_queue_change(RFL_GEN_CHK, refillq); + ntc = 0; }
- /* Update tail in case netdev_xmit_more was previously true */ - idpf_tx_buf_hw_update(txq, idx, false); + refillq->next_to_clean = ntc; + + return true; }
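idpf_tx_get_free_buf_id() above only pops an entry when the entry's stored generation bit matches the software-tracked RFL_GEN_CHK state, and toggles that state each time next_to_clean wraps; an entry whose gen bit does not match has not been (re)posted this lap. A small standalone illustration of that generation-bit handshake, simplified to a single gen flag and toy names (toy_refillq, pop_buf_id):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define GEN_BIT    (1u << 16)
	#define BUFID_MASK 0xffffu

	struct toy_refillq {
		uint32_t ring[4];
		uint32_t next_to_clean;
		bool gen;	/* software-tracked generation */
	};

	/* Pop a buffer ID only if the entry's gen bit matches the SW gen;
	 * toggle the SW gen when the index wraps.
	 */
	static bool pop_buf_id(struct toy_refillq *q, uint32_t *buf_id)
	{
		uint32_t desc = q->ring[q->next_to_clean];

		if (q->gen != !!(desc & GEN_BIT))
			return false;	/* not (re)posted yet this lap */

		*buf_id = desc & BUFID_MASK;

		if (++q->next_to_clean == 4) {
			q->next_to_clean = 0;
			q->gen = !q->gen;
		}
		return true;
	}

	int main(void)
	{
		/* Ring pre-filled with IDs 0..3 and gen = 1, SW gen starting
		 * at 1: four pops succeed, the fifth fails until entries are
		 * reposted with the opposite gen value.
		 */
		struct toy_refillq q = {
			.ring = { 0 | GEN_BIT, 1 | GEN_BIT, 2 | GEN_BIT, 3 | GEN_BIT },
			.gen = true,
		};
		uint32_t id;

		while (pop_buf_id(&q, &id))
			printf("got buf_id %u\n", id);
		printf("ring exhausted\n");
		return 0;
	}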
/** - * idpf_tx_splitq_bump_ntu - adjust NTU and generation - * @txq: the tx ring to wrap - * @ntu: ring index to bump + * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error + * @txq: Tx queue to unwind + * @params: pointer to splitq params struct + * @first: starting buffer for packet to unmap */ -static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu) +static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq, + struct idpf_tx_splitq_params *params, + struct idpf_tx_buf *first) { - ntu++; + struct idpf_sw_queue *refillq = txq->refillq; + struct libeth_sq_napi_stats ss = { }; + struct idpf_tx_buf *tx_buf = first; + struct libeth_cq_pp cp = { + .dev = txq->dev, + .ss = &ss, + };
- if (ntu == txq->desc_count) { - ntu = 0; - txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq); + u64_stats_update_begin(&txq->stats_sync); + u64_stats_inc(&txq->q_stats.dma_map_errs); + u64_stats_update_end(&txq->stats_sync); + + libeth_tx_complete(tx_buf, &cp); + while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) { + tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)]; + libeth_tx_complete(tx_buf, &cp); }
- return ntu; + /* Update tail in case netdev_xmit_more was previously true. */ + idpf_tx_buf_hw_update(txq, params->prev_ntu, false); + + if (!refillq) + return; + + /* Restore refillq state to avoid leaking tags. */ + if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq)) + idpf_queue_change(RFL_GEN_CHK, refillq); + refillq->next_to_clean = params->prev_refill_ntc; }
/** @@ -2353,6 +2135,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, struct netdev_queue *nq; struct sk_buff *skb; skb_frag_t *frag; + u32 next_buf_id; u16 td_cmd = 0; dma_addr_t dma;
@@ -2370,17 +2153,16 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, tx_buf = first; first->nr_frags = 0;
- params->compl_tag = - (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i; - for (frag = &skb_shinfo(skb)->frags[0];; frag++) { unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma)) - return idpf_tx_dma_map_error(tx_q, skb, first, i); + if (unlikely(dma_mapping_error(tx_q->dev, dma))) { + idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL; + return idpf_tx_splitq_pkt_err_unmap(tx_q, params, + first); + }
first->nr_frags++; - idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag; tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */ @@ -2436,29 +2218,12 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, max_data);
if (unlikely(++i == tx_q->desc_count)) { - tx_buf = tx_q->tx_buf; tx_desc = &tx_q->flex_tx[0]; i = 0; - tx_q->compl_tag_cur_gen = - IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); } else { - tx_buf++; tx_desc++; }
- /* Since this packet has a buffer that is going to span - * multiple descriptors, it's going to leave holes in - * to the TX buffer ring. To ensure these holes do not - * cause issues in the cleaning routines, we will clear - * them of any stale data and assign them the same - * completion tag as the current packet. Then when the - * packet is being cleaned, the cleaning routines will - * simply pass over these holes and finish cleaning the - * rest of the packet. - */ - tx_buf->type = LIBETH_SQE_EMPTY; - idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag; - /* Adjust the DMA offset and the remaining size of the * fragment. On the first iteration of this loop, * max_data will be >= 12K and <= 16K-1. On any @@ -2483,15 +2248,25 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
if (unlikely(++i == tx_q->desc_count)) { - tx_buf = tx_q->tx_buf; tx_desc = &tx_q->flex_tx[0]; i = 0; - tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); } else { - tx_buf++; tx_desc++; }
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { + if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq, + &next_buf_id))) { + idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL; + return idpf_tx_splitq_pkt_err_unmap(tx_q, params, + first); + } + } else { + next_buf_id = i; + } + idpf_tx_buf_next(tx_buf) = next_buf_id; + tx_buf = &tx_q->tx_buf[next_buf_id]; + size = skb_frag_size(frag); data_len -= size;
@@ -2506,6 +2281,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
/* write last descriptor with RS and EOP bits */ first->rs_idx = i; + idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL; td_cmd |= params->eop_cmd; idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size); i = idpf_tx_splitq_bump_ntu(tx_q, i); @@ -2609,8 +2385,6 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) struct idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use;
- txq->tx_buf[i].type = LIBETH_SQE_CTX; - /* grab the next descriptor */ desc = &txq->flex_ctx[i]; txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); @@ -2636,6 +2410,21 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) return NETDEV_TX_OK; }
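The next hunk adds idpf_tx_splitq_need_re(), which replaces the old "next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP" test: it measures the ring distance from the last descriptor that carried the RE bit and requests another report once the gap reaches IDPF_TX_SPLITQ_RE_MIN_GAP (64), handling wraparound by adding the ring size to a negative difference. A worked example of that gap arithmetic with hypothetical index values:

	#include <stdbool.h>
	#include <stdio.h>

	#define RE_MIN_GAP 64

	/* Ring distance from last_re to next_to_use, wrapping negative
	 * differences by the ring size, as in idpf_tx_splitq_need_re().
	 */
	static bool need_re(int desc_count, int next_to_use, int last_re)
	{
		int gap = next_to_use - last_re;

		gap += (gap < 0) ? desc_count : 0;

		return gap >= RE_MIN_GAP;
	}

	int main(void)
	{
		/* 512-entry ring, 40 descriptors since the last RE -> no (0). */
		printf("%d\n", need_re(512, 140, 100));

		/* Wrapped: last RE at 480, now at 40 -> gap 72 -> yes (1). */
		printf("%d\n", need_re(512, 40, 480));
		return 0;
	}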
+/** + * idpf_tx_splitq_need_re - check whether RE bit needs to be set + * @tx_q: pointer to Tx queue + * + * Return: true if RE bit needs to be set, false otherwise + */ +static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q) +{ + int gap = tx_q->next_to_use - tx_q->last_re; + + gap += (gap < 0) ? tx_q->desc_count : 0; + + return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP; +} + /** * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors * @skb: send buffer @@ -2646,12 +2435,15 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q) { - struct idpf_tx_splitq_params tx_params = { }; + struct idpf_tx_splitq_params tx_params = { + .prev_ntu = tx_q->next_to_use, + }; struct idpf_tx_buf *first; - unsigned int count; + u32 count, buf_count = 1; + u32 buf_id; int tso;
- count = idpf_tx_desc_count_required(tx_q, skb); + count = idpf_tx_res_count_required(tx_q, skb, &buf_count); if (unlikely(!count)) return idpf_tx_drop_skb(tx_q, skb);
@@ -2661,7 +2453,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
/* Check for splitq specific TX resources */ count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso); - if (idpf_tx_maybe_stop_splitq(tx_q, count)) { + if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) { idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
return NETDEV_TX_BUSY; @@ -2688,36 +2480,47 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, u64_stats_update_end(&tx_q->stats_sync); }
- /* record the location of the first descriptor for this packet */ - first = &tx_q->tx_buf[tx_q->next_to_use]; - first->skb = skb; + if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { + struct idpf_sw_queue *refillq = tx_q->refillq;
- if (tso) { - first->packets = tx_params.offload.tso_segs; - first->bytes = skb->len + - ((first->packets - 1) * tx_params.offload.tso_hdr_len); - } else { - first->packets = 1; - first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - } + /* Save refillq state in case of a packet rollback. Otherwise, + * the tags will be leaked since they will be popped from the + * refillq but never reposted during cleaning. + */ + tx_params.prev_refill_gen = + idpf_queue_has(RFL_GEN_CHK, refillq); + tx_params.prev_refill_ntc = refillq->next_to_clean; + + if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq, + &buf_id))) { + if (tx_params.prev_refill_gen != + idpf_queue_has(RFL_GEN_CHK, refillq)) + idpf_queue_change(RFL_GEN_CHK, refillq); + refillq->next_to_clean = tx_params.prev_refill_ntc; + + tx_q->next_to_use = tx_params.prev_ntu; + return idpf_tx_drop_skb(tx_q, skb); + } + tx_params.compl_tag = buf_id;
- if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; - /* Set the RE bit to catch any packets that may have not been - * stashed during RS completion cleaning. MIN_GAP is set to - * MIN_RING size to ensure it will be set at least once each - * time around the ring. + /* Set the RE bit to periodically "clean" the descriptor ring. + * MIN_GAP is set to MIN_RING size to ensure it will be set at + * least once each time around the ring. */ - if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) { + if (idpf_tx_splitq_need_re(tx_q)) { tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE; tx_q->txq_grp->num_completions_pending++; + tx_q->last_re = tx_q->next_to_use; }
if (skb->ip_summed == CHECKSUM_PARTIAL) tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
} else { + buf_id = tx_q->next_to_use; + tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2; tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
@@ -2725,6 +2528,18 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN; }
+ first = &tx_q->tx_buf[buf_id]; + first->skb = skb; + + if (tso) { + first->packets = tx_params.offload.tso_segs; + first->bytes = skb->len + + ((first->packets - 1) * tx_params.offload.tso_hdr_len); + } else { + first->packets = 1; + first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + } + idpf_tx_splitq_map(tx_q, &tx_params, first);
return NETDEV_TX_OK; @@ -3220,7 +3035,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) skip_data: rx_buf->page = NULL;
- idpf_rx_post_buf_refill(refillq, buf_id); + idpf_post_buf_refill(refillq, buf_id); IDPF_RX_BUMP_NTC(rxq, ntc);
/* skip if it is non EOP desc */ @@ -3328,10 +3143,10 @@ static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq, bool failure;
if (idpf_queue_has(RFL_GEN_CHK, refillq) != - !!(refill_desc & IDPF_RX_BI_GEN_M)) + !!(refill_desc & IDPF_RFL_BI_GEN_M)) break;
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); + buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc); failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc); if (failure) break; @@ -3502,21 +3317,31 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport) /** * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings * @q_vector: pointer to q_vector - * @type: itr index - * @itr: itr value */ -static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector, - const int type, u16 itr) +static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector) { - u32 itr_val; + u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m; + int type = IDPF_NO_ITR_UPDATE_IDX; + u16 itr = 0; + + if (q_vector->wb_on_itr) { + /* + * Trigger a software interrupt when exiting wb_on_itr, to make + * sure we catch any pending write backs that might have been + * missed due to interrupt state transition. + */ + itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m | + q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m; + type = IDPF_SW_ITR_UPDATE_IDX; + itr = IDPF_ITR_20K; + }
itr &= IDPF_ITR_MASK; /* Don't clear PBA because that can cause lost interrupts that * came in while we were cleaning/polling */ - itr_val = q_vector->intr_reg.dyn_ctl_intena_m | - (type << q_vector->intr_reg.dyn_ctl_itridx_s) | - (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1)); + itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) | + (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
return itr_val; } @@ -3614,9 +3439,8 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector) /* net_dim() updates ITR out-of-band using a work item */ idpf_net_dim(q_vector);
+ intval = idpf_vport_intr_buildreg_itr(q_vector); q_vector->wb_on_itr = false; - intval = idpf_vport_intr_buildreg_itr(q_vector, - IDPF_NO_ITR_UPDATE_IDX, 0);
writel(intval, q_vector->intr_reg.dyn_ctl); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index ffeeaede6cf8..48d55b373425 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -107,8 +107,8 @@ do { \ */ #define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_GEN_M BIT(16) -#define IDPF_RX_BI_BUFID_M GENMASK(15, 0) +#define IDPF_RFL_BI_GEN_M BIT(16) +#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M #define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M @@ -117,10 +117,6 @@ do { \ ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \ (txq)->next_to_clean - (txq)->next_to_use - 1)
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top) -#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ - (txq)->desc_count >> 2) - #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1) /* Determine the absolute number of completions pending, i.e. the number of * completions that are expected to arrive on the TX completion queue. @@ -130,11 +126,7 @@ do { \ 0 : U32_MAX) + \ (txq)->num_completions_pending - (txq)->complq->num_completions)
-#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16 -/* Adjust the generation for the completion tag and wrap if necessary */ -#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \ - ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \ - 0 : (txq)->compl_tag_cur_gen) +#define IDPF_TXBUF_NULL U32_MAX
#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
@@ -150,18 +142,6 @@ union idpf_tx_flex_desc {
#define idpf_tx_buf libeth_sqe
-/** - * struct idpf_buf_lifo - LIFO for managing OOO completions - * @top: Used to know how many buffers are left - * @size: Total size of LIFO - * @bufs: Backing array - */ -struct idpf_buf_lifo { - u16 top; - u16 size; - struct idpf_tx_stash **bufs; -}; - /** * struct idpf_tx_offload_params - Offload parameters for a given packet * @tx_flags: Feature flags enabled for this packet @@ -194,6 +174,9 @@ struct idpf_tx_offload_params { * @compl_tag: Associated tag for completion * @td_tag: Descriptor tunneling tag * @offload: Offload parameters + * @prev_ntu: stored TxQ next_to_use in case of rollback + * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback + * @prev_refill_gen: stored refillq generation bit in case of packet rollback */ struct idpf_tx_splitq_params { enum idpf_tx_desc_dtype_value dtype; @@ -204,6 +187,10 @@ struct idpf_tx_splitq_params { };
struct idpf_tx_offload_params offload; + + u16 prev_ntu; + u16 prev_refill_ntc; + bool prev_refill_gen; };
enum idpf_tx_ctx_desc_eipt_offload { @@ -354,6 +341,8 @@ struct idpf_vec_regs { * @dyn_ctl_itridx_m: Mask for ITR index * @dyn_ctl_intrvl_s: Register bit offset for ITR interval * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature + * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index + * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable * @rx_itr: RX ITR register * @tx_itr: TX ITR register * @icr_ena: Interrupt cause register offset @@ -367,6 +356,8 @@ struct idpf_intr_reg { u32 dyn_ctl_itridx_m; u32 dyn_ctl_intrvl_s; u32 dyn_ctl_wb_on_itr_m; + u32 dyn_ctl_sw_itridx_ena_m; + u32 dyn_ctl_swint_trig_m; void __iomem *rx_itr; void __iomem *tx_itr; void __iomem *icr_ena; @@ -437,7 +428,7 @@ struct idpf_q_vector { cpumask_var_t affinity_mask; __cacheline_group_end_aligned(cold); }; -libeth_cacheline_set_assert(struct idpf_q_vector, 112, +libeth_cacheline_set_assert(struct idpf_q_vector, 120, 24 + sizeof(struct napi_struct) + 2 * sizeof(struct dim), 8 + sizeof(cpumask_var_t)); @@ -471,22 +462,13 @@ struct idpf_tx_queue_stats { #define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode) #define IDPF_ITR_TX_DEF IDPF_ITR_20K #define IDPF_ITR_RX_DEF IDPF_ITR_20K +/* Index used for 'SW ITR' update in DYN_CTL register */ +#define IDPF_SW_ITR_UPDATE_IDX 2 /* Index used for 'No ITR' update in DYN_CTL register */ #define IDPF_NO_ITR_UPDATE_IDX 3 #define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt) #define IDPF_DIM_DEFAULT_PROFILE_IX 1
-/** - * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode - * @buf_stack: Stack of empty buffers to store buffer info for out of order - * buffer completions. See struct idpf_buf_lifo - * @sched_buf_hash: Hash table to store buffers - */ -struct idpf_txq_stash { - struct idpf_buf_lifo buf_stack; - DECLARE_HASHTABLE(sched_buf_hash, 12); -} ____cacheline_aligned; - /** * struct idpf_rx_queue - software structure representing a receive queue * @rx: universal receive descriptor array @@ -617,6 +599,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64, * @netdev: &net_device corresponding to this queue * @next_to_use: Next descriptor to use * @next_to_clean: Next descriptor to clean + * @last_re: last descriptor index that RE bit was set + * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on * the TX completion queue, it can be for any TXQ associated * with that completion queue. This means we can clean up to @@ -627,17 +611,14 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64, * only once at the end of the cleaning routine. * @clean_budget: singleq only, queue cleaning budget * @cleaned_pkts: Number of packets cleaned for the above said case - * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather - * @stash: Tx buffer stash for Flow-based scheduling mode - * @compl_tag_bufid_m: Completion tag buffer id mask - * @compl_tag_cur_gen: Used to keep track of current completion tag generation - * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset + * @refillq: Pointer to refill queue * @stats_sync: See struct u64_stats_sync * @q_stats: See union idpf_tx_queue_stats * @q_id: Queue id * @size: Length of descriptor ring in bytes * @dma: Physical address of ring * @q_vector: Backreference to associated vector + * @buf_pool_size: Total number of idpf_tx_buf */ struct idpf_tx_queue { __cacheline_group_begin_aligned(read_mostly); @@ -659,7 +640,6 @@ struct idpf_tx_queue { u16 desc_count;
u16 tx_min_pkt_len; - u16 compl_tag_gen_s;
struct net_device *netdev; __cacheline_group_end_aligned(read_mostly); @@ -667,6 +647,8 @@ struct idpf_tx_queue { __cacheline_group_begin_aligned(read_write); u16 next_to_use; u16 next_to_clean; + u16 last_re; + u16 tx_max_bufs;
union { u32 cleaned_bytes; @@ -674,12 +656,7 @@ struct idpf_tx_queue { }; u16 cleaned_pkts;
- u16 tx_max_bufs; - struct idpf_txq_stash *stash; - - u16 compl_tag_bufid_m; - u16 compl_tag_cur_gen; - u16 compl_tag_gen_max; + struct idpf_sw_queue *refillq;
struct u64_stats_sync stats_sync; struct idpf_tx_queue_stats q_stats; @@ -691,11 +668,12 @@ struct idpf_tx_queue { dma_addr_t dma;
struct idpf_q_vector *q_vector; + u32 buf_pool_size; __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_tx_queue, 64, - 88 + sizeof(struct u64_stats_sync), - 24); + 80 + sizeof(struct u64_stats_sync), + 32);
/** * struct idpf_buf_queue - software structure representing a buffer queue @@ -905,7 +883,6 @@ struct idpf_rxq_group { * @vport: Vport back pointer * @num_txq: Number of TX queues associated * @txqs: Array of TX queue pointers - * @stashes: array of OOO stashes for the queues * @complq: Associated completion queue pointer, split queue only * @num_completions_pending: Total number of completions pending for the * completion queue, acculumated for all TX queues @@ -920,7 +897,6 @@ struct idpf_txq_group {
u16 num_txq; struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q]; - struct idpf_txq_stash *stashes;
struct idpf_compl_queue *complq;
@@ -1013,6 +989,17 @@ static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector) reg->dyn_ctl); }
+/** + * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq + * @refillq: pointer to refillq containing buf_ids + */ +static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq) +{ + return (refillq->next_to_use > refillq->next_to_clean ? + 0 : refillq->desc_count) + + refillq->next_to_use - refillq->next_to_clean - 1; +} + int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); void idpf_vport_init_num_qs(struct idpf_vport *vport, struct virtchnl2_create_vport *vport_msg); @@ -1040,10 +1027,8 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more); unsigned int idpf_size_to_txd_count(unsigned int size); netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb); -void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, - struct idpf_tx_buf *first, u16 ring_idx); -unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, - struct sk_buff *skb); +unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq, + struct sk_buff *skb, u32 *buf_count); void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q); diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c index aad62e270ae4..aba828abcb17 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -101,6 +101,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport) intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S; intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S; intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M; + intr->dyn_ctl_swint_trig_m = VF_INT_DYN_CTLN_SWINT_TRIG_M; + intr->dyn_ctl_sw_itridx_ena_m = + VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing, IDPF_VF_ITR_IDX_SPACING); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 5197ce816581..cc6a63e2573f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -432,6 +432,14 @@ static int otx2_set_ringparam(struct net_device *netdev, */ if (rx_count < pfvf->hw.rq_skid) rx_count = pfvf->hw.rq_skid; + + if (ring->rx_pending < 16) { + netdev_err(netdev, + "rx ring size %u invalid, min is 16\n", + ring->rx_pending); + return -EINVAL; + } + rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
/* Due pipelining impact minimum 2000 unused SQ CQE's diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 511b3ba24542..e9d49afc31db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -143,6 +143,11 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change, struct pci_dev *pdev = dev->pdev; int ret = 0;
+ if (mlx5_fw_reset_in_progress(dev)) { + NL_SET_ERR_MSG_MOD(extack, "Can't reload during firmware reset"); + return -EBUSY; + } + if (mlx5_dev_is_lightweight(dev)) { if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 080e7eab52c7..0b82a6a133d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -33,6 +33,7 @@ #include "lib/eq.h" #include "fw_tracer.h" #include "fw_tracer_tracepoint.h" +#include <linux/ctype.h>
static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) { @@ -358,6 +359,47 @@ static const char *VAL_PARM = "%llx"; static const char *REPLACE_64_VAL_PARM = "%x%x"; static const char *PARAM_CHAR = "%";
+static bool mlx5_is_valid_spec(const char *str) +{ + /* Parse format specifiers to find the actual type. + * Structure: %[flags][width][.precision][length]type + * Skip flags, width, precision & length. + */ + while (isdigit(*str) || *str == '#' || *str == '.' || *str == 'l') + str++; + + /* Check if it's a valid integer/hex specifier or %%: + * Valid formats: %x, %d, %i, %u, etc. + */ + if (*str != 'x' && *str != 'X' && *str != 'd' && *str != 'i' && + *str != 'u' && *str != 'c' && *str != '%') + return false; + + return true; +} + +static bool mlx5_tracer_validate_params(const char *str) +{ + const char *substr = str; + + if (!str) + return false; + + substr = strstr(substr, PARAM_CHAR); + while (substr) { + if (!mlx5_is_valid_spec(substr + 1)) + return false; + + if (*(substr + 1) == '%') + substr = strstr(substr + 2, PARAM_CHAR); + else + substr = strstr(substr + 1, PARAM_CHAR); + + } + + return true; +} + static int mlx5_tracer_message_hash(u32 message_id) { return jhash_1word(message_id, 0) & (MESSAGE_HASH_SIZE - 1); @@ -419,6 +461,10 @@ static int mlx5_tracer_get_num_of_params(char *str) char *substr, *pstr = str; int num_of_params = 0;
+ /* Validate that all parameters are valid before processing */ + if (!mlx5_tracer_validate_params(str)) + return -EINVAL; + /* replace %llx with %x%x */ substr = strstr(pstr, VAL_PARM); while (substr) { @@ -427,11 +473,15 @@ static int mlx5_tracer_get_num_of_params(char *str) substr = strstr(pstr, VAL_PARM); }
- /* count all the % characters */ + /* count all the % characters, but skip %% (escaped percent) */ substr = strstr(str, PARAM_CHAR); while (substr) { - num_of_params += 1; - str = substr + 1; + if (*(substr + 1) != '%') { + num_of_params += 1; + str = substr + 1; + } else { + str = substr + 2; + } substr = strstr(str, PARAM_CHAR); }
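The tracer fix above still counts '%' conversions in the firmware-supplied format string, but now skips the escaped "%%" sequence and first validates each specifier against a small whitelist of integer conversions. A standalone sketch of the counting rule on a sample string (simplified: it omits the %llx-to-%x%x rewrite and the specifier whitelist the driver also applies):

	#include <stdio.h>
	#include <string.h>

	/* Count '%' conversions in fmt, treating "%%" as a literal percent
	 * rather than a parameter.
	 */
	static int count_params(const char *fmt)
	{
		const char *p = strstr(fmt, "%");
		int n = 0;

		while (p) {
			if (p[1] != '%') {
				n++;
				p = strstr(p + 1, "%");
			} else {
				p = strstr(p + 2, "%");
			}
		}
		return n;
	}

	int main(void)
	{
		/* Two real parameters; "%%" is only an escaped percent sign,
		 * so this prints 2.
		 */
		printf("%d\n", count_params("rate %d at %x, 100%% done"));
		return 0;
	}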
@@ -570,14 +620,17 @@ void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt, { char tmp[512];
- snprintf(tmp, sizeof(tmp), str_frmt->string, - str_frmt->params[0], - str_frmt->params[1], - str_frmt->params[2], - str_frmt->params[3], - str_frmt->params[4], - str_frmt->params[5], - str_frmt->params[6]); + if (str_frmt->invalid_string) + snprintf(tmp, sizeof(tmp), "BAD_FORMAT: %s", str_frmt->string); + else + snprintf(tmp, sizeof(tmp), str_frmt->string, + str_frmt->params[0], + str_frmt->params[1], + str_frmt->params[2], + str_frmt->params[3], + str_frmt->params[4], + str_frmt->params[5], + str_frmt->params[6]);
trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost, str_frmt->event_id, tmp); @@ -609,6 +662,13 @@ static int mlx5_tracer_handle_raw_string(struct mlx5_fw_tracer *tracer, return 0; }
+static void mlx5_tracer_handle_bad_format_string(struct mlx5_fw_tracer *tracer, + struct tracer_string_format *cur_string) +{ + cur_string->invalid_string = true; + list_add_tail(&cur_string->list, &tracer->ready_strings_list); +} + static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer, struct tracer_event *tracer_event) { @@ -619,12 +679,18 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer, if (!cur_string) return mlx5_tracer_handle_raw_string(tracer, tracer_event);
- cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string); - cur_string->last_param_num = 0; cur_string->event_id = tracer_event->event_id; cur_string->tmsn = tracer_event->string_event.tmsn; cur_string->timestamp = tracer_event->string_event.timestamp; cur_string->lost = tracer_event->lost_event; + cur_string->last_param_num = 0; + cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string); + if (cur_string->num_of_params < 0) { + pr_debug("%s Invalid format string parameters\n", + __func__); + mlx5_tracer_handle_bad_format_string(tracer, cur_string); + return 0; + } if (cur_string->num_of_params == 0) /* trace with no params */ list_add_tail(&cur_string->list, &tracer->ready_strings_list); } else { @@ -634,6 +700,11 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer, __func__, tracer_event->string_event.tmsn); return mlx5_tracer_handle_raw_string(tracer, tracer_event); } + if (cur_string->num_of_params < 0) { + pr_debug("%s string parameter of invalid string, dumping\n", + __func__); + return 0; + } cur_string->last_param_num += 1; if (cur_string->last_param_num > TRACER_MAX_PARAMS) { pr_debug("%s Number of params exceeds the max (%d)\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h index 5c548bb74f07..30d0bcba8847 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -125,6 +125,7 @@ struct tracer_string_format { struct list_head list; u32 timestamp; bool lost; + bool invalid_string; };
enum mlx5_fw_tracer_ownership_state { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 558962423521..f4cb3e78d065 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -52,6 +52,7 @@ #include "devlink.h" #include "lag/lag.h" #include "en/tc/post_meter.h" +#include "fw_reset.h"
/* There are two match-all miss flows, one for unicast dst mac and * one for multicast. @@ -3731,6 +3732,11 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (IS_ERR(esw)) return PTR_ERR(esw);
+ if (mlx5_fw_reset_in_progress(esw->dev)) { + NL_SET_ERR_MSG_MOD(extack, "Can't change eswitch mode during firmware reset"); + return -EBUSY; + } + if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 35d2fe08c0fb..1411513da66b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -15,6 +15,7 @@ enum { MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, + MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, };
struct mlx5_fw_reset { @@ -126,6 +127,16 @@ int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_ty return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL); }
+bool mlx5_fw_reset_in_progress(struct mlx5_core_dev *dev) +{ + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + + if (!fw_reset) + return false; + + return test_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags); +} + static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev, u8 *reset_method) { @@ -241,6 +252,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); devl_unlock(devlink); } + + clear_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags); }
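The new MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS bit is set once a sync reset request is accepted and cleared on completion, abort, or timeout; mlx5_fw_reset_in_progress() lets devlink reload and eswitch mode changes bail out with -EBUSY instead of racing the reset (see the devlink.c and eswitch_offloads.c hunks above). A minimal userspace illustration of that guard pattern, with stand-in names (toy_dev, reload_down) and a plain flags word instead of the kernel's atomic bitops:

	#include <stdbool.h>
	#include <stdio.h>

	#define RESET_IN_PROGRESS (1u << 0)

	struct toy_dev {
		unsigned long reset_flags;
	};

	static bool fw_reset_in_progress(const struct toy_dev *dev)
	{
		return dev->reset_flags & RESET_IN_PROGRESS;
	}

	/* Callers that reconfigure the device check the flag and back off. */
	static int reload_down(struct toy_dev *dev)
	{
		if (fw_reset_in_progress(dev))
			return -16;	/* -EBUSY */

		printf("reload proceeds\n");
		return 0;
	}

	int main(void)
	{
		struct toy_dev dev = { .reset_flags = RESET_IN_PROGRESS };

		printf("during reset: %d\n", reload_down(&dev));

		dev.reset_flags &= ~RESET_IN_PROGRESS;	/* reset completed */
		printf("after reset: %d\n", reload_down(&dev));
		return 0;
	}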
static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) @@ -456,27 +469,48 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, reset_request_work); struct mlx5_core_dev *dev = fw_reset->dev; + bool nack_request = false; + struct devlink *devlink; int err;
err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method); - if (err) + if (err) { + nack_request = true; mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err); + } else if (!mlx5_is_reset_now_capable(dev, fw_reset->reset_method) || + test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, + &fw_reset->reset_flags)) { + nack_request = true; + }
- if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) || - !mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) { + devlink = priv_to_devlink(dev); + /* For external resets, try to acquire devl_lock. Skip if devlink reset is + * pending (lock already held) + */ + if (nack_request || + (!test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, + &fw_reset->reset_flags) && + !devl_trylock(devlink))) { err = mlx5_fw_reset_set_reset_sync_nack(dev); mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s", err ? "Failed" : "Sent"); return; } + if (mlx5_sync_reset_set_reset_requested(dev)) - return; + goto unlock; + + set_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags);
err = mlx5_fw_reset_set_reset_sync_ack(dev); if (err) mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err); else mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n"); + +unlock: + if (!test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) + devl_unlock(devlink); }
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id) @@ -710,6 +744,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
if (mlx5_sync_reset_clear_reset_requested(dev, true)) return; + + clear_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags); mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n"); }
@@ -746,6 +782,7 @@ static void mlx5_sync_reset_timeout_work(struct work_struct *work)
if (mlx5_sync_reset_clear_reset_requested(dev, true)) return; + clear_bit(MLX5_FW_RESET_FLAGS_RESET_IN_PROGRESS, &fw_reset->reset_flags); mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n"); }
@@ -832,7 +869,8 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev) cancel_work_sync(&fw_reset->reset_reload_work); cancel_work_sync(&fw_reset->reset_now_work); cancel_work_sync(&fw_reset->reset_abort_work); - cancel_delayed_work(&fw_reset->reset_timeout_work); + if (test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) + mlx5_sync_reset_clear_reset_requested(dev, true); }
static const struct devlink_param mlx5_fw_reset_devlink_params[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h index d5b28525c960..2d96b2adc1cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h @@ -10,6 +10,7 @@ int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_ty int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel, struct netlink_ext_ack *extack); int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev); +bool mlx5_fw_reset_in_progress(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev); void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 11d8739b9497..e97b3494b916 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -2196,6 +2196,7 @@ static void shutdown(struct pci_dev *pdev)
mlx5_core_info(dev, "Shutdown was called\n"); set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); + mlx5_drain_fw_reset(dev); mlx5_drain_health_wq(dev); err = mlx5_try_fast_unload(dev); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 5afe6b155ef0..81935f87bfcd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -440,7 +440,9 @@ int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table, rhashtable_remove_fast(&mr_table->route_ht, &mr_orig_route->ht_node, mlxsw_sp_mr_route_ht_params); + mutex_lock(&mr_table->route_list_lock); list_del(&mr_orig_route->node); + mutex_unlock(&mr_table->route_list_lock); mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route); }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 511cd92e0e3e..7066bc5612c6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2265,6 +2265,7 @@ mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n, if (!neigh_entry) return NULL;
+ neigh_hold(n); neigh_entry->key.n = n; neigh_entry->rif = rif; INIT_LIST_HEAD(&neigh_entry->nexthop_list); @@ -2274,6 +2275,7 @@ mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry) { + neigh_release(neigh_entry->key.n); kfree(neigh_entry); }
@@ -2858,6 +2860,11 @@ static int mlxsw_sp_router_schedule_work(struct net *net, if (!net_work) return NOTIFY_BAD;
+ /* Take a reference to ensure the neighbour won't be destructed until + * we drop the reference in the work item. + */ + neigh_clone(n); + INIT_WORK(&net_work->work, cb); net_work->mlxsw_sp = router->mlxsw_sp; net_work->n = n; @@ -2881,11 +2888,6 @@ static int mlxsw_sp_router_schedule_neigh_work(struct mlxsw_sp_router *router, struct net *net;
net = neigh_parms_net(n->parms); - - /* Take a reference to ensure the neighbour won't be destructed until we - * drop the reference in delayed work. - */ - neigh_clone(n); return mlxsw_sp_router_schedule_work(net, router, n, mlxsw_sp_router_neigh_event_work); } @@ -4320,6 +4322,8 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp, if (err) goto err_neigh_entry_insert;
+ neigh_release(old_n); + read_lock_bh(&n->lock); nud_state = n->nud_state; dead = n->dead; @@ -4328,14 +4332,10 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(nh, &neigh_entry->nexthop_list, neigh_list_node) { - neigh_release(old_n); - neigh_clone(n); __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected); mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp); }
- neigh_release(n); - return 0;
err_neigh_entry_insert: @@ -4428,6 +4428,11 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, } }
+ /* Release the reference taken by neigh_lookup() / neigh_create() since + * neigh_entry already holds one. + */ + neigh_release(n); + /* If that is the first nexthop connected to that neigh, add to * nexthop_neighs_list */ @@ -4454,11 +4459,9 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; - struct neighbour *n;
if (!neigh_entry) return; - n = neigh_entry->key.n;
__mlxsw_sp_nexthop_neigh_update(nh, true); list_del(&nh->neigh_list_node); @@ -4472,8 +4475,6 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list)) mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); - - neigh_release(n); }
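Taken together, the spectrum_router.c hunks move neighbour reference ownership into the neigh_entry itself: one reference is taken when the entry is allocated and dropped when it is freed, so the nexthop code no longer has to pair neigh_clone()/neigh_release() calls on every update. A sketch of that ownership model with a hypothetical wrapper structure:

#include <linux/slab.h>
#include <net/neighbour.h>

struct my_neigh_entry {			/* hypothetical wrapper */
	struct neighbour *n;
};

static struct my_neigh_entry *my_neigh_entry_create(struct neighbour *n)
{
	struct my_neigh_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	/* The entry owns one reference for its whole lifetime. */
	neigh_hold(n);
	entry->n = n;
	return entry;
}

static void my_neigh_entry_destroy(struct my_neigh_entry *entry)
{
	/* Drop the reference taken at creation time. */
	neigh_release(entry->n);
	kfree(entry);
}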
static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index bec5f6823775..9c40c62080c9 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2724,9 +2724,6 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
static void rtl_prepare_power_down(struct rtl8169_private *tp) { - if (tp->dash_enabled) - return; - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || tp->mac_version == RTL_GIGA_MAC_VER_33) rtl_ephy_write(tp, 0x19, 0xff64); @@ -4862,7 +4859,7 @@ static void rtl8169_down(struct rtl8169_private *tp) rtl_disable_exit_l1(tp); rtl_prepare_power_down(tp);
- if (tp->dash_type != RTL_DASH_NONE) + if (tp->dash_type != RTL_DASH_NONE && !tp->saved_wolopts) rtl8168_driver_stop(tp); }
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index a5e23e2da90f..953a1d22e60a 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -516,15 +516,7 @@ static inline void smc_rcv(struct net_device *dev) * any other concurrent access and C would always interrupt B. But life * isn't that easy in a SMP world... */ -#define smc_special_trylock(lock, flags) \ -({ \ - int __ret; \ - local_irq_save(flags); \ - __ret = spin_trylock(lock); \ - if (!__ret) \ - local_irq_restore(flags); \ - __ret; \ -}) +#define smc_special_trylock(lock, flags) spin_trylock_irqsave(lock, flags) #define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags) #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags) #else diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ce35a6f12679..112287a6e9ab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -87,6 +87,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_XDP_CONSUMED BIT(0) #define STMMAC_XDP_TX BIT(1) #define STMMAC_XDP_REDIRECT BIT(2) +#define STMMAC_XSK_CONSUMED BIT(3)
static int flow_ctrl = FLOW_AUTO; module_param(flow_ctrl, int, 0644); @@ -4998,6 +4999,7 @@ static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, struct xdp_buff *xdp) { + bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL); struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); int cpu = smp_processor_id(); struct netdev_queue *nq; @@ -5014,9 +5016,18 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq);
- res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); - if (res == STMMAC_XDP_TX) + /* For zero copy XDP_TX action, dma_map is true */ + res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc); + if (res == STMMAC_XDP_TX) { stmmac_flush_tx_descriptors(priv, queue); + } else if (res == STMMAC_XDP_CONSUMED && zc) { + /* xdp has been freed by xdp_convert_buff_to_frame(), + * no need to call xsk_buff_free() again, so return + * STMMAC_XSK_CONSUMED. + */ + res = STMMAC_XSK_CONSUMED; + xdp_return_frame(xdpf); + }
__netif_tx_unlock(nq);
@@ -5366,6 +5377,8 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) break; case STMMAC_XDP_CONSUMED: xsk_buff_free(buf->xdp); + fallthrough; + case STMMAC_XSK_CONSUMED: rx_dropped++; break; case STMMAC_XDP_TX: diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c index b9b5554ea862..5ad2673f213d 100644 --- a/drivers/net/fjes/fjes_hw.c +++ b/drivers/net/fjes/fjes_hw.c @@ -334,7 +334,7 @@ int fjes_hw_init(struct fjes_hw *hw)
	ret = fjes_hw_reset(hw);
	if (ret)
-		return ret;
+		goto err_iounmap;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

@@ -347,8 +347,10 @@ int fjes_hw_init(struct fjes_hw *hw)
	hw->max_epid = fjes_hw_get_max_epid(hw);
	hw->my_epid = fjes_hw_get_my_epid(hw);

-	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
-		return -ENXIO;
+	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid)) {
+		ret = -ENXIO;
+		goto err_iounmap;
+	}

	ret = fjes_hw_setup(hw);

@@ -356,6 +358,10 @@ int fjes_hw_init(struct fjes_hw *hw)
	hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;

	return ret;
+
+err_iounmap:
+	fjes_hw_iounmap(hw);
+	return ret;
}
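The fjes change follows the usual kernel error-unwinding style: once the hardware registers have been mapped, every later failure jumps to a single err_iounmap label that releases the mapping instead of returning directly and leaking it. A stripped-down sketch of the shape, with illustrative names and a stubbed-out reset helper:

#include <linux/errno.h>
#include <linux/io.h>

static int example_hw_reset(void __iomem *base) { return 0; } /* hypothetical */

static int example_hw_init(void __iomem **basep, phys_addr_t phys, size_t len)
{
	void __iomem *base;
	int ret;

	base = ioremap(phys, len);
	if (!base)
		return -ENOMEM;

	ret = example_hw_reset(base);
	if (ret)
		goto err_iounmap;	/* undo the mapping on every error path */

	*basep = base;
	return 0;

err_iounmap:
	iounmap(base);
	return ret;
}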
void fjes_hw_exit(struct fjes_hw *hw) diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index ca62188a317a..83bd65a22770 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -737,6 +737,9 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, struct ethhdr *eth = eth_hdr(skb); rx_handler_result_t ret = RX_HANDLER_PASS;
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) + return RX_HANDLER_PASS; + if (is_multicast_ether_addr(eth->h_dest)) { if (ipvlan_external_frame(skb, port)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index c2170650415c..4f2bd20cdc05 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -63,6 +63,13 @@ static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad,
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+ /* Workaround for read-after-write issue. + * The controller may return stale data if a read follows immediately + * after a write. A dummy read forces the hardware to update its + * internal state, ensuring that the next real read returns correct data. + */ + ioread32(ctx->base + ASPEED_MDIO_CTRL); + return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, !(ctrl & ASPEED_MDIO_CTRL_FIRE), ASPEED_MDIO_INTERVAL_US, diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c index b3a5a0af19da..82036efbf7ee 100644 --- a/drivers/net/phy/marvell-88q2xxx.c +++ b/drivers/net/phy/marvell-88q2xxx.c @@ -647,7 +647,7 @@ static int mv88q2xxx_hwmon_write(struct device *dev,
switch (attr) { case hwmon_temp_max: - clamp_val(val, -75000, 180000); + val = clamp(val, -75000, 180000); val = (val / 1000) + 75; val = FIELD_PREP(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK, val); diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index 94c40c5cebdd..50732f9699ee 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -877,7 +877,7 @@ static void __team_queue_override_enabled_check(struct team *team) static void team_queue_override_port_prio_changed(struct team *team, struct team_port *port) { - if (!port->queue_id || team_port_enabled(port)) + if (!port->queue_id || !team_port_enabled(port)) return; __team_queue_override_port_del(team, port); __team_queue_override_port_add(team, port); diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 7fd763917ae2..6ab3486072cb 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -335,6 +335,11 @@ int asix_read_phy_addr(struct usbnet *dev, bool internal) offset = (internal ? 1 : 0); ret = buf[offset];
+ if (ret >= PHY_MAX_ADDR) { + netdev_err(dev->net, "invalid PHY address: %d\n", ret); + return -ENODEV; + } + netdev_dbg(dev->net, "%s PHY address 0x%x\n", internal ? "internal" : "external", ret);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 278e6cb6f4d9..e40b0669d9f4 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -211,6 +211,8 @@ static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg) if (res == -ENODEV) netif_device_detach(dev->netdev); dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res); + kfree(req); + usb_free_urb(async_urb); } return res; } diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index cb7d2f798fb4..9587eb98cdb3 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -52,7 +52,7 @@ static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value)
static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
-	return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG,
+	return usbnet_write_cmd(dev, SR_WR_REG, SR_REQ_WR_REG,
			       value, reg, NULL, 0);
}
@@ -65,7 +65,7 @@ static void sr_write_async(struct usbnet *dev, u8 reg, u16 length,
static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
-	usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
+	usbnet_write_cmd_async(dev, SR_WR_REG, SR_REQ_WR_REG,
			       value, reg, NULL, 0);
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 0ff7357c3c91..f1f61d85d949 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -702,6 +702,7 @@ void usbnet_resume_rx(struct usbnet *dev) struct sk_buff *skb; int num = 0;
+	local_bh_disable();
	clear_bit(EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
@@ -710,6 +711,7 @@ void usbnet_resume_rx(struct usbnet *dev)
	}

	tasklet_schedule(&dev->bh);
+	local_bh_enable();
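Wrapping the whole sequence in local_bh_disable()/local_bh_enable() makes clearing the pause flag, requeueing the deferred skbs and the tasklet_schedule() atomic with respect to softirq processing on the local CPU. The general shape, using a hypothetical device structure in place of struct usbnet:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_EVENT_RX_PAUSED 0

struct my_dev {				/* hypothetical */
	unsigned long flags;
	struct sk_buff_head rxq_pause;
	struct tasklet_struct bh;
};

static void my_resume_rx(struct my_dev *dev)
{
	struct sk_buff *skb;

	local_bh_disable();		/* no softirq runs on this CPU in here */
	clear_bit(MY_EVENT_RX_PAUSED, &dev->flags);

	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL)
		netif_rx(skb);		/* hand the deferred packets back */

	tasklet_schedule(&dev->bh);
	local_bh_enable();
}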
netif_dbg(dev, rx_status, dev->net, "paused rx queue disabled, %d skbs requeued\n", num); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c index c3a602197662..abe7f6501e5e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c @@ -24,6 +24,10 @@ static const struct brcmf_dmi_data acepc_t8_data = { BRCM_CC_4345_CHIP_ID, 6, "acepc-t8" };
+static const struct brcmf_dmi_data acer_a1_840_data = { + BRCM_CC_43340_CHIP_ID, 2, "acer-a1-840" +}; + /* The Chuwi Hi8 Pro uses the same Ampak AP6212 module as the Chuwi Vi8 Plus * and the nvram for the Vi8 Plus is already in linux-firmware, so use that. */ @@ -91,6 +95,16 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&acepc_t8_data, }, + { + /* Acer Iconia One 8 A1-840 (non FHD version) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"), + /* Above strings are too generic also match BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"), + }, + .driver_data = (void *)&acer_a1_840_data, + }, { /* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */ .matches = { diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index a987c5e4eff6..6ce8e4af18fe 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -253,6 +253,19 @@ mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min) return prop->value; }
+static const s8 * +mt76_get_of_array_s8(struct device_node *np, char *name, size_t *len, int min) +{ + struct property *prop = of_find_property(np, name, NULL); + + if (!prop || !prop->value || prop->length < min) + return NULL; + + *len = prop->length; + + return prop->value; +} + struct device_node * mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan) { @@ -294,7 +307,7 @@ mt76_get_txs_delta(struct device_node *np, u8 nss) }
static void -mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data, +mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const s8 *data, s8 target_power, s8 nss_delta, s8 *max_power) { int i; @@ -303,15 +316,14 @@ mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data, return;
for (i = 0; i < pwr_len; i++) { - pwr[i] = min_t(s8, target_power, - be32_to_cpu(data[i]) + nss_delta); + pwr[i] = min_t(s8, target_power, data[i] + nss_delta); *max_power = max(*max_power, pwr[i]); } }
static void mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num, - const __be32 *data, size_t len, s8 target_power, + const s8 *data, size_t len, s8 target_power, s8 nss_delta, s8 *max_power) { int i, cur; @@ -319,8 +331,7 @@ mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num, if (!data) return;
- len /= 4; - cur = be32_to_cpu(data[0]); + cur = data[0]; for (i = 0; i < pwr_num; i++) { if (len < pwr_len + 1) break; @@ -335,7 +346,7 @@ mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num, if (!len) break;
- cur = be32_to_cpu(data[0]); + cur = data[0]; } }
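The eeprom.c rework stops treating the power-limit properties as arrays of 32-bit big-endian cells and reads them as raw signed bytes instead. A minimal sketch of pulling s8 values straight out of a device-tree property, assuming the property really is encoded as one byte per entry (the helper name is illustrative):

#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/of.h>

/* Returns the number of s8 entries copied, or a negative errno. */
static int read_s8_limits(struct device_node *np, const char *name,
			  s8 *out, int max_entries)
{
	struct property *prop = of_find_property(np, name, NULL);
	int i, n;

	if (!prop || !prop->value)
		return -ENOENT;

	/* prop->length is in bytes, i.e. one entry per byte here. */
	n = min_t(int, prop->length, max_entries);
	for (i = 0; i < n; i++)
		out[i] = ((const s8 *)prop->value)[i];

	return n;
}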
@@ -346,7 +357,7 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy, { struct mt76_dev *dev = phy->dev; struct device_node *np; - const __be32 *val; + const s8 *val; char name[16]; u32 mcs_rates = dev->drv->mcs_rates; u32 ru_rates = ARRAY_SIZE(dest->ru[0]); @@ -392,21 +403,21 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
txs_delta = mt76_get_txs_delta(np, hweight16(phy->chainmask));
- val = mt76_get_of_array(np, "rates-cck", &len, ARRAY_SIZE(dest->cck)); + val = mt76_get_of_array_s8(np, "rates-cck", &len, ARRAY_SIZE(dest->cck)); mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val, target_power, txs_delta, &max_power);
- val = mt76_get_of_array(np, "rates-ofdm", - &len, ARRAY_SIZE(dest->ofdm)); + val = mt76_get_of_array_s8(np, "rates-ofdm", + &len, ARRAY_SIZE(dest->ofdm)); mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val, target_power, txs_delta, &max_power);
- val = mt76_get_of_array(np, "rates-mcs", &len, mcs_rates + 1); + val = mt76_get_of_array_s8(np, "rates-mcs", &len, mcs_rates + 1); mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]), ARRAY_SIZE(dest->mcs), val, len, target_power, txs_delta, &max_power);
- val = mt76_get_of_array(np, "rates-ru", &len, ru_rates + 1); + val = mt76_get_of_array_s8(np, "rates-ru", &len, ru_rates + 1); mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]), ARRAY_SIZE(dest->ru), val, len, target_power, txs_delta, &max_power); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 376975388007..4f0c840ef93d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -1249,7 +1249,7 @@ static int mt7615_suspend(struct ieee80211_hw *hw, phy->mt76);
if (!mt7615_dev_running(dev)) - err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
mt7615_mutex_release(dev);
@@ -1271,7 +1271,7 @@ static int mt7615_resume(struct ieee80211_hw *hw) if (!running) { int err;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true); if (err < 0) { mt7615_mutex_release(dev); return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c index 9f43e673518b..9a278589df4e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c @@ -83,7 +83,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state) hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && mt7615_firmware_offload(dev); if (hif_suspend) { - err = mt76_connac_mcu_set_hif_suspend(mdev, true); + err = mt76_connac_mcu_set_hif_suspend(mdev, true, true); if (err) return err; } @@ -131,7 +131,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state) } napi_enable(&mdev->tx_napi); if (hif_suspend) - mt76_connac_mcu_set_hif_suspend(mdev, false); + mt76_connac_mcu_set_hif_suspend(mdev, false, true);
return err; } @@ -175,7 +175,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && mt7615_firmware_offload(dev)) - err = mt76_connac_mcu_set_hif_suspend(mdev, false); + err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c index aebfc4576aa4..f56038cd4d3a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -191,7 +191,7 @@ static int mt7663s_suspend(struct device *dev) mt7615_firmware_offload(mdev)) { int err;
- err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true); + err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true, true); if (err < 0) return err; } @@ -230,7 +230,7 @@ static int mt7663s_resume(struct device *dev)
if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) && mt7615_firmware_offload(mdev)) - err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false); + err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false, true);
return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index 5020af52c68c..4aa9fa1c4a23 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -225,7 +225,7 @@ static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state) mt7615_firmware_offload(dev)) { int err;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true); if (err < 0) return err; } @@ -253,7 +253,7 @@ static int mt7663u_resume(struct usb_interface *intf)
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && mt7615_firmware_offload(dev)) - err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index a6324f6ead78..462b4a68c4f0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -2527,7 +2527,7 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76_phy *phy, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_wow_ctrl);
-int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend) +int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend, bool wait_resp) { struct { struct { @@ -2559,7 +2559,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend) req.hdr.hif_type = 0;
return mt76_mcu_send_msg(dev, MCU_UNI_CMD(HIF_CTRL), &req, - sizeof(req), true); + sizeof(req), wait_resp); } EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 57a8340fa700..6901971f5da6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -1049,6 +1049,7 @@ enum { /* unified event table */ enum { MCU_UNI_EVENT_RESULT = 0x01, + MCU_UNI_EVENT_HIF_CTRL = 0x03, MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04, MCU_UNI_EVENT_ACCESS_REG = 0x6, MCU_UNI_EVENT_IE_COUNTDOWN = 0x09, @@ -1989,7 +1990,7 @@ int mt76_connac_mcu_set_suspend_mode(struct mt76_dev *dev, struct ieee80211_vif *vif, bool enable, u8 mdtim, bool wow_suspend); -int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend); +int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend, bool wait_resp); void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); int mt76_connac_sta_state_dp(struct mt76_dev *dev, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index 02c1de8620a7..8d3f3c8b1a88 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -637,10 +637,10 @@ int mt7921_run_firmware(struct mt792x_dev *dev) if (err) return err;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); err = mt7921_load_clc(dev, mt792x_ram_name(dev)); if (err) return err; + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return mt7921_mcu_fw_log_2_host(dev, 1); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 67723c22aea6..6da90c6238de 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -435,7 +435,7 @@ static int mt7921_pci_suspend(struct device *device) if (err < 0) goto restore_suspend;
- err = mt76_connac_mcu_set_hif_suspend(mdev, true); + err = mt76_connac_mcu_set_hif_suspend(mdev, true, true); if (err) goto restore_suspend;
@@ -481,7 +481,7 @@ static int mt7921_pci_suspend(struct device *device) if (!pm->ds_enable) mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
- mt76_connac_mcu_set_hif_suspend(mdev, false); + mt76_connac_mcu_set_hif_suspend(mdev, false, true);
restore_suspend: pm->suspended = false; @@ -532,7 +532,7 @@ static int mt7921_pci_resume(struct device *device) if (!pm->ds_enable) mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
- err = mt76_connac_mcu_set_hif_suspend(mdev, false); + err = mt76_connac_mcu_set_hif_suspend(mdev, false, true); if (err < 0) goto failed;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c index 95f526f7bb99..45b9f35aab17 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c @@ -240,7 +240,7 @@ static int mt7921s_suspend(struct device *__dev) mt76s_txqs_empty(&dev->mt76), 5 * HZ);
/* It is supposed that SDIO bus is idle at the point */ - err = mt76_connac_mcu_set_hif_suspend(mdev, true); + err = mt76_connac_mcu_set_hif_suspend(mdev, true, true); if (err) goto restore_worker;
@@ -258,7 +258,7 @@ static int mt7921s_suspend(struct device *__dev) restore_txrx_worker: mt76_worker_enable(&mdev->sdio.net_worker); mt76_worker_enable(&mdev->sdio.txrx_worker); - mt76_connac_mcu_set_hif_suspend(mdev, false); + mt76_connac_mcu_set_hif_suspend(mdev, false, true);
restore_worker: mt76_worker_enable(&mdev->tx_worker); @@ -302,7 +302,7 @@ static int mt7921s_resume(struct device *__dev) if (!pm->ds_enable) mt76_connac_mcu_set_deep_sleep(mdev, false);
- err = mt76_connac_mcu_set_hif_suspend(mdev, false); + err = mt76_connac_mcu_set_hif_suspend(mdev, false, true); failed: pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index 5be0edb2fe9a..100bdba32ba5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -263,7 +263,7 @@ static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state) pm->suspended = true; flush_work(&dev->reset_work);
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true); if (err) goto failed;
@@ -313,7 +313,7 @@ static int mt7921u_resume(struct usb_interface *intf) if (err < 0) goto failed;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true); failed: pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c index 5123a720413f..105ba5d0934c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -59,6 +59,18 @@ static int mt7925_thermal_init(struct mt792x_phy *phy) mt7925_hwmon_groups); return PTR_ERR_OR_ZERO(hwmon); } + +void mt7925_regd_update(struct mt792x_dev *dev) +{ + struct mt76_dev *mdev = &dev->mt76; + struct ieee80211_hw *hw = mdev->hw; + + mt7925_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env); + mt7925_mcu_set_channel_domain(hw->priv); + mt7925_set_tx_sar_pwr(hw, NULL); +} +EXPORT_SYMBOL_GPL(mt7925_regd_update); + static void mt7925_regd_notifier(struct wiphy *wiphy, struct regulatory_request *req) @@ -66,6 +78,7 @@ mt7925_regd_notifier(struct wiphy *wiphy, struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt76_dev *mdev = &dev->mt76; + struct mt76_connac_pm *pm = &dev->pm;
/* allow world regdom at the first boot only */ if (!memcmp(req->alpha2, "00", 2) && @@ -81,11 +94,15 @@ mt7925_regd_notifier(struct wiphy *wiphy, mdev->region = req->dfs_region; dev->country_ie_env = req->country_ie_env;
+ if (pm->suspended) + return; + + dev->regd_in_progress = true; mt792x_mutex_acquire(dev); - mt7925_mcu_set_clc(dev, req->alpha2, req->country_ie_env); - mt7925_mcu_set_channel_domain(hw->priv); - mt7925_set_tx_sar_pwr(hw, NULL); + mt7925_regd_update(dev); mt792x_mutex_release(dev); + dev->regd_in_progress = false; + wake_up(&dev->wait); }
static void mt7925_mac_init_basic_rates(struct mt792x_dev *dev) @@ -235,6 +252,7 @@ int mt7925_register_device(struct mt792x_dev *dev) spin_lock_init(&dev->pm.wake.lock); mutex_init(&dev->pm.mutex); init_waitqueue_head(&dev->pm.wait); + init_waitqueue_head(&dev->wait); spin_lock_init(&dev->pm.txq_lock); INIT_DELAYED_WORK(&dev->mphy.mac_work, mt792x_mac_work); INIT_DELAYED_WORK(&dev->phy.scan_work, mt7925_scan_work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index e42b4f0abbe7..847a1069f41e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -39,7 +39,6 @@ int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd, } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) || cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) || cmd == MCU_UNI_CMD(STA_REC_UPDATE) || - cmd == MCU_UNI_CMD(HIF_CTRL) || cmd == MCU_UNI_CMD(OFFLOAD) || cmd == MCU_UNI_CMD(SUSPEND)) { struct mt7925_mcu_uni_event *event; @@ -341,6 +340,51 @@ static void mt7925_mcu_roc_handle_grant(struct mt792x_dev *dev, jiffies + msecs_to_jiffies(duration)); }
+static void +mt7925_mcu_handle_hif_ctrl_basic(struct mt792x_dev *dev, struct tlv *tlv) +{ + struct mt7925_mcu_hif_ctrl_basic_tlv *basic; + + basic = (struct mt7925_mcu_hif_ctrl_basic_tlv *)tlv; + + if (basic->hifsuspend) { + if (basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE && + basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE) + /* success */ + dev->hif_idle = true; + else + /* busy */ + /* invalid */ + dev->hif_idle = false; + } else { + dev->hif_resumed = true; + } + wake_up(&dev->wait); +} + +static void +mt7925_mcu_uni_hif_ctrl_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct tlv *tlv; + u32 tlv_len; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + tlv = (struct tlv *)skb->data; + tlv_len = skb->len; + + while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) { + switch (le16_to_cpu(tlv->tag)) { + case UNI_EVENT_HIF_CTRL_BASIC: + mt7925_mcu_handle_hif_ctrl_basic(dev, tlv); + break; + default: + break; + } + tlv_len -= le16_to_cpu(tlv->len); + tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len)); + } +} + static void mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) { @@ -487,6 +531,9 @@ mt7925_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev, rxd = (struct mt7925_mcu_rxd *)skb->data;
switch (rxd->eid) { + case MCU_UNI_EVENT_HIF_CTRL: + mt7925_mcu_uni_hif_ctrl_event(dev, skb); + break; case MCU_UNI_EVENT_FW_LOG_2_HOST: mt7925_mcu_uni_debug_msg_event(dev, skb); break; @@ -958,10 +1005,10 @@ int mt7925_run_firmware(struct mt792x_dev *dev) if (err) return err;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); err = mt7925_load_clc(dev, mt792x_ram_name(dev)); if (err) return err; + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return mt7925_mcu_fw_log_2_host(dev, 1); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h index c83b8a210498..27680ad28b60 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -27,6 +27,26 @@
#define MCU_UNI_EVENT_ROC 0x27
+#define HIF_TRAFFIC_IDLE 0x2 + +enum { + UNI_EVENT_HIF_CTRL_BASIC = 0, + UNI_EVENT_HIF_CTRL_TAG_NUM +}; + +struct mt7925_mcu_hif_ctrl_basic_tlv { + __le16 tag; + __le16 len; + u8 cid; + u8 pad[3]; + u32 status; + u8 hif_type; + u8 hif_tx_traffic_status; + u8 hif_rx_traffic_status; + u8 hifsuspend; + u8 rsv[4]; +} __packed; + enum { UNI_ROC_ACQUIRE, UNI_ROC_ABORT, @@ -218,6 +238,7 @@ int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd); int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, u8 bit_op, u32 bit_map);
+void mt7925_regd_update(struct mt792x_dev *dev); int mt7925_mac_init(struct mt792x_dev *dev); int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index 5e428f19f972..a90e90131276 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -442,9 +442,10 @@ static int mt7925_pci_suspend(struct device *device) struct mt76_dev *mdev = pci_get_drvdata(pdev); struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt76_connac_pm *pm = &dev->pm; - int i, err; + int i, err, ret;
pm->suspended = true; + dev->hif_resumed = false; flush_work(&dev->reset_work); cancel_delayed_work_sync(&pm->ps_work); cancel_work_sync(&pm->wake_work); @@ -455,14 +456,21 @@ static int mt7925_pci_suspend(struct device *device) if (err < 0) goto restore_suspend;
+ wait_event_timeout(dev->wait, + !dev->regd_in_progress, 5 * HZ); + /* always enable deep sleep during suspend to reduce * power consumption */ mt7925_mcu_set_deep_sleep(dev, true);
- err = mt76_connac_mcu_set_hif_suspend(mdev, true); - if (err) + mt76_connac_mcu_set_hif_suspend(mdev, true, false); + ret = wait_event_timeout(dev->wait, + dev->hif_idle, 3 * HZ); + if (!ret) { + err = -ETIMEDOUT; goto restore_suspend; + }
napi_disable(&mdev->tx_napi); mt76_worker_disable(&mdev->tx_worker); @@ -503,8 +511,11 @@ static int mt7925_pci_suspend(struct device *device) if (!pm->ds_enable) mt7925_mcu_set_deep_sleep(dev, false);
- mt76_connac_mcu_set_hif_suspend(mdev, false); - + mt76_connac_mcu_set_hif_suspend(mdev, false, false); + ret = wait_event_timeout(dev->wait, + dev->hif_resumed, 3 * HZ); + if (!ret) + err = -ETIMEDOUT; restore_suspend: pm->suspended = false;
@@ -520,8 +531,9 @@ static int mt7925_pci_resume(struct device *device) struct mt76_dev *mdev = pci_get_drvdata(pdev); struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt76_connac_pm *pm = &dev->pm; - int i, err; + int i, err, ret;
+ dev->hif_idle = false; err = mt792x_mcu_drv_pmctrl(dev); if (err < 0) goto failed; @@ -550,12 +562,19 @@ static int mt7925_pci_resume(struct device *device) napi_schedule(&mdev->tx_napi); local_bh_enable();
- err = mt76_connac_mcu_set_hif_suspend(mdev, false); + mt76_connac_mcu_set_hif_suspend(mdev, false, false); + ret = wait_event_timeout(dev->wait, + dev->hif_resumed, 3 * HZ); + if (!ret) { + err = -ETIMEDOUT; + goto failed; + }
/* restore previous ds setting */ if (!pm->ds_enable) mt7925_mcu_set_deep_sleep(dev, false);
+ mt7925_regd_update(dev); failed: pm->suspended = false;
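The reworked mt7925 suspend path no longer blocks inside the MCU transport waiting for a response: it sends HIF_CTRL with wait_resp disabled and then sleeps on a wait queue until the unsolicited HIF_CTRL event sets a completion flag, or three seconds elapse. The send-then-wait_event_timeout shape looks roughly like this; the structure, flag and send helper are stand-ins for the mt792x fields and assume the wait queue was set up with init_waitqueue_head():

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct my_dev {				/* hypothetical */
	wait_queue_head_t wait;		/* assumed initialised elsewhere */
	bool hif_idle;
};

static void send_hif_suspend_no_wait(struct my_dev *dev) { /* fire and forget */ }

/* Runs when the firmware's HIF_CTRL event arrives. */
static void on_hif_ctrl_event(struct my_dev *dev, bool idle)
{
	dev->hif_idle = idle;
	wake_up(&dev->wait);
}

static int my_suspend(struct my_dev *dev)
{
	dev->hif_idle = false;
	send_hif_suspend_no_wait(dev);

	/* Wait for the event handler instead of a synchronous MCU response. */
	if (!wait_event_timeout(dev->wait, dev->hif_idle, 3 * HZ))
		return -ETIMEDOUT;

	return 0;
}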
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c index a63ff630eaba..bf040f34e4b9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c @@ -246,14 +246,19 @@ static int mt7925u_suspend(struct usb_interface *intf, pm_message_t state) { struct mt792x_dev *dev = usb_get_intfdata(intf); struct mt76_connac_pm *pm = &dev->pm; - int err; + int err, ret;
pm->suspended = true; + dev->hif_resumed = false; flush_work(&dev->reset_work);
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); - if (err) + mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, false); + ret = wait_event_timeout(dev->wait, + dev->hif_idle, 3 * HZ); + if (!ret) { + err = -ETIMEDOUT; goto failed; + }
mt76u_stop_rx(&dev->mt76); mt76u_stop_tx(&dev->mt76); @@ -274,8 +279,9 @@ static int mt7925u_resume(struct usb_interface *intf) struct mt792x_dev *dev = usb_get_intfdata(intf); struct mt76_connac_pm *pm = &dev->pm; bool reinit = true; - int err, i; + int err, i, ret;
+ dev->hif_idle = false; for (i = 0; i < 10; i++) { u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT);
@@ -301,7 +307,11 @@ static int mt7925u_resume(struct usb_interface *intf) if (err < 0) goto failed;
- err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); + mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, false); + ret = wait_event_timeout(dev->wait, + dev->hif_resumed, 3 * HZ); + if (!ret) + err = -ETIMEDOUT; failed: pm->suspended = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h index 2b8b9b2977f7..cf1b6083cf2f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x.h +++ b/drivers/net/wireless/mediatek/mt76/mt792x.h @@ -216,6 +216,8 @@ struct mt792x_dev { bool has_eht:1; bool regd_in_progress:1; bool aspm_supported:1; + bool hif_idle:1; + bool hif_resumed:1; wait_queue_head_t wait;
struct work_struct init_work; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c index 260f72055013..b517df2db6d7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c @@ -1252,7 +1252,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw) opmode &= ~BW_OPMODE_20MHZ; rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode); rsr &= ~RSR_RSC_BANDWIDTH_40M; - if (sec_ch_above) + if (!sec_ch_above) rsr |= RSR_RSC_UPPER_SUB_CHANNEL; else rsr |= RSR_RSC_LOWER_SUB_CHANNEL; @@ -1321,9 +1321,8 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
for (i = RF_A; i < priv->rf_paths; i++) { val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG); - if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40) - val32 &= ~MODE_AG_CHANNEL_20MHZ; - else + val32 &= ~MODE_AG_BW_MASK; + if (hw->conf.chandef.width != NL80211_CHAN_WIDTH_40) val32 |= MODE_AG_CHANNEL_20MHZ; rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index aa702ba7c9f5..d6c35e8d02a5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -511,7 +511,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, if (sta) { sta_entry = (struct rtl_sta_info *)sta->drv_priv; tid = ieee80211_get_tid(hdr); - agg_state = sta_entry->tids[tid].agg.agg_state; + if (tid < MAX_TID_COUNT) + agg_state = sta_entry->tids[tid].agg.agg_state; ampdu_density = sta->deflink.ht_cap.ampdu_density; }
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c index d6bea5ec8e24..d8db341a5731 100644 --- a/drivers/net/wireless/realtek/rtw88/sdio.c +++ b/drivers/net/wireless/realtek/rtw88/sdio.c @@ -144,8 +144,10 @@ static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr) { + bool might_indirect_under_power_off = rtwdev->chip->id == RTW_CHIP_TYPE_8822C; + if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) && - !rtw_sdio_is_bus_addr(addr)) + !rtw_sdio_is_bus_addr(addr) && might_indirect_under_power_off) return false;
return !rtw_sdio_is_sdio30_supported(rtwdev) || diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index ffd7367ce119..018a80674f06 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -406,7 +406,7 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, "Reader power on cmd error %d\n", rc); - return rc; + return rc ?: -EINVAL; }
rc = usb_submit_urb(phy->in_urb, GFP_KERNEL); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 432efcbf9e2f..2e47c56b2d4b 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -591,7 +591,7 @@ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status) if (status > 0 && (status & NVME_STATUS_DNR)) return false;
-	if (status == -EKEYREJECTED)
+	if (status == -EKEYREJECTED || status == -ENOKEY)
		return false;
if (ctrl->opts->max_reconnects == -1 || diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index d01bd3c300fa..3d90ace0b537 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1462,14 +1462,14 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, { struct fcnvme_ls_disconnect_assoc_rqst *rqst = &lsop->rqstbuf->rq_dis_assoc; - struct nvme_fc_ctrl *ctrl, *ret = NULL; + struct nvme_fc_ctrl *ctrl, *tmp, *ret = NULL; struct nvmefc_ls_rcv_op *oldls = NULL; u64 association_id = be64_to_cpu(rqst->associd.association_id); unsigned long flags;
spin_lock_irqsave(&rport->lock, flags);
- list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { + list_for_each_entry_safe(ctrl, tmp, &rport->ctrl_list, ctrl_list) { if (!nvme_fc_ctrl_get(ctrl)) continue; spin_lock(&ctrl->lock); @@ -1482,7 +1482,9 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, if (ret) /* leave the ctrl get reference */ break; + spin_unlock_irqrestore(&rport->lock, flags); nvme_fc_ctrl_put(ctrl); + spin_lock_irqsave(&rport->lock, flags); }
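Since nvme_fc_ctrl_put() can drop the last reference and free the controller, the loop switches to list_for_each_entry_safe() and releases rport->lock around the put, re-acquiring it before the next iteration. A compressed sketch of that pattern with hypothetical kref-counted elements; the subsystem's own locking rules are what make dropping the lock mid-walk safe here:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_ctrl {				/* hypothetical element */
	struct list_head list;
	struct kref ref;
};

static void my_ctrl_free(struct kref *ref)
{
	kfree(container_of(ref, struct my_ctrl, ref));
}

static void walk_and_put(spinlock_t *lock, struct list_head *head)
{
	struct my_ctrl *c, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(c, tmp, head, list) {
		if (!kref_get_unless_zero(&c->ref))
			continue;

		/* ... inspect 'c' under the lock ... */

		/* The put may free 'c'; drop the spinlock around it and let
		 * the _safe cursor keep the walk going.
		 */
		spin_unlock_irqrestore(lock, flags);
		kref_put(&c->ref, my_ctrl_free);
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}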
spin_unlock_irqrestore(&rport->lock, flags); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 8c80f4dc8b3f..0940955d3701 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -501,8 +501,8 @@ void __init early_init_fdt_scan_reserved_mem(void) if (!initial_boot_params) return;
-	fdt_scan_reserved_mem();
	fdt_reserve_elfcorehdr();
+	fdt_scan_reserved_mem();
/* Process header /memreserve/ fields */ for (n = 0; ; n++) { diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index a0daaa548bc3..8ba778170447 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c @@ -154,7 +154,9 @@ static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data;
	/* switch IRQ's for devices below LASI/WAX to other CPU */
-	gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
+	/* ASP chip (svers 0x70) does not support reprogramming */
+	if (gsc_dev->gsc->id.sversion != 0x70)
+		gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
irq_data_update_effective_affinity(d, &tmask);
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 81f085cebf62..3f9b1f7131bf 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -46,7 +46,7 @@ #define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc -#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00 +#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0
#define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8 #define PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK 0xf8 @@ -55,6 +55,9 @@ #define PCIE_RC_DL_MDIO_WR_DATA 0x1104 #define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_REG_PHY_CTL_1 0x1804 +#define PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK 0x8 + #define PCIE_MISC_MISC_CTRL 0x4008 #define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80 #define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400 @@ -191,11 +194,11 @@ #define SSC_STATUS_PLL_LOCK_MASK 0x800 #define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_INDEX]) -#define DATA_ADDR(pcie) ((pcie)->reg_offsets[EXT_CFG_DATA]) -#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->reg_offsets[RGR1_SW_INIT_1]) -#define HARD_DEBUG(pcie) ((pcie)->reg_offsets[PCIE_HARD_DEBUG]) -#define INTR2_CPU_BASE(pcie) ((pcie)->reg_offsets[PCIE_INTR2_CPU_BASE]) +#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX]) +#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA]) +#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1]) +#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG]) +#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */ #define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700 @@ -276,8 +279,6 @@ struct brcm_pcie { int gen; u64 msi_target_addr; struct brcm_msi *msi; - const int *reg_offsets; - enum pcie_soc_base soc_base; struct reset_control *rescal; struct reset_control *perst_reset; struct reset_control *bridge_reset; @@ -285,17 +286,14 @@ struct brcm_pcie { int num_memc; u64 memc_size[PCIE_BRCM_MAX_MEMC]; u32 hw_rev; - int (*perst_set)(struct brcm_pcie *pcie, u32 val); - int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); struct subdev_regulators *sr; bool ep_wakeup_capable; - bool has_phy; - u8 num_inbound_wins; + const struct pcie_cfg_data *cfg; };
static inline bool is_bmips(const struct brcm_pcie *pcie) { - return pcie->soc_base == BCM7435 || pcie->soc_base == BCM7425; + return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425; }
/* @@ -855,7 +853,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, * security considerations, and is not implemented in our modern * SoCs. */ - if (pcie->soc_base != BCM7712) + if (pcie->cfg->soc_base != BCM7712) add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) { @@ -872,10 +870,10 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, * That being said, each BARs size must still be a power of * two. */ - if (pcie->soc_base == BCM7712) + if (pcie->cfg->soc_base == BCM7712) add_inbound_win(b++, &n, size, cpu_start, pcie_start);
- if (n > pcie->num_inbound_wins) + if (n > pcie->cfg->num_inbound_wins) break; }
@@ -889,7 +887,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, * that enables multiple memory controllers. As such, it can return * now w/o doing special configuration. */ - if (pcie->soc_base == BCM7712) + if (pcie->cfg->soc_base == BCM7712) return n;
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1, @@ -1012,7 +1010,7 @@ static void set_inbound_win_registers(struct brcm_pcie *pcie, * 7712: * All of their BARs need to be set. */ - if (pcie->soc_base == BCM7712) { + if (pcie->cfg->soc_base == BCM7712) { /* BUS remap register settings */ reg_offset = brcm_ubus_reg_offset(i); tmp = lower_32_bits(cpu_addr) & ~0xfff; @@ -1030,21 +1028,21 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) void __iomem *base = pcie->base; struct pci_host_bridge *bridge; struct resource_entry *entry; - u32 tmp, burst, aspm_support; + u32 tmp, burst, num_lanes, num_lanes_cap; u8 num_out_wins = 0; int num_inbound_wins = 0; int memc, ret;
/* Reset the bridge */ - ret = pcie->bridge_sw_init_set(pcie, 1); + ret = pcie->cfg->bridge_sw_init_set(pcie, 1); if (ret) return ret;
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */ - if (pcie->soc_base == BCM2711) { - ret = pcie->perst_set(pcie, 1); + if (pcie->cfg->soc_base == BCM2711) { + ret = pcie->cfg->perst_set(pcie, 1); if (ret) { - pcie->bridge_sw_init_set(pcie, 0); + pcie->cfg->bridge_sw_init_set(pcie, 0); return ret; } } @@ -1052,7 +1050,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) usleep_range(100, 200);
/* Take the bridge out of reset */ - ret = pcie->bridge_sw_init_set(pcie, 0); + ret = pcie->cfg->bridge_sw_init_set(pcie, 0); if (ret) return ret;
@@ -1072,9 +1070,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) */ if (is_bmips(pcie)) burst = 0x1; /* 256 bytes */ - else if (pcie->soc_base == BCM2711) + else if (pcie->cfg->soc_base == BCM2711) burst = 0x0; /* 128 bytes */ - else if (pcie->soc_base == BCM7278) + else if (pcie->cfg->soc_base == BCM7278) burst = 0x3; /* 512 bytes */ else burst = 0x2; /* 512 bytes */ @@ -1130,14 +1128,32 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
/* Don't advertise L0s capability if 'aspm-no-l0s' */ - aspm_support = PCIE_LINK_STATE_L1; - if (!of_property_read_bool(pcie->np, "aspm-no-l0s")) - aspm_support |= PCIE_LINK_STATE_L0S; tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); - u32p_replace_bits(&tmp, aspm_support, - PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); + if (of_property_read_bool(pcie->np, "aspm-no-l0s")) + tmp &= ~PCI_EXP_LNKCAP_ASPM_L0S; writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */ + num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK); + num_lanes = 0; + + /* + * Use hardware negotiated Max Link Width value by default. If the + * "num-lanes" DT property is present, assume that the chip's default + * link width capability information is incorrect/undesired and use the + * specified value instead. + */ + if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) && + num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) { + u32p_replace_bits(&tmp, num_lanes, + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK); + writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); + tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1); + u32p_replace_bits(&tmp, 1, + PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK); + writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1); + } + /* * For config space accesses on the RC, show the right class for * a PCIe-PCIe bridge (the default setting is to be EP mode). @@ -1199,7 +1215,7 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie) u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
/* 7712 does not have this (RGR1) timer */ - if (pcie->soc_base == BCM7712) + if (pcie->cfg->soc_base == BCM7712) return;
/* Each unit in timeout register is 1/216,000,000 seconds */ @@ -1281,7 +1297,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) brcm_pcie_set_gen(pcie, pcie->gen);
/* Unassert the fundamental reset */ - ret = pcie->perst_set(pcie, 0); + ret = pcie->cfg->perst_set(pcie, 0); if (ret) return ret;
@@ -1465,12 +1481,12 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie) { - return pcie->has_phy ? brcm_phy_cntl(pcie, 1) : 0; + return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0; }
static inline int brcm_phy_stop(struct brcm_pcie *pcie) { - return pcie->has_phy ? brcm_phy_cntl(pcie, 0) : 0; + return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0; }
static int brcm_pcie_turn_off(struct brcm_pcie *pcie) @@ -1481,7 +1497,7 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie) if (brcm_pcie_link_up(pcie)) brcm_pcie_enter_l23(pcie); /* Assert fundamental reset */ - ret = pcie->perst_set(pcie, 1); + ret = pcie->cfg->perst_set(pcie, 1); if (ret) return ret;
@@ -1496,7 +1512,7 @@ static int brcm_pcie_turn_off(struct brcm_pcie *pcie) writel(tmp, base + HARD_DEBUG(pcie));
/* Shutdown PCIe bridge */ - ret = pcie->bridge_sw_init_set(pcie, 1); + ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
return ret; } @@ -1584,7 +1600,7 @@ static int brcm_pcie_resume_noirq(struct device *dev) goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */ - pcie->bridge_sw_init_set(pcie, 0); + pcie->cfg->bridge_sw_init_set(pcie, 0);
/* SERDES_IDDQ = 0 */ tmp = readl(base + HARD_DEBUG(pcie)); @@ -1805,12 +1821,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->dev = &pdev->dev; pcie->np = np; - pcie->reg_offsets = data->offsets; - pcie->soc_base = data->soc_base; - pcie->perst_set = data->perst_set; - pcie->bridge_sw_init_set = data->bridge_sw_init_set; - pcie->has_phy = data->has_phy; - pcie->num_inbound_wins = data->num_inbound_wins; + pcie->cfg = data;
pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) @@ -1845,7 +1856,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) if (ret) return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
- pcie->bridge_sw_init_set(pcie, 0); + pcie->cfg->bridge_sw_init_set(pcie, 0);
if (pcie->swinit_reset) { ret = reset_control_assert(pcie->swinit_reset); @@ -1884,7 +1895,8 @@ static int brcm_pcie_probe(struct platform_device *pdev) goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION); - if (pcie->soc_base == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { + if (pcie->cfg->soc_base == BCM4908 && + pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) { dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n"); ret = -ENODEV; goto fail; @@ -1904,7 +1916,8 @@ static int brcm_pcie_probe(struct platform_device *pdev) } }
- bridge->ops = pcie->soc_base == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops; + bridge->ops = pcie->cfg->soc_base == BCM7425 ? + &brcm7425_pcie_ops : &brcm_pcie_ops; bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 7e9b6e4d4695..a00a2ce01045 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -635,6 +635,8 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state) struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver;
+ pci_dev->state_saved = false; + if (drv && drv->suspend) { pci_power_t prev = pci_dev->current_state; int error; @@ -1039,6 +1041,8 @@ static int pci_pm_freeze(struct device *dev)
if (!pm) { pci_pm_default_suspend(pci_dev); + if (!pm_runtime_suspended(dev)) + pci_dev->state_saved = false; return 0; }
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c index 2158a5975c90..f2f2b358ad92 100644 --- a/drivers/perf/arm_cspmu/arm_cspmu.c +++ b/drivers/perf/arm_cspmu/arm_cspmu.c @@ -1412,8 +1412,10 @@ void arm_cspmu_impl_unregister(const struct arm_cspmu_impl_match *impl_match)
/* Unbind the driver from all matching backend devices. */ while ((dev = driver_find_device(&arm_cspmu_driver.driver, NULL, - match, arm_cspmu_match_device))) + match, arm_cspmu_match_device))) { device_release_driver(dev); + put_device(dev); + }
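driver_find_device() returns the matched device with a reference held, so every device it hands back has to be put once the caller is done with it; the loop above was leaking one reference per unbound device. The idiom, shown with a trivial match-everything callback as a placeholder:

#include <linux/device.h>

static int match_any(struct device *dev, const void *data)
{
	return 1;			/* placeholder match: every device */
}

static void unbind_all(struct device_driver *drv)
{
	struct device *dev;

	/* driver_find_device() takes a reference on the device it returns. */
	while ((dev = driver_find_device(drv, NULL, NULL, match_any))) {
		device_release_driver(dev);
		put_device(dev);	/* balance the get from the lookup */
	}
}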
mutex_lock(&arm_cspmu_lock);
diff --git a/drivers/phy/broadcom/phy-bcm63xx-usbh.c b/drivers/phy/broadcom/phy-bcm63xx-usbh.c index 647644de041b..29fd6791bae6 100644 --- a/drivers/phy/broadcom/phy-bcm63xx-usbh.c +++ b/drivers/phy/broadcom/phy-bcm63xx-usbh.c @@ -375,7 +375,7 @@ static struct phy *bcm63xx_usbh_phy_xlate(struct device *dev, return of_phy_simple_xlate(dev, args); }
-static int __init bcm63xx_usbh_phy_probe(struct platform_device *pdev) +static int bcm63xx_usbh_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct bcm63xx_usbh_phy *usbh; @@ -432,7 +432,7 @@ static int __init bcm63xx_usbh_phy_probe(struct platform_device *pdev) return 0; }
-static const struct of_device_id bcm63xx_usbh_phy_ids[] __initconst = { +static const struct of_device_id bcm63xx_usbh_phy_ids[] = { { .compatible = "brcm,bcm6318-usbh-phy", .data = &usbh_bcm6318 }, { .compatible = "brcm,bcm6328-usbh-phy", .data = &usbh_bcm6328 }, { .compatible = "brcm,bcm6358-usbh-phy", .data = &usbh_bcm6358 }, @@ -443,7 +443,7 @@ static const struct of_device_id bcm63xx_usbh_phy_ids[] __initconst = { }; MODULE_DEVICE_TABLE(of, bcm63xx_usbh_phy_ids);
-static struct platform_driver bcm63xx_usbh_phy_driver __refdata = { +static struct platform_driver bcm63xx_usbh_phy_driver = { .driver = { .name = "bcm63xx-usbh-phy", .of_match_table = bcm63xx_usbh_phy_ids, diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index c6ef77472d9a..8a7eb11df902 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -349,7 +349,7 @@ struct rzg2l_pinctrl { spinlock_t bitmap_lock; /* protect tint_slot bitmap */ unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT];
- spinlock_t lock; /* lock read/write registers */ + raw_spinlock_t lock; /* lock read/write registers */ struct mutex mutex; /* serialize adding groups and functions */
struct rzg2l_pinctrl_pin_settings *settings; @@ -454,7 +454,7 @@ static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl, unsigned long flags; u32 reg;
- spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags);
/* Set pin to 'Non-use (Hi-Z input protection)' */ reg = readw(pctrl->base + PM(off)); @@ -478,7 +478,7 @@ static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
pctrl->data->pwpr_pfc_lock_unlock(pctrl, true);
- spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); };
static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev, @@ -805,10 +805,10 @@ static void rzg2l_rmw_pin_config(struct rzg2l_pinctrl *pctrl, u32 offset, addr += 4; }
- spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); reg = readl(addr) & ~(mask << (bit * 8)); writel(reg | (val << (bit * 8)), addr); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); }
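Switching pctrl->lock to a raw_spinlock_t matters because these register read-modify-write sections are reached from irqchip callbacks, which on PREEMPT_RT must not take a sleeping (rt-mutex backed) spinlock. The basic guarded read-modify-write looks like this, with a hypothetical register layout:

#include <linux/io.h>
#include <linux/spinlock.h>

struct my_pinctrl {			/* hypothetical */
	void __iomem *base;
	raw_spinlock_t lock;		/* protects register RMW sequences */
};

static void my_rmw(struct my_pinctrl *pctrl, u32 offset, u32 mask, u32 val)
{
	unsigned long flags;
	u32 reg;

	/* raw_ variant: stays a true spinning lock even on PREEMPT_RT */
	raw_spin_lock_irqsave(&pctrl->lock, flags);
	reg = readl(pctrl->base + offset);
	reg = (reg & ~mask) | (val & mask);
	writel(reg, pctrl->base + offset);
	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}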
static int rzg2l_caps_to_pwr_reg(const struct rzg2l_register_offsets *regs, u32 caps) @@ -1036,14 +1036,14 @@ static int rzg2l_write_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oe if (bit < 0) return bit;
- spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readb(pctrl->base + ETH_MODE); if (oen) val &= ~BIT(bit); else val |= BIT(bit); writeb(val, pctrl->base + ETH_MODE); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0; } @@ -1089,14 +1089,14 @@ static int rzg3s_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oe if (bit < 0) return bit;
- spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readb(pctrl->base + ETH_MODE); if (oen) val &= ~BIT(bit); else val |= BIT(bit); writeb(val, pctrl->base + ETH_MODE); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0; } @@ -1201,7 +1201,7 @@ static int rzv2h_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oe u8 pwpr;
bit = rzv2h_pin_to_oen_bit(pctrl, _pin); - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readb(pctrl->base + PFC_OEN); if (oen) val &= ~BIT(bit); @@ -1212,7 +1212,7 @@ static int rzv2h_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oe writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr); writeb(val, pctrl->base + PFC_OEN); writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0; } @@ -1613,14 +1613,14 @@ static int rzg2l_gpio_request(struct gpio_chip *chip, unsigned int offset) if (ret) return ret;
- spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags);
/* Select GPIO mode in PMC Register */ reg8 = readb(pctrl->base + PMC(off)); reg8 &= ~BIT(bit); pctrl->data->pmc_writeb(pctrl, reg8, PMC(off));
- spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0; } @@ -1635,7 +1635,7 @@ static void rzg2l_gpio_set_direction(struct rzg2l_pinctrl *pctrl, u32 offset, unsigned long flags; u16 reg16;
- spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags);
reg16 = readw(pctrl->base + PM(off)); reg16 &= ~(PM_MASK << (bit * 2)); @@ -1643,7 +1643,7 @@ static void rzg2l_gpio_set_direction(struct rzg2l_pinctrl *pctrl, u32 offset, reg16 |= (output ? PM_OUTPUT : PM_INPUT) << (bit * 2); writew(reg16, pctrl->base + PM(off));
- spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); }
static int rzg2l_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) @@ -1687,7 +1687,7 @@ static void rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset, unsigned long flags; u8 reg8;
- spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags);
reg8 = readb(pctrl->base + P(off));
@@ -1696,7 +1696,7 @@ static void rzg2l_gpio_set(struct gpio_chip *chip, unsigned int offset, else writeb(reg8 & ~BIT(bit), pctrl->base + P(off));
- spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); }
static int rzg2l_gpio_direction_output(struct gpio_chip *chip, @@ -2236,14 +2236,13 @@ static int rzg2l_gpio_get_gpioint(unsigned int virq, struct rzg2l_pinctrl *pctrl return gpioint; }
-static void rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl, - unsigned int hwirq, bool enable) +static void __rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl, + unsigned int hwirq, bool enable) { const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[hwirq]; u64 *pin_data = pin_desc->drv_data; u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data); u8 bit = RZG2L_PIN_ID_TO_PIN(hwirq); - unsigned long flags; void __iomem *addr;
addr = pctrl->base + ISEL(off); @@ -2252,12 +2251,20 @@ static void rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl, addr += 4; }
- spin_lock_irqsave(&pctrl->lock, flags); if (enable) writel(readl(addr) | BIT(bit * 8), addr); else writel(readl(addr) & ~BIT(bit * 8), addr); - spin_unlock_irqrestore(&pctrl->lock, flags); +} + +static void rzg2l_gpio_irq_endisable(struct rzg2l_pinctrl *pctrl, + unsigned int hwirq, bool enable) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pctrl->lock, flags); + __rzg2l_gpio_irq_endisable(pctrl, hwirq, enable); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); }
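Splitting rzg2l_gpio_irq_endisable() into a bare __rzg2l_gpio_irq_endisable() plus a locking wrapper lets a caller that already holds pctrl->lock (the IRQ restore path further down) reuse the register update without acquiring the raw spinlock twice. A generic sketch of that idiom, reusing struct demo_ctrl and the includes from the previous sketch; DEMO_IEN is a hypothetical register:

#include <linux/bits.h>

#define DEMO_IEN	0x10	/* hypothetical interrupt-enable register */

/* Caller must hold ctrl->lock. */
static void __demo_irq_endisable(struct demo_ctrl *ctrl, unsigned int hwirq,
				 bool enable)
{
	u32 reg = readl(ctrl->base + DEMO_IEN);

	if (enable)
		reg |= BIT(hwirq);
	else
		reg &= ~BIT(hwirq);
	writel(reg, ctrl->base + DEMO_IEN);
}

/* Locked wrapper for ordinary callers. */
static void demo_irq_endisable(struct demo_ctrl *ctrl, unsigned int hwirq,
			       bool enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctrl->lock, flags);
	__demo_irq_endisable(ctrl, hwirq, enable);
	raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}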
static void rzg2l_gpio_irq_disable(struct irq_data *d) @@ -2269,15 +2276,25 @@ static void rzg2l_gpio_irq_disable(struct irq_data *d) gpiochip_disable_irq(gc, hwirq); }
-static void rzg2l_gpio_irq_enable(struct irq_data *d) +static void __rzg2l_gpio_irq_enable(struct irq_data *d, bool lock) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct rzg2l_pinctrl *pctrl = container_of(gc, struct rzg2l_pinctrl, gpio_chip); unsigned int hwirq = irqd_to_hwirq(d);
gpiochip_enable_irq(gc, hwirq); + if (lock) + rzg2l_gpio_irq_endisable(pctrl, hwirq, true); + else + __rzg2l_gpio_irq_endisable(pctrl, hwirq, true); irq_chip_enable_parent(d); }
+static void rzg2l_gpio_irq_enable(struct irq_data *d) +{ + __rzg2l_gpio_irq_enable(d, true); +} + static int rzg2l_gpio_irq_set_type(struct irq_data *d, unsigned int type) { return irq_chip_set_type_parent(d, type); @@ -2438,11 +2455,11 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl) * This has to be atomically executed to protect against a concurrent * interrupt. */ - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data)); if (!ret && !irqd_irq_disabled(data)) - rzg2l_gpio_irq_enable(data); - spin_unlock_irqrestore(&pctrl->lock, flags); + __rzg2l_gpio_irq_enable(data, false); + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
if (ret) dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq); @@ -2765,7 +2782,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev) "failed to enable GPIO clk\n"); }
- spin_lock_init(&pctrl->lock); + raw_spin_lock_init(&pctrl->lock); spin_lock_init(&pctrl->bitmap_lock); mutex_init(&pctrl->mutex); atomic_set(&pctrl->wakeup_path, 0); @@ -2908,7 +2925,7 @@ static void rzg2l_pinctrl_pm_setup_pfc(struct rzg2l_pinctrl *pctrl) u32 nports = pctrl->data->n_port_pins / RZG2L_PINS_PER_PORT; unsigned long flags;
- spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); pctrl->data->pwpr_pfc_lock_unlock(pctrl, false);
/* Restore port registers. */ @@ -2953,7 +2970,7 @@ static void rzg2l_pinctrl_pm_setup_pfc(struct rzg2l_pinctrl *pctrl) }
pctrl->data->pwpr_pfc_lock_unlock(pctrl, true); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); }
static int rzg2l_pinctrl_suspend_noirq(struct device *dev) diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index 5ac37bd024c8..91d96f169421 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -671,6 +671,7 @@ static void cros_ec_ishtp_remove(struct ishtp_cl_device *cl_device)
cancel_work_sync(&client_data->work_ishtp_reset); cancel_work_sync(&client_data->work_ec_evt); + cros_ec_unregister(client_data->ec_dev); cros_ish_deinit(cros_ish_cl); ishtp_put_device(cl_device); } diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index 9a0220b4de3c..67d9b19731ed 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -796,18 +796,18 @@ static const struct mlxbf_pmc_events mlxbf_pmc_llt_miss_events[] = { {11, "GDC_MISS_MACHINE_CHI_TXDAT"}, {12, "GDC_MISS_MACHINE_CHI_RXDAT"}, {13, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_0"}, - {14, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_1 "}, + {14, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_1"}, {15, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_2"}, - {16, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_3 "}, - {17, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_0 "}, - {18, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_1 "}, - {19, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_2 "}, - {20, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_3 "}, + {16, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC0_3"}, + {17, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_0"}, + {18, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_1"}, + {19, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_2"}, + {20, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC1_3"}, {21, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE0_0"}, {22, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE0_1"}, {23, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE0_2"}, {24, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE0_3"}, - {25, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_0 "}, + {25, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_0"}, {26, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_1"}, {27, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_2"}, {28, "GDC_MISS_MACHINE_G_FIFO_FF_EXEC_DONE1_3"}, diff --git a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c index c50ad5880503..f346aad8e9d8 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c +++ b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c @@ -207,7 +207,7 @@ static int hp_populate_enumeration_elements_from_package(union acpi_object *enum case PREREQUISITES: size = min_t(u32, enum_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE); for (reqs = 0; reqs < size; reqs++) { - if (elem >= enum_obj_count) { + if (elem + reqs >= enum_obj_count) { pr_err("Error enum-objects package is too small\n"); return -EINVAL; } @@ -255,7 +255,7 @@ static int hp_populate_enumeration_elements_from_package(union acpi_object *enum
for (pos_values = 0; pos_values < size && pos_values < MAX_VALUES_SIZE; pos_values++) { - if (elem >= enum_obj_count) { + if (elem + pos_values >= enum_obj_count) { pr_err("Error enum-objects package is too small\n"); return -EINVAL; } diff --git a/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c index 6c7f4d5fa9cb..63b1fda2be4e 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c +++ b/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c @@ -227,7 +227,7 @@ static int hp_populate_integer_elements_from_package(union acpi_object *integer_ size = min_t(u32, integer_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) { - if (elem >= integer_obj_count) { + if (elem + reqs >= integer_obj_count) { pr_err("Error elem-objects package is too small\n"); return -EINVAL; } diff --git a/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c index c6e57bb9d8b7..6a31f47ce3f5 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c +++ b/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c @@ -216,6 +216,11 @@ static int hp_populate_ordered_list_elements_from_package(union acpi_object *ord size = min_t(u32, ordered_list_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE); for (reqs = 0; reqs < size; reqs++) { + if (elem + reqs >= order_obj_count) { + pr_err("Error elem-objects package is too small\n"); + return -EINVAL; + } + ret = hp_convert_hexstr_to_str(order_obj[elem + reqs].string.pointer, order_obj[elem + reqs].string.length, &str_value, &value_len); diff --git a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c index 35936c05e45b..a5c457d06b9c 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c +++ b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c @@ -303,6 +303,11 @@ static int hp_populate_password_elements_from_package(union acpi_object *passwor MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) { + if (elem + reqs >= password_obj_count) { + pr_err("Error elem-objects package is too small\n"); + return -EINVAL; + } + ret = hp_convert_hexstr_to_str(password_obj[elem + reqs].string.pointer, password_obj[elem + reqs].string.length, &str_value, &value_len); diff --git a/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c index 27758b779b2d..7b885d25650c 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c +++ b/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c @@ -217,7 +217,7 @@ static int hp_populate_string_elements_from_package(union acpi_object *string_ob MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) { - if (elem >= string_obj_count) { + if (elem + reqs >= string_obj_count) { pr_err("Error elem-objects package is too small\n"); return -EINVAL; } diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index 231b37909801..139956168cf9 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -273,7 +273,7 @@ static int __init ibm_rtl_init(void) { /* search for the _RTL_ signature at the start of the table */ for (i = 0 ; i < ebda_size/sizeof(unsigned int); i++) { struct ibm_rtl_table __iomem * tmp; - tmp = (struct ibm_rtl_table __iomem *) (ebda_map+i); + tmp = (struct ibm_rtl_table __iomem *) (ebda_map + i*sizeof(unsigned int)); if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) { phys_addr_t addr; unsigned int plen; diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c index 11503b1c85f3..044c9f2bb710 100644 --- a/drivers/platform/x86/intel/chtwc_int33fe.c +++ b/drivers/platform/x86/intel/chtwc_int33fe.c @@ -77,7 +77,7 @@ static const struct software_node max17047_node = { * software node. */ static struct software_node_ref_args fusb302_mux_refs[] = { - { .node = NULL }, + SOFTWARE_NODE_REFERENCE(NULL), };
static const struct property_entry fusb302_properties[] = { @@ -190,11 +190,6 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data) { software_node_unregister_node_group(node_group);
- if (fusb302_mux_refs[0].node) { - fwnode_handle_put(software_node_fwnode(fusb302_mux_refs[0].node)); - fusb302_mux_refs[0].node = NULL; - } - if (data->dp) { data->dp->secondary = NULL; fwnode_handle_put(data->dp); @@ -202,7 +197,15 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data) } }
-static int cht_int33fe_add_nodes(struct cht_int33fe_data *data) +static void cht_int33fe_put_swnode(void *data) +{ + struct fwnode_handle *fwnode = data; + + fwnode_handle_put(fwnode); + fusb302_mux_refs[0] = SOFTWARE_NODE_REFERENCE(NULL); +} + +static int cht_int33fe_add_nodes(struct device *dev, struct cht_int33fe_data *data) { const struct software_node *mux_ref_node; int ret; @@ -212,17 +215,25 @@ static int cht_int33fe_add_nodes(struct cht_int33fe_data *data) * until the mux driver has created software node for the mux device. * It means we depend on the mux driver. This function will return * -EPROBE_DEFER until the mux device is registered. + * + * FIXME: the relevant software node exists in intel-xhci-usb-role-switch + * and - if exported - could be used to set up a static reference. */ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw"); if (!mux_ref_node) return -EPROBE_DEFER;
+ ret = devm_add_action_or_reset(dev, cht_int33fe_put_swnode, + software_node_fwnode(mux_ref_node)); + if (ret) + return ret; + /* * Update node used in "usb-role-switch" property. Note that we * rely on software_node_register_node_group() to use the original * instance of properties instead of copying them. */ - fusb302_mux_refs[0].node = mux_ref_node; + fusb302_mux_refs[0] = SOFTWARE_NODE_REFERENCE(mux_ref_node);
ret = software_node_register_node_group(node_group); if (ret) @@ -345,7 +356,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev) return fusb302_irq; }
- ret = cht_int33fe_add_nodes(data); + ret = cht_int33fe_add_nodes(dev, data); if (ret) return ret;
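The reworked cht_int33fe_add_nodes() hands the software-node fwnode reference to a device-managed action instead of dropping it by hand in the remove path, so the reference is released automatically both on probe failure and on unbind. A hedged sketch of that pattern using the generic fwnode refcount helpers (the demo_* names are illustrative only):

#include <linux/device.h>
#include <linux/property.h>

static void demo_put_fwnode(void *data)
{
	fwnode_handle_put(data);
}

static int demo_hold_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	/*
	 * Take our own reference, then let the driver core release it:
	 * devm_add_action_or_reset() runs demo_put_fwnode() on unbind and
	 * also runs it immediately if the registration itself fails.
	 */
	return devm_add_action_or_reset(dev, demo_put_fwnode,
					fwnode_handle_get(fwnode));
}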
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 59392f1a0d8a..04056fbd9219 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -168,6 +168,18 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell Pro Rugged 10 Tablet RA00260"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell Pro Rugged 12 Tablet RA02260"), + }, + }, { } };
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index e5391a37014d..db3dadd29b29 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -1130,6 +1130,9 @@ static void __exit msi_cleanup(void) sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group); if (!quirks->old_ec_model && threeg_exists) device_remove_file(&msipf_device->dev, &dev_attr_threeg); + if (quirks->old_ec_model) + sysfs_remove_group(&msipf_device->dev.kobj, + &msipf_old_attribute_group); platform_device_unregister(msipf_device); platform_driver_unregister(&msipf_driver); backlight_device_unregister(msibl_device); diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c index 00fdb5a905e2..b811d0ad94e2 100644 --- a/drivers/pmdomain/imx/gpc.c +++ b/drivers/pmdomain/imx/gpc.c @@ -403,13 +403,12 @@ static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap, static int imx_gpc_probe(struct platform_device *pdev) { const struct imx_gpc_dt_data *of_id_data = device_get_match_data(&pdev->dev); - struct device_node *pgc_node; + struct device_node *pgc_node __free(device_node) + = of_get_child_by_name(pdev->dev.of_node, "pgc"); struct regmap *regmap; void __iomem *base; int ret;
- pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc"); - /* bail out if DT too old and doesn't provide the necessary info */ if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && !pgc_node) diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index a2f9d85c7156..58c4e2aac951 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -1399,6 +1399,7 @@ static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept) { struct glink_channel *channel = to_glink_channel(ept); struct qcom_glink *glink = channel->glink; + struct rpmsg_channel_info chinfo; unsigned long flags;
spin_lock_irqsave(&channel->recv_lock, flags); @@ -1406,6 +1407,13 @@ static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept) spin_unlock_irqrestore(&channel->recv_lock, flags);
/* Decouple the potential rpdev from the channel */ + if (channel->rpdev) { + strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name)); + chinfo.src = RPMSG_ADDR_ANY; + chinfo.dst = RPMSG_ADDR_ANY; + + rpmsg_unregister_device(glink->dev, &chinfo); + } channel->rpdev = NULL;
qcom_glink_send_close_req(glink, channel); diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 90b106408992..b7fbfd9aa908 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -6149,6 +6149,7 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid struct dasd_copy_relation *copy; struct dasd_block *block; struct gendisk *gdp; + int rc;
copy = device->copy; if (!copy) @@ -6183,6 +6184,13 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid /* swap blocklayer device link */ gdp = block->gdp; dasd_add_link_to_gendisk(gdp, secondary); + rc = device_move(disk_to_dev(gdp), &secondary->cdev->dev, DPM_ORDER_NONE); + if (rc) { + dev_err(&primary->cdev->dev, + "copy_pair_swap: moving blockdevice parent %s->%s failed (%d)\n", + dev_name(&primary->cdev->dev), + dev_name(&secondary->cdev->dev), rc); + }
/* re-enable device */ dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC); diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 538a5867e8ab..35b78f8cf62c 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -882,6 +882,9 @@ static void asd_pci_remove(struct pci_dev *dev)
asd_disable_ints(asd_ha);
+ /* Ensure all scheduled tasklets complete before freeing resources */ + tasklet_kill(&asd_ha->seq.dl_tasklet); + asd_remove_dev_attrs(asd_ha);
/* XXX more here as needed */ diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h index c374867f9ba0..e19fd2f91149 100644 --- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h +++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h @@ -160,6 +160,7 @@ struct mpi3_ioc_facts_data { #define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000) #define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00) #define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8) +#define MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT (0x00000040) #define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030) #define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000) #define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010) diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index acb2f7b04fb8..4198830bf10b 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -3135,6 +3135,8 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc, mrioc->facts.dma_mask = (facts_flags & MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >> MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT; + mrioc->facts.max_req_limit = (facts_flags & + MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT); mrioc->facts.protocol_flags = facts_data->protocol_flags; mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word); mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index cb95b7b12051..b3265952c4be 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3503,7 +3503,6 @@ struct isp_operations { #define QLA_MSIX_RSP_Q 0x01 #define QLA_ATIO_VECTOR 0x02 #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 -#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS 0x04
#define QLA_MIDX_DEFAULT 0 #define QLA_MIDX_RSP_Q 1 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index e556f57c91af..59f448e2e319 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -768,7 +768,7 @@ extern int qla2x00_dfs_remove(scsi_qla_host_t *);
/* Globa function prototypes for multi-q */ extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *, - struct qla_msix_entry *, int); + struct qla_msix_entry *); extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index fe98c76e9be3..77c779cca97f 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -4467,32 +4467,6 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id) return IRQ_HANDLED; }
-irqreturn_t -qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) -{ - struct qla_hw_data *ha; - struct qla_qpair *qpair; - struct device_reg_24xx __iomem *reg; - unsigned long flags; - - qpair = dev_id; - if (!qpair) { - ql_log(ql_log_info, NULL, 0x505b, - "%s: NULL response queue pointer.\n", __func__); - return IRQ_NONE; - } - ha = qpair->hw; - - reg = &ha->iobase->isp24; - spin_lock_irqsave(&ha->hardware_lock, flags); - wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - queue_work(ha->wq, &qpair->q_work); - - return IRQ_HANDLED; -} - /* Interrupt handling helpers. */

struct qla_init_msix_entry { @@ -4505,7 +4479,6 @@ static const struct qla_init_msix_entry msix_entries[] = { { "rsp_q", qla24xx_msix_rsp_q }, { "atio_q", qla83xx_msix_atio_q }, { "qpair_multiq", qla2xxx_msix_rsp_q }, - { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, };
static const struct qla_init_msix_entry qla82xx_msix_entries[] = { @@ -4792,9 +4765,10 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) }
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, - struct qla_msix_entry *msix, int vector_type) + struct qla_msix_entry *msix) { - const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; + const struct qla_init_msix_entry *intr = + &msix_entries[QLA_MSIX_QPAIR_MULTIQ_RSP_Q]; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); int ret;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 13b6cb1b93ac..41435e98092a 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -253,6 +253,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) /* Issue set host interrupt command to send cmd out. */ ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + reinit_completion(&ha->mbx_intr_comp);
/* Unlock mbx registers and wait for interrupt */ ql_dbg(ql_dbg_mbx, vha, 0x100f, @@ -279,6 +280,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) "cmd=%x Timeout.\n", command); spin_lock_irqsave(&ha->hardware_lock, flags); clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + reinit_completion(&ha->mbx_intr_comp); spin_unlock_irqrestore(&ha->hardware_lock, flags);
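The reinit_completion() calls added to qla2x00_mailbox_command() make sure a completion left over from an earlier, timed-out mailbox command cannot immediately satisfy the wait for the next one. The general shape of the pattern, sketched with a hypothetical demo_mbx structure whose completion is initialized at setup time:

#include <linux/completion.h>
#include <linux/errno.h>

struct demo_mbx {
	struct completion intr_comp;	/* completed from the IRQ handler */
};

static int demo_mbx_command(struct demo_mbx *mbx, unsigned long timeout)
{
	/* Discard any stale completion before issuing a new command. */
	reinit_completion(&mbx->intr_comp);

	/* ... write the mailbox registers and ring the doorbell here ... */

	if (!wait_for_completion_timeout(&mbx->intr_comp, timeout))
		return -ETIMEDOUT;

	return 0;
}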
if (chip_reset != ha->chip_reset) { diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 79879c4743e6..9946899dd83b 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -899,9 +899,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out);
- ret = qla25xx_request_irq(ha, qpair, qpair->msix, - ha->flags.disable_msix_handshake ? - QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS); + ret = qla25xx_request_irq(ha, qpair, qpair->msix); if (ret) goto que_failed;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 81c76678f25a..4e6c07b61842 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1874,12 +1874,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - if (qla2x00_chip_is_down(vha)) { - req->outstanding_cmds[cnt] = NULL; - sp->done(sp, res); - continue; - } - switch (sp->cmd_type) { case TYPE_SRB: qla2x00_abort_srb(qp, sp, res, &flags); @@ -3456,13 +3450,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->mqenable = 0;
if (ha->mqenable) { - bool startit = false; - - if (QLA_TGT_MODE_ENABLED()) - startit = false; - - if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) - startit = true; + bool startit = !!(host->active_mode & MODE_INITIATOR);
/* Create start of day qpairs for Block MQ */ for (i = 0; i < ha->max_qpairs; i++) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 89a2aaccdcfc..dfe38d34d051 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -6716,7 +6716,7 @@ MODULE_PARM_DESC(lbprz, MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); -MODULE_PARM_DESC(atomic_write, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)"); +MODULE_PARM_DESC(atomic_wr, "enable ATOMIC WRITE support, support WRITE ATOMIC(16) (def=0)"); MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method"); MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 018f5428a07d..f0fb22e4117e 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -10091,6 +10091,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x207d, 0x4240) }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4840) + }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c index b6e06c4d2117..0711088da5dc 100644 --- a/drivers/soc/amlogic/meson-canvas.c +++ b/drivers/soc/amlogic/meson-canvas.c @@ -73,10 +73,9 @@ struct meson_canvas *meson_canvas_get(struct device *dev) * current state, this driver probe cannot return -EPROBE_DEFER */ canvas = dev_get_drvdata(&canvas_pdev->dev); - if (!canvas) { - put_device(&canvas_pdev->dev); + put_device(&canvas_pdev->dev); + if (!canvas) return ERR_PTR(-EINVAL); - }
return canvas; } diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c index 49a0955e82d6..1685da1da23d 100644 --- a/drivers/soc/apple/mailbox.c +++ b/drivers/soc/apple/mailbox.c @@ -299,11 +299,18 @@ struct apple_mbox *apple_mbox_get(struct device *dev, int index) return ERR_PTR(-EPROBE_DEFER);
mbox = platform_get_drvdata(pdev); - if (!mbox) - return ERR_PTR(-EPROBE_DEFER); + if (!mbox) { + mbox = ERR_PTR(-EPROBE_DEFER); + goto out_put_pdev; + } + + if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) { + mbox = ERR_PTR(-ENODEV); + goto out_put_pdev; + }
- if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) - return ERR_PTR(-ENODEV); +out_put_pdev: + put_device(&pdev->dev);
return mbox; } diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index 9c3bd37b6579..71130a2f62e9 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -202,9 +202,9 @@ struct ocmem *of_get_ocmem(struct device *dev) }
ocmem = platform_get_drvdata(pdev); + put_device(&pdev->dev); if (!ocmem) { dev_err(dev, "Cannot get ocmem\n"); - put_device(&pdev->dev); return ERR_PTR(-ENODEV); } return ocmem; diff --git a/drivers/soc/qcom/qcom-pbs.c b/drivers/soc/qcom/qcom-pbs.c index 77a70d3d0d0b..6ce2dbea1500 100644 --- a/drivers/soc/qcom/qcom-pbs.c +++ b/drivers/soc/qcom/qcom-pbs.c @@ -179,6 +179,8 @@ struct pbs_dev *get_pbs_client_device(struct device *dev) return ERR_PTR(-EINVAL); }
+ platform_device_put(pdev); + return pbs; } EXPORT_SYMBOL_GPL(get_pbs_client_device); diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index c40313886a01..7291c2fb14d7 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -322,6 +322,8 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np, if (!dev) return ERR_PTR(-EPROBE_DEFER);
+ put_device(dev); + return syscon_node_to_regmap(pmu_np); } EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle); diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index d27667283846..74d2fedea71c 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -182,8 +182,6 @@ static int tegra_fuse_probe(struct platform_device *pdev) }
fuse->soc->init(fuse); - tegra_fuse_print_sku_info(&tegra_sku_info); - tegra_soc_device_register();
err = tegra_fuse_add_lookups(fuse); if (err) diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 6c1e3aed8162..9764da386921 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -1744,12 +1744,13 @@ static int set_stream(struct snd_pcm_substream *substream, * sdw_alloc_stream() - Allocate and return stream runtime * * @stream_name: SoundWire stream name + * @type: stream type (could be PCM ,PDM or BPT) * * Allocates a SoundWire stream runtime instance. * sdw_alloc_stream should be called only once per stream. Typically * invoked from ALSA/ASoC machine/platform driver. */ -struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name) +struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name, enum sdw_stream_type type) { struct sdw_stream_runtime *stream;
@@ -1761,6 +1762,7 @@ struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name) INIT_LIST_HEAD(&stream->master_list); stream->state = SDW_STREAM_ALLOCATED; stream->m_rt_count = 0; + stream->type = type;
return stream; } @@ -1789,7 +1791,7 @@ int sdw_startup_stream(void *sdw_substream) if (!name) return -ENOMEM;
- sdw_stream = sdw_alloc_stream(name); + sdw_stream = sdw_alloc_stream(name, SDW_STREAM_PCM); if (!sdw_stream) { dev_err(rtd->dev, "alloc stream failed for substream DAI %s\n", substream->name); ret = -ENOMEM; diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 06e43b184d85..aca3681d32ea 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1959,7 +1959,9 @@ static int cqspi_probe(struct platform_device *pdev) probe_reset_failed: if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); - clk_disable_unprepare(cqspi->clk); + + if (pm_runtime_get_sync(&pdev->dev) >= 0) + clk_disable_unprepare(cqspi->clk); probe_clk_failed: return ret; } diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 997e07c0a24a..5a44627be930 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -335,7 +335,7 @@ static int fsl_spi_prepare_message(struct spi_controller *ctlr, if (t->bits_per_word == 16 || t->bits_per_word == 32) t->bits_per_word = 8; /* pretend its 8 bits */ if (t->bits_per_word == 8 && t->len >= 256 && - (mpc8xxx_spi->flags & SPI_CPM1)) + !(t->len & 1) && (mpc8xxx_spi->flags & SPI_CPM1)) t->bits_per_word = 16; } } diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c index cdf4ebb93b10..413a4f296d89 100644 --- a/drivers/staging/greybus/uart.c +++ b/drivers/staging/greybus/uart.c @@ -914,7 +914,6 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev) { struct gb_tty *gb_tty = gb_gbphy_get_data(gbphy_dev); struct gb_connection *connection = gb_tty->connection; - struct tty_struct *tty; int ret;
ret = gbphy_runtime_get_sync(gbphy_dev); @@ -927,11 +926,7 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev) wake_up_all(&gb_tty->wioctl); mutex_unlock(&gb_tty->mutex);
- tty = tty_port_tty_get(&gb_tty->port); - if (tty) { - tty_vhangup(tty); - tty_kref_put(tty); - } + tty_port_tty_vhangup(&gb_tty->port);
gb_connection_disable_rx(connection); tty_unregister_device(gb_tty_driver, gb_tty->minor); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 05d29201b730..cf9834f958c9 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1524,6 +1524,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); if (!cmd->t_task_cdb) { + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; pr_err("Unable to allocate cmd->t_task_cdb" " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c index cb3b127b06b6..1e1ad28d83fc 100644 --- a/drivers/tty/serial/serial_base_bus.c +++ b/drivers/tty/serial/serial_base_bus.c @@ -13,6 +13,7 @@ #include <linux/device.h> #include <linux/idr.h> #include <linux/module.h> +#include <linux/property.h> #include <linux/serial_core.h> #include <linux/slab.h> #include <linux/spinlock.h> @@ -59,6 +60,7 @@ void serial_base_driver_unregister(struct device_driver *driver) driver_unregister(driver); }
+/* On failure the caller must put device @dev with put_device() */ static int serial_base_device_init(struct uart_port *port, struct device *dev, struct device *parent_dev, @@ -72,7 +74,9 @@ static int serial_base_device_init(struct uart_port *port, dev->parent = parent_dev; dev->bus = &serial_base_bus_type; dev->release = release; - device_set_of_node_from_dev(dev, parent_dev); + dev->of_node_reused = true; + + device_set_node(dev, fwnode_handle_get(dev_fwnode(parent_dev)));
if (!serial_base_initialized) { dev_dbg(port->dev, "uart_add_one_port() called before arch_initcall()?\n"); @@ -93,6 +97,7 @@ static void serial_base_ctrl_release(struct device *dev) { struct serial_ctrl_device *ctrl_dev = to_serial_base_ctrl_device(dev);
+ fwnode_handle_put(dev_fwnode(dev)); kfree(ctrl_dev); }
@@ -140,6 +145,7 @@ static void serial_base_port_release(struct device *dev) { struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+ fwnode_handle_put(dev_fwnode(dev)); kfree(port_dev); }
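In serial_base_bus.c the child device now takes its own reference on the parent's firmware node via fwnode_handle_get() and drops it from the ->release() callback, rather than borrowing the node for the child's lifetime. A rough sketch of that ownership scheme (demo_child and its helpers are made up for illustration):

#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>

struct demo_child {
	struct device dev;
};

/* Drop the fwnode reference taken at setup time when the child device
 * is finally released. */
static void demo_child_release(struct device *dev)
{
	fwnode_handle_put(dev_fwnode(dev));
	kfree(container_of(dev, struct demo_child, dev));
}

/* Share the parent's firmware node with a child device, holding our own
 * reference for as long as the child exists. */
static void demo_child_init(struct demo_child *child, struct device *parent)
{
	device_initialize(&child->dev);
	child->dev.parent = parent;
	child->dev.release = demo_child_release;
	device_set_node(&child->dev, fwnode_handle_get(dev_fwnode(parent)));
}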
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 440303566b14..75272749b397 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -3238,7 +3238,6 @@ static void serial_core_remove_one_port(struct uart_driver *drv, struct uart_state *state = drv->state + uport->line; struct tty_port *port = &state->port; struct uart_port *uart_port; - struct tty_struct *tty;
mutex_lock(&port->mutex); uart_port = uart_port_check(state); @@ -3257,11 +3256,7 @@ static void serial_core_remove_one_port(struct uart_driver *drv, */ tty_port_unregister_device(port, drv->tty_driver, uport->line);
- tty = tty_port_tty_get(port); - if (tty) { - tty_vhangup(port->tty); - tty_kref_put(tty); - } + tty_port_tty_vhangup(port);
/* * If the port is used as a console, unregister it diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 53236e3e4fa4..22c958a0308f 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1761,7 +1761,7 @@ static void sci_dma_check_tx_occurred(struct sci_port *s) struct dma_tx_state state; enum dma_status status;
- if (!s->chan_tx) + if (!s->chan_tx || s->cookie_tx <= 0) return;
status = dmaengine_tx_status(s->chan_tx, s->cookie_tx, &state); diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 3fc54cc02a1f..c575c38b513d 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -1109,6 +1109,9 @@ static int sprd_clk_init(struct uart_port *uport)
clk_uart = devm_clk_get(uport->dev, "uart"); if (IS_ERR(clk_uart)) { + if (PTR_ERR(clk_uart) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_warn(uport->dev, "uart%d can't get uart clock\n", uport->line); clk_uart = NULL; @@ -1116,6 +1119,9 @@ static int sprd_clk_init(struct uart_port *uport)
clk_parent = devm_clk_get(uport->dev, "source"); if (IS_ERR(clk_parent)) { + if (PTR_ERR(clk_parent) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_warn(uport->dev, "uart%d can't get source clock\n", uport->line); clk_parent = NULL; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 1d636578c1ef..239d28489841 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -431,10 +431,17 @@ static void cdns_uart_handle_tx(void *dev_id) struct tty_port *tport = &port->state->port; unsigned int numbytes; unsigned char ch; + ktime_t rts_delay;
if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { /* Disable the TX Empty interrupt */ writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR); + /* Set RTS line after delay */ + if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) { + cdns_uart->tx_timer.function = &cdns_rs485_rx_callback; + rts_delay = ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)); + hrtimer_start(&cdns_uart->tx_timer, rts_delay, HRTIMER_MODE_REL); + } return; }
@@ -451,13 +458,6 @@ static void cdns_uart_handle_tx(void *dev_id)
/* Enable the TX Empty interrupt */ writel(CDNS_UART_IXR_TXEMPTY, cdns_uart->port->membase + CDNS_UART_IER); - - if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED && - (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) { - cdns_uart->tx_timer.function = &cdns_rs485_rx_callback; - hrtimer_start(&cdns_uart->tx_timer, - ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL); - } }
/** @@ -734,7 +734,7 @@ static void cdns_uart_start_tx(struct uart_port *port)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) { if (!cdns_uart->rs485_tx_started) { - cdns_uart->tx_timer.function = &cdns_rs485_tx_callback; + hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_tx_callback); cdns_rs485_tx_setup(cdns_uart); return hrtimer_start(&cdns_uart->tx_timer, ms_to_ktime(port->rs485.delay_rts_before_send), diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 14cca33d2269..e10ccce1653e 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -411,20 +411,19 @@ void tty_port_hangup(struct tty_port *port) } EXPORT_SYMBOL(tty_port_hangup);
-/** - * tty_port_tty_hangup - helper to hang up a tty - * @port: tty port - * @check_clocal: hang only ttys with %CLOCAL unset? - */ -void tty_port_tty_hangup(struct tty_port *port, bool check_clocal) +void __tty_port_tty_hangup(struct tty_port *port, bool check_clocal, bool async) { struct tty_struct *tty = tty_port_tty_get(port);
- if (tty && (!check_clocal || !C_CLOCAL(tty))) - tty_hangup(tty); + if (tty && (!check_clocal || !C_CLOCAL(tty))) { + if (async) + tty_hangup(tty); + else + tty_vhangup(tty); + } tty_kref_put(tty); } -EXPORT_SYMBOL_GPL(tty_port_tty_hangup); +EXPORT_SYMBOL_GPL(__tty_port_tty_hangup);
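Several hunks in this release (greybus uart, serial_core, cdc-acm, xhci-dbgtty, usb-serial) replace the open-coded tty_port_tty_get()/tty_vhangup()/tty_kref_put() sequence with the tty_port_tty_vhangup() helper built on __tty_port_tty_hangup() above. The before and after, sketched as two small functions:

#include <linux/tty.h>
#include <linux/tty_port.h>

/* Open-coded form removed from the drivers: */
static void demo_vhangup_open_coded(struct tty_port *port)
{
	struct tty_struct *tty = tty_port_tty_get(port);

	if (tty) {
		tty_vhangup(tty);
		tty_kref_put(tty);
	}
}

/* Equivalent call through the helper (synchronous hangup): */
static void demo_vhangup_helper(struct tty_port *port)
{
	tty_port_tty_vhangup(port);
}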
/** * tty_port_tty_wakeup - helper to wake up a tty diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 33534e455b55..24eb4795328e 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -10112,7 +10112,7 @@ static int ufshcd_suspend(struct ufs_hba *hba) ret = ufshcd_setup_clocks(hba, false); if (ret) { ufshcd_enable_irq(hba); - return ret; + goto out; } if (ufshcd_is_clkgating_allowed(hba)) { hba->clk_gating.state = CLKS_OFF; @@ -10124,6 +10124,9 @@ static int ufshcd_suspend(struct ufs_hba *hba) /* Put the host controller in low power mode if possible */ ufshcd_hba_vreg_set_lpm(hba); ufshcd_pm_qos_update(hba, false); +out: + if (ret) + ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret); return ret; }
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 00ecfe14c1fd..1fb98af4ac56 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -1994,6 +1994,11 @@ static int ufs_mtk_system_suspend(struct device *dev) struct arm_smccc_res res; int ret;
+ if (hba->shutting_down) { + ret = -EBUSY; + goto out; + } + ret = ufshcd_system_suspend(dev); if (ret) goto out; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 5a334e370f4d..73f9476774ae 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1572,7 +1572,6 @@ static int acm_probe(struct usb_interface *intf, static void acm_disconnect(struct usb_interface *intf) { struct acm *acm = usb_get_intfdata(intf); - struct tty_struct *tty; int i;
/* sibling interface is already cleaning up */ @@ -1599,11 +1598,7 @@ static void acm_disconnect(struct usb_interface *intf) usb_set_intfdata(acm->data, NULL); mutex_unlock(&acm->mutex);
- tty = tty_port_tty_get(&acm->port); - if (tty) { - tty_vhangup(tty); - tty_kref_put(tty); - } + tty_port_tty_vhangup(&acm->port);
cancel_delayed_work_sync(&acm->dwork);
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index be7be00ecb34..415eb5a03ff5 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -70,11 +70,11 @@ static int dwc3_of_simple_probe(struct platform_device *pdev) simple->num_clocks = ret; ret = clk_bulk_prepare_enable(simple->num_clocks, simple->clks); if (ret) - goto err_resetc_assert; + goto err_clk_put_all;
ret = of_platform_populate(np, NULL, NULL, dev); if (ret) - goto err_clk_put; + goto err_clk_disable;
pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -82,8 +82,9 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
return 0;
-err_clk_put: +err_clk_disable: clk_bulk_disable_unprepare(simple->num_clocks, simple->clks); +err_clk_put_all: clk_bulk_put_all(simple->num_clocks, simple->clks);
err_resetc_assert: diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a9d39837cadf..6d1072d041f7 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4793,7 +4793,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc) if (!dwc->gadget) return;
- dwc3_enable_susphy(dwc, false); + dwc3_enable_susphy(dwc, true); usb_del_gadget(dwc->gadget); dwc3_gadget_free_endpoints(dwc); usb_put_gadget(dwc->gadget); diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index f040d67a10b0..78e391763d72 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -223,7 +223,7 @@ void dwc3_host_exit(struct dwc3 *dwc) if (dwc->sys_wakeup) device_init_wakeup(&dwc->xhci->dev, false);
- dwc3_enable_susphy(dwc, false); + dwc3_enable_susphy(dwc, true); platform_device_unregister(dwc->xhci); dwc->xhci = NULL; } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 3bfd889ed56a..a2620c31cc94 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -3020,7 +3020,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev) pdev->dev.dma_mask = &lpc32xx_usbd_dmamask; retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (retval) - return retval; + goto err_put_client;
udc->board = &lpc32xx_usbddata;
@@ -3038,28 +3038,32 @@ static int lpc32xx_udc_probe(struct platform_device *pdev) /* Get IRQs */ for (i = 0; i < 4; i++) { udc->udp_irq[i] = platform_get_irq(pdev, i); - if (udc->udp_irq[i] < 0) - return udc->udp_irq[i]; + if (udc->udp_irq[i] < 0) { + retval = udc->udp_irq[i]; + goto err_put_client; + } }
udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(udc->udp_baseaddr)) { dev_err(udc->dev, "IO map failure\n"); - return PTR_ERR(udc->udp_baseaddr); + retval = PTR_ERR(udc->udp_baseaddr); + goto err_put_client; }
/* Get USB device clock */ udc->usb_slv_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(udc->usb_slv_clk)) { dev_err(udc->dev, "failed to acquire USB device clock\n"); - return PTR_ERR(udc->usb_slv_clk); + retval = PTR_ERR(udc->usb_slv_clk); + goto err_put_client; }
/* Enable USB device clock */ retval = clk_prepare_enable(udc->usb_slv_clk); if (retval < 0) { dev_err(udc->dev, "failed to start USB device clock\n"); - return retval; + goto err_put_client; }
/* Setup deferred workqueue data */ @@ -3162,6 +3166,9 @@ static int lpc32xx_udc_probe(struct platform_device *pdev) udc->udca_v_base, udc->udca_p_base); i2c_fail: clk_disable_unprepare(udc->usb_slv_clk); +err_put_client: + put_device(&udc->isp1301_i2c_client->dev); + dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
return retval; @@ -3190,6 +3197,8 @@ static void lpc32xx_udc_remove(struct platform_device *pdev) udc->udca_v_base, udc->udca_p_base);
clk_disable_unprepare(udc->usb_slv_clk); + + put_device(&udc->isp1301_i2c_client->dev); }
#ifdef CONFIG_PM diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c index 5b775e1ea527..25a739ebc215 100644 --- a/drivers/usb/host/ohci-nxp.c +++ b/drivers/usb/host/ohci-nxp.c @@ -223,6 +223,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev) fail_resource: usb_put_hcd(hcd); fail_disable: + put_device(&isp1301_i2c_client->dev); isp1301_i2c_client = NULL; return ret; } @@ -234,6 +235,7 @@ static void ohci_hcd_nxp_remove(struct platform_device *pdev) usb_remove_hcd(hcd); ohci_nxp_stop_hc(); usb_put_hcd(hcd); + put_device(&isp1301_i2c_client->dev); isp1301_i2c_client = NULL; }
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index 349931a80cc8..54b68ce8f2f5 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -522,7 +522,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc) * Hang up the TTY. This wakes up any blocked * writers and causes subsequent writes to fail. */ - tty_vhangup(port->port.tty); + tty_port_tty_vhangup(&port->port);
tty_unregister_device(dbc_tty_driver, port->minor); xhci_dbc_tty_exit_port(port); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 69aedce9d67b..49cba1cdd91b 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1671,7 +1671,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) * SS devices are only visible to roothub after link training completes. * Keep polling roothubs for a grace period after xHC start */ - if (xhci->run_graceperiod) { + if (hcd->speed >= HCD_USB3 && xhci->run_graceperiod) { if (time_before(jiffies, xhci->run_graceperiod)) status = 1; else diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c index c5c6b818998e..481bb775d761 100644 --- a/drivers/usb/phy/phy-fsl-usb.c +++ b/drivers/usb/phy/phy-fsl-usb.c @@ -987,6 +987,7 @@ static void fsl_otg_remove(struct platform_device *pdev) { struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ disable_delayed_work_sync(&fsl_otg_dev->otg_event); usb_remove_phy(&fsl_otg_dev->phy); free_irq(fsl_otg_dev->irq, fsl_otg_dev);
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index 993d7525a102..183866b7fce2 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c @@ -149,7 +149,12 @@ struct i2c_client *isp1301_get_client(struct device_node *node) return client;
/* non-DT: only one ISP1301 chip supported */ - return isp1301_i2c_client; + if (isp1301_i2c_client) { + get_device(&isp1301_i2c_client->dev); + return isp1301_i2c_client; + } + + return NULL; } EXPORT_SYMBOL_GPL(isp1301_get_client);
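The isp1301 change pairs every successful isp1301_get_client() with a reference taken via get_device(), which the LPC32xx UDC and OHCI users above now balance with put_device() in their error and remove paths. A minimal sketch of that provider/consumer contract:

#include <linux/device.h>

/* Provider side: hand out the device only together with a reference. */
static struct device *demo_get_provider(struct device *provider)
{
	if (!provider)
		return NULL;

	return get_device(provider);
}

/* Consumer side: drop the reference once the provider is no longer used. */
static void demo_put_provider(struct device *provider)
{
	put_device(provider);	/* NULL-safe */
}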
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index 75fff2e4cbc6..56fc3ff5016f 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -713,11 +713,13 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv, /* make sure pipe is not busy */ ret = usbhsp_pipe_barrier(pipe); if (ret < 0) { + usbhsp_put_pipe(pipe); dev_err(dev, "pipe setup failed %d\n", usbhs_pipe_number(pipe)); return NULL; }
if (usbhsp_setup_pipecfg(pipe, is_host, dir_in, &pipecfg)) { + usbhsp_put_pipe(pipe); dev_err(dev, "can't setup pipe\n"); return NULL; } diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index df6a2ae0bf42..2ee0b64b8be0 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1178,7 +1178,6 @@ static void usb_serial_disconnect(struct usb_interface *interface) struct usb_serial *serial = usb_get_intfdata(interface); struct device *dev = &interface->dev; struct usb_serial_port *port; - struct tty_struct *tty;
/* sibling interface is cleaning up */ if (!serial) @@ -1193,11 +1192,7 @@ static void usb_serial_disconnect(struct usb_interface *interface)
for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; - tty = tty_port_tty_get(&port->port); - if (tty) { - tty_vhangup(tty); - tty_kref_put(tty); - } + tty_port_tty_vhangup(&port->port); usb_serial_port_poison_urbs(port); wake_up_interruptible(&port->port.delta_msr_wait); cancel_work_sync(&port->work); diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 1477e31d7763..939a98c2d3f7 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -98,7 +98,7 @@ UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160, US_FL_NO_ATA_1X),
/* Reported-by: Benjamin Tissoires benjamin.tissoires@redhat.com */ -UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, +UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x0309, "Initio Corporation", "INIC-3069", USB_SC_DEVICE, USB_PR_DEVICE, NULL, diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index 6964f403a2d5..67cb974ec761 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -736,12 +736,16 @@ int dp_altmode_probe(struct typec_altmode *alt) if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) & DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) && !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) & - DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo))) + DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo))) { + typec_altmode_put_plug(plug); return -ENODEV; + }
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL); - if (!dp) + if (!dp) { + typec_altmode_put_plug(plug); return -ENOMEM; + }
INIT_WORK(&dp->work, dp_altmode_work); mutex_init(&dp->lock); diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 896e6bc1b5e2..9a0fb6a79b21 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -1772,6 +1772,12 @@ static int ucsi_init(struct ucsi *ucsi) ret = -ENODEV; goto err_reset; } + /* Check if reserved bit set. This is out of spec but happens in buggy FW */ + if (ucsi->cap.num_connectors & 0x80) { + dev_warn(ucsi->dev, "UCSI: Invalid num_connectors %d. Likely buggy FW\n", + ucsi->cap.num_connectors); + ucsi->cap.num_connectors &= 0x7f; // clear bit and carry on + }
/* Allocate the connectors. Released in ucsi_unregister() */ connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index a793e30d46b7..f67b4d33a0ab 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -830,15 +830,15 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag no_need_xmit: usb_hcd_unlink_urb_from_ep(hcd, urb); no_need_unlink: - spin_unlock_irqrestore(&vhci->lock, flags); if (!ret) { /* usb_hcd_giveback_urb() should be called with * irqs disabled */ - local_irq_disable(); + spin_unlock(&vhci->lock); usb_hcd_giveback_urb(hcd, urb, urb->status); - local_irq_enable(); + spin_lock(&vhci->lock); } + spin_unlock_irqrestore(&vhci->lock, flags); return ret; }
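The vhci_hcd fix keeps interrupts disabled (they were turned off by the earlier spin_lock_irqsave()) but drops the driver's own lock around usb_hcd_giveback_urb(), since the completion handler may resubmit the URB and re-enter the HCD. A sketch of the calling convention, assuming the caller holds the lock taken with spin_lock_irqsave():

#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

/* Call with the lock held and IRQs disabled; returns in the same state. */
static void demo_giveback_locked(struct usb_hcd *hcd, struct urb *urb,
				 spinlock_t *lock)
{
	spin_unlock(lock);			/* IRQs stay disabled */
	usb_hcd_giveback_urb(hcd, urb, urb->status);
	spin_lock(lock);
}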
diff --git a/drivers/vdpa/octeon_ep/octep_vdpa_main.c b/drivers/vdpa/octeon_ep/octep_vdpa_main.c index cd55b1aac151..70c932df42fa 100644 --- a/drivers/vdpa/octeon_ep/octep_vdpa_main.c +++ b/drivers/vdpa/octeon_ep/octep_vdpa_main.c @@ -692,6 +692,7 @@ static int octep_sriov_enable(struct pci_dev *pdev, int num_vfs) octep_vdpa_assign_barspace(vf_pdev, pdev, index); if (++index == num_vfs) { done = true; + pci_dev_put(vf_pdev); break; } } diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c index 9e1c57baab64..9728840cbcc5 100644 --- a/drivers/vfio/pci/nvgrace-gpu/main.c +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -482,7 +482,7 @@ nvgrace_gpu_map_and_read(struct nvgrace_gpu_pci_core_device *nvdev, ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false, nvdev->resmem.ioaddr, buf, offset, mem_count, - 0, 0, false); + 0, 0, false, VFIO_PCI_IO_WIDTH_8); }
return ret; @@ -600,7 +600,7 @@ nvgrace_gpu_map_and_write(struct nvgrace_gpu_pci_core_device *nvdev, ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false, nvdev->resmem.ioaddr, (char __user *)buf, pos, mem_count, - 0, 0, true); + 0, 0, true, VFIO_PCI_IO_WIDTH_8); }
return ret; diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c index 481992142f79..4915a7c1c491 100644 --- a/drivers/vfio/pci/pds/dirty.c +++ b/drivers/vfio/pci/pds/dirty.c @@ -292,8 +292,11 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio, len = num_ranges * sizeof(*region_info);
node = interval_tree_iter_first(ranges, 0, ULONG_MAX); - if (!node) - return -EINVAL; + if (!node) { + err = -EINVAL; + goto out_free_region_info; + } + for (int i = 0; i < num_ranges; i++) { struct pds_lm_dirty_region_info *ri = &region_info[i]; u64 region_size = node->last - node->start + 1; diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c index a0595c745732..03a84663db91 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -141,7 +141,8 @@ VFIO_IORDWR(64) ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, void __iomem *io, char __user *buf, loff_t off, size_t count, size_t x_start, - size_t x_end, bool iswrite) + size_t x_end, bool iswrite, + enum vfio_pci_io_width max_width) { ssize_t done = 0; int ret; @@ -157,7 +158,7 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, fillable = 0;
#if defined(ioread64) && defined(iowrite64) - if (fillable >= 8 && !(off % 8)) { + if (fillable >= 8 && !(off % 8) && max_width >= 8) { ret = vfio_pci_iordwr64(vdev, iswrite, test_mem, io, buf, off, &filled); if (ret) @@ -165,13 +166,13 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
} else #endif - if (fillable >= 4 && !(off % 4)) { + if (fillable >= 4 && !(off % 4) && max_width >= 4) { ret = vfio_pci_iordwr32(vdev, iswrite, test_mem, io, buf, off, &filled); if (ret) return ret;
- } else if (fillable >= 2 && !(off % 2)) { + } else if (fillable >= 2 && !(off % 2) && max_width >= 2) { ret = vfio_pci_iordwr16(vdev, iswrite, test_mem, io, buf, off, &filled); if (ret) @@ -242,6 +243,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, void __iomem *io; struct resource *res = &vdev->pdev->resource[bar]; ssize_t done; + enum vfio_pci_io_width max_width = VFIO_PCI_IO_WIDTH_8;
if (pci_resource_start(pdev, bar)) end = pci_resource_len(pdev, bar); @@ -268,6 +270,16 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, goto out; } x_end = end; + + /* + * Certain devices (e.g. Intel X710) don't support qword + * access to the ROM bar. Otherwise PCI AER errors might be + * triggered. + * + * Disable qword access to the ROM bar universally, which + * worked reliably for years before qword access is enabled. + */ + max_width = VFIO_PCI_IO_WIDTH_4; } else { int ret = vfio_pci_core_setup_barmap(vdev, bar); if (ret) { @@ -284,7 +296,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, }
done = vfio_pci_core_do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos, - count, x_start, x_end, iswrite); + count, x_start, x_end, iswrite, max_width);
if (done >= 0) *ppos += done; @@ -353,7 +365,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, * to the memory enable bit in the command register. */ done = vfio_pci_core_do_io_rw(vdev, false, iomem, buf, off, count, - 0, 0, iswrite); + 0, 0, iswrite, VFIO_PCI_IO_WIDTH_8);
vga_put(vdev->pdev, rsrc);
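The new max_width argument to vfio_pci_core_do_io_rw() lets the ROM BAR path cap MMIO accesses at 4 bytes, because some devices (the comment above names the Intel X710) raise PCI AER errors on qword reads of the ROM. The width-selection logic reduces to something like this illustrative helper:

#include <linux/types.h>

/* Widest access permitted by alignment, remaining bytes and the cap. */
static size_t demo_pick_width(loff_t off, size_t remaining, size_t max_width)
{
	size_t width;

	for (width = 8; width > 1; width >>= 1) {
		if (width <= max_width && remaining >= width &&
		    !(off & (width - 1)))
			return width;
	}

	return 1;
}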
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 66a0f060770e..2dea6f868674 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -64,14 +64,15 @@ static u32 vhost_transport_get_local_cid(void) return VHOST_VSOCK_DEFAULT_HOST_CID; }
-/* Callers that dereference the return value must hold vhost_vsock_mutex or the - * RCU read lock. +/* Callers must be in an RCU read section or hold the vhost_vsock_mutex. + * The return value can only be dereferenced while within the section. */ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid) { struct vhost_vsock *vsock;
- hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) { + hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid, + lockdep_is_held(&vhost_vsock_mutex)) { u32 other_cid = vsock->guest_cid;
/* Skip instances that have no CID yet */ @@ -708,9 +709,15 @@ static void vhost_vsock_reset_orphans(struct sock *sk) * executing. */
+ rcu_read_lock(); + /* If the peer is still valid, no need to reset connection */ - if (vhost_vsock_get(vsk->remote_addr.svm_cid)) + if (vhost_vsock_get(vsk->remote_addr.svm_cid)) { + rcu_read_unlock(); return; + } + + rcu_read_unlock();
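The lockdep_is_held() argument added above tells the RCU list-debugging checks that holding vhost_vsock_mutex is an acceptable alternative to rcu_read_lock(), and the reset_orphans path now wraps its lookup in an explicit RCU read section. A generic sketch of that dual-context lookup pattern, with illustrative names rather than the vhost code:

#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct obj {
	u32 id;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(obj_hash, 8);
static DEFINE_MUTEX(obj_mutex);

/* Legal either under rcu_read_lock() or with obj_mutex held; the extra
 * argument documents the mutex case to lockdep instead of splatting. */
static struct obj *obj_get(u32 id)
{
	struct obj *o;

	hash_for_each_possible_rcu(obj_hash, o, node, id,
				   lockdep_is_held(&obj_mutex)) {
		if (o->id == id)
			return o;
	}
	return NULL;
}

static bool obj_exists(u32 id)
{
	bool found;

	rcu_read_lock();
	found = obj_get(id) != NULL;	/* pointer only valid inside the section */
	rcu_read_unlock();
	return found;
}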
/* If the close timeout is pending, let it expire. This avoids races * with the timeout callback. diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c index 4c36a3e409be..cb6ff15a21db 100644 --- a/drivers/video/fbdev/gbefb.c +++ b/drivers/video/fbdev/gbefb.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> +#include <linux/dma-direct.h> #include <linux/errno.h> #include <linux/gfp.h> #include <linux/fb.h> @@ -65,7 +66,7 @@ struct gbefb_par { static unsigned int gbe_mem_size = CONFIG_FB_GBE_MEM * 1024*1024; static void *gbe_mem; static dma_addr_t gbe_dma_addr; -static unsigned long gbe_mem_phys; +static phys_addr_t gbe_mem_phys;
static struct { uint16_t *cpu; @@ -1183,7 +1184,7 @@ static int gbefb_probe(struct platform_device *p_dev) goto out_release_mem_region; }
- gbe_mem_phys = (unsigned long) gbe_dma_addr; + gbe_mem_phys = dma_to_phys(&p_dev->dev, gbe_dma_addr); }
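The gbefb change stops treating the dma_addr_t handle as if it were a CPU physical address; on platforms where the two address spaces differ, only a translation such as dma_to_phys() yields the right value. A small hedged sketch of the allocate-then-translate pattern (hypothetical function, not the driver code):

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: a DMA handle is a device/bus address, not necessarily
 * the CPU physical address, so it must be translated rather than cast. */
static int alloc_fb_memory(struct device *dev, size_t size,
			   void **cpu, phys_addr_t *phys)
{
	dma_addr_t dma;

	*cpu = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!*cpu)
		return -ENOMEM;

	*phys = dma_to_phys(dev, dma);	/* translate, do not cast */
	return 0;
}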
par = info->par; diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index 4aa84853e31a..eb15d0a501af 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -418,12 +418,12 @@ static int pxafb_adjust_timing(struct pxafb_info *fbi, var->yres = max_t(int, var->yres, MIN_YRES);
if (!(fbi->lccr0 & LCCR0_LCDT)) { - clamp_val(var->hsync_len, 1, 64); - clamp_val(var->vsync_len, 1, 64); - clamp_val(var->left_margin, 1, 255); - clamp_val(var->right_margin, 1, 255); - clamp_val(var->upper_margin, 1, 255); - clamp_val(var->lower_margin, 1, 255); + var->hsync_len = clamp(var->hsync_len, 1, 64); + var->vsync_len = clamp(var->vsync_len, 1, 64); + var->left_margin = clamp(var->left_margin, 1, 255); + var->right_margin = clamp(var->right_margin, 1, 255); + var->upper_margin = clamp(var->upper_margin, 1, 255); + var->lower_margin = clamp(var->lower_margin, 1, 255); }
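The pxafb fix works because clamp_val()/clamp() return the bounded value rather than modifying their argument in place; calling them as bare statements silently discards the result. A tiny hedged illustration of the broken and corrected forms:

#include <linux/fb.h>
#include <linux/minmax.h>

static void fixup_timing(struct fb_var_screeninfo *var)
{
	/* No effect: the clamped value is computed and then thrown away. */
	/* clamp_val(var->hsync_len, 1, 64); */

	/* Correct: assign the result back to the field being bounded. */
	var->hsync_len = clamp(var->hsync_len, 1U, 64U);
}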
/* make sure each line is aligned on word boundary */ diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c index f9a0085ad72b..ca9e84e8d860 100644 --- a/drivers/video/fbdev/tcx.c +++ b/drivers/video/fbdev/tcx.c @@ -428,7 +428,7 @@ static int tcx_probe(struct platform_device *op) j = i; break; } - par->mmap_map[i].poff = op->resource[j].start; + par->mmap_map[i].poff = op->resource[j].start - info->fix.smem_start; }
info->fbops = &tcx_ops; diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index b36d2803674e..b7b9fb80d6a0 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -866,15 +866,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, tell_host(vb, vb->inflate_vq);
/* balloon's page migration 2nd step -- deflate "page" */ - spin_lock_irqsave(&vb_dev_info->pages_lock, flags); - balloon_page_delete(page); - spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb, vb->pfns, page); tell_host(vb, vb->deflate_vq);
mutex_unlock(&vb->balloon_lock);
+ balloon_page_finalize(page); put_page(page); /* balloon reference */
return MIGRATEPAGE_SUCCESS; diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c index eeb39f96e72e..c1ed3ce153cf 100644 --- a/drivers/watchdog/via_wdt.c +++ b/drivers/watchdog/via_wdt.c @@ -165,6 +165,7 @@ static int wdt_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "cannot enable PCI device\n"); return -ENODEV; } + wdt_res.name = "via_wdt";
/* * Allocate a MMIO region which contains watchdog control register diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 01a1b979b717..ce13b0ec978e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -253,6 +253,7 @@ static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off if (ret < 0) { btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d", logical, ret); + btrfs_release_path(&path); return; } eb = path.nodes[0]; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 03c3b5d0abbe..f15cbbb81660 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2012,10 +2012,8 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap, ret = inode_permission(idmap, temp_inode, MAY_READ | MAY_EXEC); iput(temp_inode); - if (ret) { - ret = -EACCES; + if (ret) goto out_put; - }
if (key.offset == upper_limit) break; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 9f811ea604f7..3cbb9f22d394 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -2943,6 +2943,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, unsigned int nofs_flag; bool need_commit = false;
+ /* Set the basic fallback @last_physical before we get an sctx. */ + if (progress) + progress->last_physical = start; + if (btrfs_fs_closing(fs_info)) return -EAGAIN;
@@ -2961,6 +2965,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, sctx = scrub_setup_ctx(fs_info, is_dev_replace); if (IS_ERR(sctx)) return PTR_ERR(sctx); + sctx->stat.last_physical = start;
ret = scrub_workers_get(fs_info); if (ret) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 609f221d4c30..4ed0fcf1fa7d 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -5556,14 +5556,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans, struct btrfs_inode *curr_inode = start_inode; int ret = 0;
- /* - * If we are logging a new name, as part of a link or rename operation, - * don't bother logging new dentries, as we just want to log the names - * of an inode and that any new parents exist. - */ - if (ctx->logging_new_name) - return 0; - path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -5742,6 +5734,33 @@ static int conflicting_inode_is_dir(struct btrfs_root *root, u64 ino, return ret; }
+static bool can_log_conflicting_inode(const struct btrfs_trans_handle *trans, + const struct btrfs_inode *inode) +{ + if (!S_ISDIR(inode->vfs_inode.i_mode)) + return true; + + if (inode->last_unlink_trans < trans->transid) + return true; + + /* + * If this is a directory and its unlink_trans is not from a past + * transaction then we must fall back to a transaction commit in order + * to avoid getting a directory with 2 hard links after log replay. + * + * This happens if a directory A is renamed, moved from one parent + * directory to another one, a new file is created in the old parent + * directory with the old name of our directory A, the new file is + * fsynced, then we move the new file to some other parent directory + * and fsync the new file again. This results in a log tree where we + * logged that directory A existed, with the INODE_REF item for the + * new location but without having logged its old parent inode, so + * that on log replay we add a new link for the new location but the + * old link remains, resulting in a link count of 2. + */ + return false; +} + static int add_conflicting_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -5845,6 +5864,11 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans, return 0; }
+ if (!can_log_conflicting_inode(trans, inode)) { + btrfs_add_delayed_iput(inode); + return BTRFS_LOG_FORCE_COMMIT; + } + btrfs_add_delayed_iput(inode);
ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); @@ -5909,6 +5933,12 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans, break; }
+ if (!can_log_conflicting_inode(trans, inode)) { + btrfs_add_delayed_iput(inode); + ret = BTRFS_LOG_FORCE_COMMIT; + break; + } + /* * Always log the directory, we cannot make this * conditional on need_log_inode() because the directory diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ce991a839046..9c6e96f63013 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7071,6 +7071,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
fs_devices->seeding = true; fs_devices->opened = 1; + list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list); return fs_devices; }
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 63acd91d15aa..7116f20a7fbe 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1244,14 +1244,14 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped) return err; }
-static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err) +static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, bool eio) { struct erofs_sb_info *const sbi = EROFS_SB(be->sb); struct z_erofs_pcluster *pcl = be->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); const struct z_erofs_decompressor *decomp = z_erofs_decomp[pcl->algorithmformat]; - int i, j, jtop, err2; + int i, j, jtop, err2, err = eio ? -EIO : 0; struct page *page; bool overlapped; bool try_free = true; @@ -1381,12 +1381,12 @@ static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, .pcl = io->head, }; struct z_erofs_pcluster *next; - int err = io->eio ? -EIO : 0; + int err = 0;
for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) { DBG_BUGON(!be.pcl); next = READ_ONCE(be.pcl->next); - err = z_erofs_decompress_pcluster(&be, err) ?: err; + err = z_erofs_decompress_pcluster(&be, io->eio) ?: err; } return err; } diff --git a/fs/exfat/file.c b/fs/exfat/file.c index 7ac5126aa4f1..033852efe5dc 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -25,6 +25,8 @@ static int exfat_cont_expand(struct inode *inode, loff_t size) struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_chain clu;
+ truncate_pagecache(inode, i_size_read(inode)); + ret = inode_newsize_ok(inode, size); if (ret) return ret; @@ -587,6 +589,9 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
inode_lock(inode);
+ if (pos > i_size_read(inode)) + truncate_pagecache(inode, i_size_read(inode)); + valid_size = ei->valid_size;
ret = generic_write_checks(iocb, iter); diff --git a/fs/exfat/super.c b/fs/exfat/super.c index 82acff400f4c..75c6d5046e31 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -801,10 +801,21 @@ static int exfat_init_fs_context(struct fs_context *fc) ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
- sbi->options.fs_uid = current_uid(); - sbi->options.fs_gid = current_gid(); - sbi->options.fs_fmask = current->fs->umask; - sbi->options.fs_dmask = current->fs->umask; + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE && fc->root) { + struct super_block *sb = fc->root->d_sb; + struct exfat_mount_options *cur_opts = &EXFAT_SB(sb)->options; + + sbi->options.fs_uid = cur_opts->fs_uid; + sbi->options.fs_gid = cur_opts->fs_gid; + sbi->options.fs_fmask = cur_opts->fs_fmask; + sbi->options.fs_dmask = cur_opts->fs_dmask; + } else { + sbi->options.fs_uid = current_uid(); + sbi->options.fs_gid = current_gid(); + sbi->options.fs_fmask = current->fs->umask; + sbi->options.fs_dmask = current->fs->umask; + } + sbi->options.allow_utime = -1; sbi->options.iocharset = exfat_default_iocharset; sbi->options.errors = EXFAT_ERRORS_RO; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 21d228073d79..0885a56e57fd 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -1292,7 +1292,6 @@ struct inode *__ext4_new_inode(struct mnt_idmap *idmap, sizeof(gen)); }
- ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = sbi->s_want_extra_isize; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ae513b14fd08..f8fee863a022 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4878,7 +4878,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ei->i_projid = make_kprojid(&init_user_ns, i_projid); set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
- ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ei->i_inline_off = 0; ei->i_dir_start_lookup = 0; ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 96bb2d2366d6..ed2d63b2e91d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -778,6 +778,8 @@ static void __mb_check_buddy(struct ext4_buddy *e4b, char *file, ext4_group_t groupnr; struct ext4_prealloc_space *pa; pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); + if (!pa->pa_len) + continue; ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); MB_CHECK_ASSERT(groupnr == e4b->bd_group); for (i = 0; i < pa->pa_len; i++) diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c index 05997b4d0120..e5a6cb533242 100644 --- a/fs/ext4/orphan.c +++ b/fs/ext4/orphan.c @@ -8,6 +8,8 @@ #include "ext4.h" #include "ext4_jbd2.h"
+#define EXT4_MAX_ORPHAN_FILE_BLOCKS 512 + static int ext4_orphan_file_add(handle_t *handle, struct inode *inode) { int i, j, start; @@ -589,7 +591,7 @@ int ext4_init_orphan_info(struct super_block *sb) * consuming absurd amounts of memory when pinning blocks of orphan * file in memory. */ - if (inode->i_size > 8 << 20) { + if (inode->i_size > (EXT4_MAX_ORPHAN_FILE_BLOCKS << inode->i_blkbits)) { ext4_msg(sb, KERN_ERR, "orphan file too big: %llu", (unsigned long long)inode->i_size); ret = -EFSCORRUPTED; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 78ecdedadb07..d26a754bb9c8 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1418,6 +1418,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
inode_set_iversion(&ei->vfs_inode, 1); ei->i_flags = 0; + ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ spin_lock_init(&ei->i_raw_lock); ei->i_prealloc_node = RB_ROOT; atomic_set(&ei->i_prealloc_active, 0); @@ -2493,7 +2494,7 @@ static int parse_apply_sb_mount_options(struct super_block *sb, struct ext4_fs_context *m_ctx) { struct ext4_sb_info *sbi = EXT4_SB(sb); - char s_mount_opts[65]; + char s_mount_opts[64]; struct ext4_fs_context *s_ctx = NULL; struct fs_context *fc = NULL; int ret = -ENOMEM; @@ -2501,7 +2502,8 @@ static int parse_apply_sb_mount_options(struct super_block *sb, if (!sbi->s_es->s_mount_opts[0]) return 0;
- strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts); + if (strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts) < 0) + return -E2BIG;
fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL); if (!fc) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index efaad43a7aab..e32aec0e25f0 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1174,7 +1174,11 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent, if (block_csum) end = (void *)bh->b_data + bh->b_size; else { - ext4_get_inode_loc(parent, &iloc); + err = ext4_get_inode_loc(parent, &iloc); + if (err) { + EXT4_ERROR_INODE(parent, "parent inode loc (error %d)", err); + return; + } end = (void *)ext4_raw_inode(&iloc) + EXT4_SB(parent->i_sb)->s_inode_size; }
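The ext4 super.c change above relies on strscpy_pad() reporting truncation: it returns the number of bytes copied, or -E2BIG when the source (including its terminating NUL) does not fit, so a cut-off mount-option string can be rejected instead of being parsed. A minimal hedged sketch of that check, with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/string.h>

/* Copy a NUL-terminated option string, refusing silent truncation. */
static int copy_opts_checked(char *dst, size_t dst_len, const char *src)
{
	if (strscpy_pad(dst, src, dst_len) < 0)
		return -E2BIG;
	return 0;
}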
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index fcd21bb060cd..70bb5ded4fd6 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -757,10 +757,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task) ret = -EFSCORRUPTED;
/* Avoid f2fs_commit_super in irq context */ - if (!in_task) - f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION); - else - f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION); + f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION); goto out_release; }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index bbb29b602438..ea831671e3ad 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3272,6 +3272,19 @@ static inline bool __should_serialize_io(struct inode *inode, return false; }
+static inline void account_writeback(struct inode *inode, bool inc) +{ + if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) + return; + + f2fs_down_read(&F2FS_I(inode)->i_sem); + if (inc) + atomic_inc(&F2FS_I(inode)->writeback); + else + atomic_dec(&F2FS_I(inode)->writeback); + f2fs_up_read(&F2FS_I(inode)->i_sem); +} + static int __f2fs_write_data_pages(struct address_space *mapping, struct writeback_control *wbc, enum iostat_type io_type) @@ -3321,10 +3334,14 @@ static int __f2fs_write_data_pages(struct address_space *mapping, locked = true; }
+ account_writeback(inode, true); + blk_start_plug(&plug); ret = f2fs_write_cache_pages(mapping, wbc, io_type); blk_finish_plug(&plug);
+ account_writeback(inode, false); + if (locked) mutex_unlock(&sbi->writepages);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index fe5e91c6e910..e9b60389403f 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -756,7 +756,7 @@ static void __update_extent_tree_range(struct inode *inode, } goto out_read_extent_cache; update_age_extent_cache: - if (!tei->last_blocks) + if (tei->last_blocks == F2FS_EXTENT_AGE_INVALID) goto out_read_extent_cache;
__set_extent_info(&ei, fofs, len, 0, false, @@ -860,7 +860,7 @@ static int __get_new_block_age(struct inode *inode, struct extent_info *ei, cur_age = cur_blocks - tei.last_blocks; else /* allocated_data_blocks overflow */ - cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks; + cur_age = (ULLONG_MAX - 1) - tei.last_blocks + cur_blocks;
if (tei.age) ei->age = __calculate_block_age(sbi, cur_age, tei.age); @@ -1062,6 +1062,7 @@ void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn, struct extent_info ei = { .fofs = fofs, .len = len, + .last_blocks = F2FS_EXTENT_AGE_INVALID, };
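The new F2FS_EXTENT_AGE_INVALID sentinel reserves ULLONG_MAX so that a last_blocks value of 0 stays valid, and the age computation above tolerates the allocation counter wrapping before it can ever reach ULLONG_MAX. A hedged restatement of that arithmetic with illustrative names (not the f2fs helpers):

#include <linux/limits.h>
#include <linux/types.h>

#define AGE_INVALID ULLONG_MAX	/* "no previous allocation count recorded" */

/* Age of an extent = data blocks allocated since it was last touched.  The
 * counter is reset to 0 before it can reach ULLONG_MAX, so ULLONG_MAX stays
 * free to act as the invalid marker and the wrap case below stays exact. */
static u64 block_age(u64 cur_blocks, u64 last_blocks)
{
	if (last_blocks == AGE_INVALID)
		return 0;	/* caller treats this entry as unknown */
	if (cur_blocks >= last_blocks)
		return cur_blocks - last_blocks;
	return (ULLONG_MAX - 1) - last_blocks + cur_blocks;	/* counter wrapped */
}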
if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE)) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 695f74875b8f..998ac993543e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -249,6 +249,7 @@ enum { #define DEF_CP_INTERVAL 60 /* 60 secs */ #define DEF_IDLE_INTERVAL 5 /* 5 secs */ #define DEF_DISABLE_INTERVAL 5 /* 5 secs */ +#define DEF_ENABLE_INTERVAL 16 /* 16 secs */ #define DEF_DISABLE_QUICK_INTERVAL 1 /* 1 secs */ #define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */
@@ -644,6 +645,12 @@ enum extent_type { NR_EXTENT_CACHES, };
+/* + * Reserved value to mark invalid age extents; the valid block range + * is therefore 0 to ULLONG_MAX-1 + */ +#define F2FS_EXTENT_AGE_INVALID ULLONG_MAX + struct extent_info { unsigned int fofs; /* start offset in a file */ unsigned int len; /* length of the extent */ @@ -853,6 +860,7 @@ struct f2fs_inode_info { /* linked in global inode list for cache donation */ struct list_head gdonate_list; pgoff_t donate_start, donate_end; /* inclusive */ + atomic_t open_count; /* # of open files */
struct task_struct *atomic_write_task; /* store atomic write task */ struct extent_tree *extent_tree[NR_EXTENT_CACHES]; @@ -880,6 +888,7 @@ struct f2fs_inode_info { unsigned char i_compress_level; /* compress level (lz4hc,zstd) */ unsigned char i_compress_flag; /* compress flag */ unsigned int i_cluster_size; /* cluster size */ + atomic_t writeback; /* count # of writeback thread */
unsigned int atomic_write_cnt; loff_t original_i_size; /* original i_size before atomic write */ @@ -1343,6 +1352,7 @@ enum { DISCARD_TIME, GC_TIME, DISABLE_TIME, + ENABLE_TIME, UMOUNT_DISCARD_TIMEOUT, MAX_TIME, }; @@ -1798,9 +1808,6 @@ struct f2fs_sb_info { spinlock_t error_lock; /* protect errors/stop_reason array */ bool error_dirty; /* errors of sb is dirty */
- struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ - unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ - /* For reclaimed segs statistics per each GC mode */ unsigned int gc_segment_mode; /* GC state for reclaimed segments */ unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */ @@ -3597,6 +3604,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); void f2fs_update_inode(struct inode *inode, struct page *node_page); void f2fs_update_inode_page(struct inode *inode); int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); +void f2fs_remove_donate_inode(struct inode *inode); void f2fs_evict_inode(struct inode *inode); void f2fs_handle_failed_inode(struct inode *inode);
@@ -3694,7 +3702,6 @@ void f2fs_quota_off_umount(struct super_block *sb); void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag); void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason); void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error); -void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error); int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); int f2fs_sync_fs(struct super_block *sb, int sync); int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); @@ -4536,7 +4543,7 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode) f2fs_up_write(&fi->i_sem); return true; } - if (f2fs_is_mmap_file(inode) || + if (f2fs_is_mmap_file(inode) || atomic_read(&fi->writeback) || (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) { f2fs_up_write(&fi->i_sem); return false; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 67053bf6ca3e..3a56ab6a22c0 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -631,7 +631,10 @@ static int f2fs_file_open(struct inode *inode, struct file *filp) if (err) return err;
- return finish_preallocate_blocks(inode); + err = finish_preallocate_blocks(inode); + if (!err) + atomic_inc(&F2FS_I(inode)->open_count); + return err; }
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) @@ -1594,8 +1597,11 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, f2fs_set_data_blkaddr(dn, NEW_ADDR); }
- f2fs_update_read_extent_cache_range(dn, start, 0, index - start); - f2fs_update_age_extent_cache_range(dn, start, index - start); + if (index > start) { + f2fs_update_read_extent_cache_range(dn, start, 0, + index - start); + f2fs_update_age_extent_cache_range(dn, start, index - start); + }
return ret; } @@ -1989,6 +1995,9 @@ static long f2fs_fallocate(struct file *file, int mode,
static int f2fs_release_file(struct inode *inode, struct file *filp) { + if (atomic_dec_and_test(&F2FS_I(inode)->open_count)) + f2fs_remove_donate_inode(inode); + /* * f2fs_release_file is called at every close calls. So we should * not drop any inmemory pages by close called by other process. @@ -2062,8 +2071,9 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
f2fs_down_write(&fi->i_sem); if (!f2fs_may_compress(inode) || - (S_ISREG(inode->i_mode) && - F2FS_HAS_BLOCKS(inode))) { + atomic_read(&fi->writeback) || + (S_ISREG(inode->i_mode) && + F2FS_HAS_BLOCKS(inode))) { f2fs_up_write(&fi->i_sem); return -EINVAL; } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 2dda8f23c0b9..dfd1e44086a6 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -787,7 +787,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct sit_info *sm = SIT_I(sbi); - struct victim_sel_policy p; + struct victim_sel_policy p = {0}; unsigned int secno, last_victim; unsigned int last_segment; unsigned int nsearched; diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index c77184dbc71c..f6ba15c095f8 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -807,7 +807,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) return 0; }
-static void f2fs_remove_donate_inode(struct inode *inode) +void f2fs_remove_donate_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 781b872fac8c..05e802c1286d 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -1044,9 +1044,11 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir, if (whiteout) { set_inode_flag(whiteout, FI_INC_LINK); err = f2fs_add_link(old_dentry, whiteout); - if (err) + if (err) { + d_invalidate(old_dentry); + d_invalidate(new_dentry); goto put_out_dir; - + } spin_lock(&whiteout->i_lock); whiteout->i_state &= ~I_LINKABLE; spin_unlock(&whiteout->i_lock); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index e4d81b8705d1..bc105da3e746 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -398,7 +398,7 @@ static int sanity_check_node_chain(struct f2fs_sb_info *sbi, block_t blkaddr, }
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, - bool check_only) + bool check_only, bool *new_inode) { struct curseg_info *curseg; struct page *page = NULL; @@ -445,16 +445,19 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, quota_inode = true; }
- /* - * CP | dnode(F) | inode(DF) - * For this case, we should not give up now. - */ entry = add_fsync_inode(sbi, head, ino_of_node(page), quota_inode); if (IS_ERR(entry)) { err = PTR_ERR(entry); - if (err == -ENOENT) + /* + * CP | dnode(F) | inode(DF) + * For this case, we should not give up now. + */ + if (err == -ENOENT) { + if (check_only) + *new_inode = true; goto next; + } f2fs_put_page(page, 1); break; } @@ -852,6 +855,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) int ret = 0; unsigned long s_flags = sbi->sb->s_flags; bool need_writecp = false; + bool new_inode = false;
if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) f2fs_info(sbi, "recover fsync data on readonly fs"); @@ -864,8 +868,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) f2fs_down_write(&sbi->cp_global_sem);
/* step #1: find fsynced inode numbers */ - err = find_fsync_dnodes(sbi, &inode_list, check_only); - if (err || list_empty(&inode_list)) + err = find_fsync_dnodes(sbi, &inode_list, check_only, &new_inode); + if (err < 0 || (list_empty(&inode_list) && (!check_only || !new_inode))) goto skip;
if (check_only) { diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 8ac6206110a1..819b92fd94e6 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -3746,8 +3746,13 @@ int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
- if (IS_DATASEG(curseg->seg_type)) - atomic64_inc(&sbi->allocated_data_blocks); + if (IS_DATASEG(curseg->seg_type)) { + unsigned long long new_val; + + new_val = atomic64_inc_return(&sbi->allocated_data_blocks); + if (unlikely(new_val == ULLONG_MAX)) + atomic64_set(&sbi->allocated_data_blocks, 0); + }
up_write(&sit_i->sentry_lock);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index ae7263954404..3001ad8df5d1 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1425,6 +1425,8 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) /* Initialize f2fs-specific inode info */ atomic_set(&fi->dirty_pages, 0); atomic_set(&fi->i_compr_blocks, 0); + atomic_set(&fi->open_count, 0); + atomic_set(&fi->writeback, 0); init_f2fs_rwsem(&fi->i_sem); spin_lock_init(&fi->i_size_lock); INIT_LIST_HEAD(&fi->dirty_list); @@ -1654,14 +1656,6 @@ static void f2fs_put_super(struct super_block *sb) truncate_inode_pages_final(META_MAPPING(sbi)); }
- for (i = 0; i < NR_COUNT_TYPE; i++) { - if (!get_pages(sbi, i)) - continue; - f2fs_err(sbi, "detect filesystem reference count leak during " - "umount, type: %d, count: %lld", i, get_pages(sbi, i)); - f2fs_bug_on(sbi, 1); - } - f2fs_bug_on(sbi, sbi->fsync_node_num);
f2fs_destroy_compress_inode(sbi); @@ -1672,6 +1666,15 @@ static void f2fs_put_super(struct super_block *sb) iput(sbi->meta_inode); sbi->meta_inode = NULL;
+ /* Should check the page counts after dropping all node/meta pages */ + for (i = 0; i < NR_COUNT_TYPE; i++) { + if (!get_pages(sbi, i)) + continue; + f2fs_err(sbi, "detect filesystem reference count leak during " + "umount, type: %d, count: %lld", i, get_pages(sbi, i)); + f2fs_bug_on(sbi, 1); + } + /* * iput() can update stat information, if f2fs_write_checkpoint() * above failed with error. @@ -1694,7 +1697,6 @@ static void f2fs_put_super(struct super_block *sb) kfree(sbi->raw_super);
f2fs_destroy_page_array_cache(sbi); - f2fs_destroy_xattr_caches(sbi); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(F2FS_OPTION(sbi).s_qf_names[i]); @@ -2276,21 +2278,40 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) restore_flag: sbi->gc_mode = gc_mode; sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ + f2fs_info(sbi, "f2fs_disable_checkpoint() finish, err:%d", err); return err; }
-static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) +static int f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) { - int retry = DEFAULT_RETRY_IO_COUNT; + unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16; + long long start, writeback, end; + int ret; + + f2fs_info(sbi, "f2fs_enable_checkpoint() starts, meta: %lld, node: %lld, data: %lld", + get_pages(sbi, F2FS_DIRTY_META), + get_pages(sbi, F2FS_DIRTY_NODES), + get_pages(sbi, F2FS_DIRTY_DATA)); + + f2fs_update_time(sbi, ENABLE_TIME); + + start = ktime_get();
/* we should flush all the data to keep data consistency */ - do { - sync_inodes_sb(sbi->sb); + while (get_pages(sbi, F2FS_DIRTY_DATA)) { + writeback_inodes_sb_nr(sbi->sb, nr_pages, WB_REASON_SYNC); f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); - } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
- if (unlikely(retry < 0)) - f2fs_warn(sbi, "checkpoint=enable has some unwritten data."); + if (f2fs_time_over(sbi, ENABLE_TIME)) + break; + } + writeback = ktime_get(); + + sync_inodes_sb(sbi->sb); + + if (unlikely(get_pages(sbi, F2FS_DIRTY_DATA))) + f2fs_warn(sbi, "checkpoint=enable has some unwritten data: %lld", + get_pages(sbi, F2FS_DIRTY_DATA));
f2fs_down_write(&sbi->gc_lock); f2fs_dirty_to_prefree(sbi); @@ -2299,10 +2320,19 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) set_sbi_flag(sbi, SBI_IS_DIRTY); f2fs_up_write(&sbi->gc_lock);
- f2fs_sync_fs(sbi->sb, 1); + ret = f2fs_sync_fs(sbi->sb, 1); + if (ret) + f2fs_err(sbi, "%s sync_fs failed, ret: %d", __func__, ret);
/* Let's ensure there's no pending checkpoint anymore */ f2fs_flush_ckpt_thread(sbi); + + end = ktime_get(); + + f2fs_info(sbi, "f2fs_enable_checkpoint() finishes, writeback:%llu, sync:%llu", + ktime_ms_delta(writeback, start), + ktime_ms_delta(end, writeback)); + return ret; }
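f2fs_enable_checkpoint() now drains dirty data in slices, bounded by the new ENABLE_TIME interval, before issuing one final blocking sync, instead of looping a fixed number of times over sync_inodes_sb(). A simplified hedged sketch of that flush-with-deadline shape (illustrative only; the real code re-reads the dirty-page count every iteration):

#include <linux/fs.h>
#include <linux/ktime.h>
#include <linux/writeback.h>

static void flush_with_deadline(struct super_block *sb, long nr_dirty,
				ktime_t deadline)
{
	long batch = max(nr_dirty / 16, 1L);

	/* Incremental phase: push out a slice at a time until the deadline. */
	while (nr_dirty > 0 && ktime_before(ktime_get(), deadline)) {
		writeback_inodes_sb_nr(sb, batch, WB_REASON_SYNC);
		nr_dirty -= batch;	/* placeholder for re-reading the count */
	}

	/* Final phase: one blocking pass to catch whatever is left. */
	sync_inodes_sb(sb);
}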
static int f2fs_remount(struct super_block *sb, int *flags, char *data) @@ -2517,7 +2547,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) goto restore_discard; need_enable_checkpoint = true; } else { - f2fs_enable_checkpoint(sbi); + err = f2fs_enable_checkpoint(sbi); + if (err) + goto restore_discard; need_disable_checkpoint = true; } } @@ -2559,7 +2591,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) return 0; restore_checkpoint: if (need_enable_checkpoint) { - f2fs_enable_checkpoint(sbi); + if (f2fs_enable_checkpoint(sbi)) + f2fs_warn(sbi, "checkpoint has not been enabled"); } else if (need_disable_checkpoint) { if (f2fs_disable_checkpoint(sbi)) f2fs_warn(sbi, "checkpoint has not been disabled"); @@ -3866,6 +3899,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi) sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL; sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL; sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL; + sbi->interval_time[ENABLE_TIME] = DEF_ENABLE_INTERVAL; sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] = DEF_UMOUNT_DISCARD_TIMEOUT; clear_sbi_flag(sbi, SBI_NEED_FSCK); @@ -4142,48 +4176,7 @@ void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag) spin_unlock_irqrestore(&sbi->error_lock, flags); }
-static bool f2fs_update_errors(struct f2fs_sb_info *sbi) -{ - unsigned long flags; - bool need_update = false; - - spin_lock_irqsave(&sbi->error_lock, flags); - if (sbi->error_dirty) { - memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, - MAX_F2FS_ERRORS); - sbi->error_dirty = false; - need_update = true; - } - spin_unlock_irqrestore(&sbi->error_lock, flags); - - return need_update; -} - -static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error) -{ - int err; - - f2fs_down_write(&sbi->sb_lock); - - if (!f2fs_update_errors(sbi)) - goto out_unlock; - - err = f2fs_commit_super(sbi, false); - if (err) - f2fs_err_ratelimited(sbi, - "f2fs_commit_super fails to record errors:%u, err:%d", - error, err); -out_unlock: - f2fs_up_write(&sbi->sb_lock); -} - void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error) -{ - f2fs_save_errors(sbi, error); - f2fs_record_errors(sbi, error); -} - -void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error) { f2fs_save_errors(sbi, error);
@@ -4608,13 +4601,9 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) if (err) goto free_iostat;
- /* init per sbi slab cache */ - err = f2fs_init_xattr_caches(sbi); - if (err) - goto free_percpu; err = f2fs_init_page_array_cache(sbi); if (err) - goto free_xattr_cache; + goto free_percpu;
/* get an inode for meta space */ sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); @@ -4816,11 +4805,15 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) } } else { err = f2fs_recover_fsync_data(sbi, true); - - if (!f2fs_readonly(sb) && err > 0) { - err = -EINVAL; - f2fs_err(sbi, "Need to recover fsync data"); - goto free_meta; + if (err > 0) { + if (!f2fs_readonly(sb)) { + f2fs_err(sbi, "Need to recover fsync data"); + err = -EINVAL; + goto free_meta; + } else { + f2fs_info(sbi, "drop all fsynced data"); + err = 0; + } } }
@@ -4843,20 +4836,19 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) if (err) goto free_meta;
+ /* f2fs_recover_fsync_data() cleared this already */ + clear_sbi_flag(sbi, SBI_POR_DOING); + err = f2fs_init_inmem_curseg(sbi); if (err) goto sync_free_meta;
- /* f2fs_recover_fsync_data() cleared this already */ - clear_sbi_flag(sbi, SBI_POR_DOING); - - if (test_opt(sbi, DISABLE_CHECKPOINT)) { + if (test_opt(sbi, DISABLE_CHECKPOINT)) err = f2fs_disable_checkpoint(sbi); - if (err) - goto sync_free_meta; - } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) { - f2fs_enable_checkpoint(sbi); - } + else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) + err = f2fs_enable_checkpoint(sbi); + if (err) + goto sync_free_meta;
/* * If filesystem is not mounted as read-only then @@ -4942,8 +4934,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) sbi->meta_inode = NULL; free_page_array_cache: f2fs_destroy_page_array_cache(sbi); -free_xattr_cache: - f2fs_destroy_xattr_caches(sbi); free_percpu: destroy_percpu_info(sbi); free_iostat: @@ -5105,10 +5095,15 @@ static int __init init_f2fs_fs(void) err = f2fs_create_casefold_cache(); if (err) goto free_compress_cache; - err = register_filesystem(&f2fs_fs_type); + err = f2fs_init_xattr_cache(); if (err) goto free_casefold_cache; + err = register_filesystem(&f2fs_fs_type); + if (err) + goto free_xattr_cache; return 0; +free_xattr_cache: + f2fs_destroy_xattr_cache(); free_casefold_cache: f2fs_destroy_casefold_cache(); free_compress_cache: @@ -5149,6 +5144,7 @@ static int __init init_f2fs_fs(void) static void __exit exit_f2fs_fs(void) { unregister_filesystem(&f2fs_fs_type); + f2fs_destroy_xattr_cache(); f2fs_destroy_casefold_cache(); f2fs_destroy_compress_cache(); f2fs_destroy_compress_mempool(); diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 3f3874943679..7e9f24e52151 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -23,11 +23,12 @@ #include "xattr.h" #include "segment.h"
+static struct kmem_cache *inline_xattr_slab; static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline) { - if (likely(size == sbi->inline_xattr_slab_size)) { + if (likely(size == DEFAULT_XATTR_SLAB_SIZE)) { *is_inline = true; - return f2fs_kmem_cache_alloc(sbi->inline_xattr_slab, + return f2fs_kmem_cache_alloc(inline_xattr_slab, GFP_F2FS_ZERO, false, sbi); } *is_inline = false; @@ -38,7 +39,7 @@ static void xattr_free(struct f2fs_sb_info *sbi, void *xattr_addr, bool is_inline) { if (is_inline) - kmem_cache_free(sbi->inline_xattr_slab, xattr_addr); + kmem_cache_free(inline_xattr_slab, xattr_addr); else kfree(xattr_addr); } @@ -830,25 +831,14 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name, return err; }
-int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) +int __init f2fs_init_xattr_cache(void) { - dev_t dev = sbi->sb->s_bdev->bd_dev; - char slab_name[32]; - - sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev)); - - sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size * - sizeof(__le32) + XATTR_PADDING_SIZE; - - sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name, - sbi->inline_xattr_slab_size); - if (!sbi->inline_xattr_slab) - return -ENOMEM; - - return 0; + inline_xattr_slab = f2fs_kmem_cache_create("f2fs_xattr_entry", + DEFAULT_XATTR_SLAB_SIZE); + return inline_xattr_slab ? 0 : -ENOMEM; }
-void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) +void f2fs_destroy_xattr_cache(void) { - kmem_cache_destroy(sbi->inline_xattr_slab); + kmem_cache_destroy(inline_xattr_slab); } diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h index a005ffdcf717..8f6e32b4e965 100644 --- a/fs/f2fs/xattr.h +++ b/fs/f2fs/xattr.h @@ -89,6 +89,8 @@ struct f2fs_xattr_entry { F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) - \ DEF_INLINE_RESERVED_SIZE - \ MIN_INLINE_DENTRY_SIZE / sizeof(__le32)) +#define DEFAULT_XATTR_SLAB_SIZE (DEFAULT_INLINE_XATTR_ADDRS * \ + sizeof(__le32) + XATTR_PADDING_SIZE)
/* * On-disk structure of f2fs_xattr @@ -132,8 +134,8 @@ extern int f2fs_setxattr(struct inode *, int, const char *, extern int f2fs_getxattr(struct inode *, int, const char *, void *, size_t, struct page *); extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t); -extern int f2fs_init_xattr_caches(struct f2fs_sb_info *); -extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *); +extern int __init f2fs_init_xattr_cache(void); +extern void f2fs_destroy_xattr_cache(void); #else
#define f2fs_xattr_handlers NULL @@ -150,8 +152,8 @@ static inline int f2fs_getxattr(struct inode *inode, int index, { return -EOPNOTSUPP; } -static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; } -static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { } +static inline int __init f2fs_init_xattr_cache(void) { return 0; } +static inline void f2fs_destroy_xattr_cache(void) { } #endif
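The xattr changes replace one slab cache per mounted filesystem with a single cache created when the module loads and destroyed when it unloads, which also does away with the per-device cache name. A generic hedged sketch of that module-lifetime cache pattern:

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;	/* shared by every mount */

static int __init example_init(void)
{
	example_cache = kmem_cache_create("example_entry", 256, 0, 0, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	kmem_cache_destroy(example_cache);	/* NULL-safe */
}

module_init(example_init);
module_exit(example_exit);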
#ifdef CONFIG_F2FS_FS_SECURITY diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a8218a3bc0b4..23afaadd5f3e 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -109,7 +109,9 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) fuse_file_io_release(ff, ra->inode);
if (!args) { - /* Do nothing when server does not implement 'open' */ + /* Do nothing when server does not implement 'opendir' */ + } else if (args->opcode == FUSE_RELEASE && ff->fm->fc->no_open) { + fuse_release_end(ff->fm, args, 0); } else if (sync) { fuse_simple_request(ff->fm, args); fuse_release_end(ff->fm, args, 0); @@ -130,8 +132,17 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, struct fuse_file *ff; int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; bool open = isdir ? !fc->no_opendir : !fc->no_open; + bool release = !isdir || open;
- ff = fuse_file_alloc(fm, open); + /* + * ff->args->release_args still needs to be allocated (so we can hold an + * inode reference while there are pending in-flight file operations when + * ->release() is called, see fuse_prepare_release()) even if + * fc->no_open is set. Otherwise reclaim can deadlock: while servicing + * the readahead request the server may trigger reclaim, and reclaim may + * then evict the inode of the file being read ahead. + */ + ff = fuse_file_alloc(fm, release); if (!ff) return ERR_PTR(-ENOMEM);
@@ -151,13 +162,14 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, fuse_file_free(ff); return ERR_PTR(err); } else { - /* No release needed */ - kfree(ff->args); - ff->args = NULL; - if (isdir) + if (isdir) { + /* No release needed */ + kfree(ff->args); + ff->args = NULL; fc->no_opendir = 1; - else + } else { fc->no_open = 1; + } } }
@@ -1582,7 +1594,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, if (!ia) return -ENOMEM;
- if (fopen_direct_io && fc->direct_io_allow_mmap) { + if (fopen_direct_io) { res = filemap_write_and_wait_range(mapping, pos, pos + count - 1); if (res) { fuse_io_free(ia); @@ -1656,6 +1668,15 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, if (res > 0) *ppos = pos;
+ if (res > 0 && write && fopen_direct_io) { + /* + * As in generic_file_direct_write(), invalidate after the + * write, to invalidate read-ahead cache that may have competed + * with the write. + */ + invalidate_inode_pages2_range(mapping, idx_from, idx_to); + } + return res > 0 ? res : err; } EXPORT_SYMBOL_GPL(fuse_direct_io); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 1ed42f0e6ec7..d13a050bcb9d 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -631,8 +631,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote) struct gfs2_inode *ip = gl->gl_object; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- if (!remote || sb_rdonly(sdp->sd_vfs) || - test_bit(SDF_KILL, &sdp->sd_flags)) + if (!remote || test_bit(SDF_KILL, &sdp->sd_flags)) return;
if (gl->gl_demote_state == LM_ST_UNLOCKED && diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 314ec2a70167..2e92b606d19e 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -485,7 +485,7 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO); bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); - bio_chain(new, prev); + bio_chain(prev, new); submit_bio(prev); return new; } diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 2e6bc77f4f81..642584265a6f 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1617,7 +1617,7 @@ int gfs2_quotad(void *data)
t = min(quotad_timeo, statfs_timeo);
- t = wait_event_freezable_timeout(sdp->sd_quota_wait, + t -= wait_event_freezable_timeout(sdp->sd_quota_wait, sdp->sd_statfs_force_sync || gfs2_withdrawing_or_withdrawn(sdp) || kthread_should_stop(), diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 3b1303f97a3b..e6f8be03190c 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -759,9 +759,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) break; }
- error = gfs2_do_thaw(sdp, who); - if (error) - goto out; + (void)gfs2_do_thaw(sdp, who);
if (error == -EBUSY) fs_err(sdp, "waiting for recovery before freeze\n"); diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c index 407d5152eb41..c0089849be50 100644 --- a/fs/hfsplus/bnode.c +++ b/fs/hfsplus/bnode.c @@ -481,6 +481,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) tree->node_hash[hash] = node; tree->node_hash_cnt++; } else { + hfs_bnode_get(node2); spin_unlock(&tree->hash_lock); kfree(node); wait_event(node2->lock_wq, @@ -704,6 +705,5 @@ bool hfs_bnode_need_zeroout(struct hfs_btree *tree) struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);
- return tree->cnid == HFSPLUS_CAT_CNID && - volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX; + return volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX; } diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index f5c4b3e31a1c..33154c720a4e 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -552,8 +552,13 @@ static int hfsplus_rename(struct mnt_idmap *idmap, res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name); - if (!res) + if (!res) { new_dentry->d_fsdata = old_dentry->d_fsdata; + + res = hfsplus_cat_write_inode(old_dir); + if (!res) + res = hfsplus_cat_write_inode(new_dir); + } return res; }
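The hfs_bnode_get() added in __hfs_bnode_create() above pins the already-hashed node before the hash lock is dropped, so the subsequent wait_event() cannot race with the node being freed. A generic hedged sketch of the take-a-reference-before-unlocking pattern, with hypothetical types and an undefined lookup helper standing in for the real search:

#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct cache_node {
	struct kref ref;
	u32 id;
	/* ... payload ... */
};

struct cache_table {
	spinlock_t lock;
	/* ... lookup structure protected by @lock ... */
};

/* Hypothetical unlocked lookup, must be called with table->lock held. */
struct cache_node *__cache_lookup(struct cache_table *table, u32 id);

static struct cache_node *cache_lookup_get(struct cache_table *table, u32 id)
{
	struct cache_node *node;

	spin_lock(&table->lock);
	node = __cache_lookup(table, id);
	if (node)
		kref_get(&node->ref);	/* pin while the lock still protects it */
	spin_unlock(&table->lock);
	return node;		/* safe to use (and sleep on) after unlock */
}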
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index c85b5802ec0f..2d68c52f894f 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -178,13 +178,29 @@ const struct dentry_operations hfsplus_dentry_operations = { .d_compare = hfsplus_compare_dentry, };
-static void hfsplus_get_perms(struct inode *inode, - struct hfsplus_perm *perms, int dir) +static int hfsplus_get_perms(struct inode *inode, + struct hfsplus_perm *perms, int dir) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); u16 mode;
mode = be16_to_cpu(perms->mode); + if (dir) { + if (mode && !S_ISDIR(mode)) + goto bad_type; + } else if (mode) { + switch (mode & S_IFMT) { + case S_IFREG: + case S_IFLNK: + case S_IFCHR: + case S_IFBLK: + case S_IFIFO: + case S_IFSOCK: + break; + default: + goto bad_type; + } + }
i_uid_write(inode, be32_to_cpu(perms->owner)); if ((test_bit(HFSPLUS_SB_UID, &sbi->flags)) || (!i_uid_read(inode) && !mode)) @@ -210,6 +226,10 @@ static void hfsplus_get_perms(struct inode *inode, inode->i_flags |= S_APPEND; else inode->i_flags &= ~S_APPEND; + return 0; +bad_type: + pr_err("invalid file type 0%04o for inode %lu\n", mode, inode->i_ino); + return -EIO; }
static int hfsplus_file_open(struct inode *inode, struct file *file) @@ -514,7 +534,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) } hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, sizeof(struct hfsplus_cat_folder)); - hfsplus_get_perms(inode, &folder->permissions, 1); + res = hfsplus_get_perms(inode, &folder->permissions, 1); + if (res) + goto out; set_nlink(inode, 1); inode->i_size = 2 + be32_to_cpu(folder->valence); inode_set_atime_to_ts(inode, hfsp_mt2ut(folder->access_date)); @@ -543,7 +565,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ? &file->rsrc_fork : &file->data_fork); - hfsplus_get_perms(inode, &file->permissions, 0); + res = hfsplus_get_perms(inode, &file->permissions, 0); + if (res) + goto out; set_nlink(inode, 1); if (S_ISREG(inode->i_mode)) { if (file->permissions.dev) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index d4b990938399..397c96c25c31 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -227,6 +227,22 @@ static void ifs_free(struct folio *folio) kfree(ifs); }
+/* + * Calculate how many bytes to truncate based off the number of blocks to + * truncate and the end position to start truncating from. + */ +static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits, + unsigned blocks_truncated) +{ + unsigned block_size = 1 << block_bits; + unsigned block_offset = end_pos & (block_size - 1); + + if (!block_offset) + return blocks_truncated << block_bits; + + return ((blocks_truncated - 1) << block_bits) + block_offset; +} + /* * Calculate the range inside the folio that we actually need to read. */ @@ -250,22 +266,30 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, * to avoid reading in already uptodate ranges. */ if (ifs) { - unsigned int i; + unsigned int i, blocks_skipped;
/* move forward for each leading block marked uptodate */ - for (i = first; i <= last; i++) { + for (i = first; i <= last; i++) if (!ifs_block_is_uptodate(ifs, i)) break; - *pos += block_size; - poff += block_size; - plen -= block_size; - first++; + + blocks_skipped = i - first; + if (blocks_skipped) { + unsigned long block_offset = *pos & (block_size - 1); + unsigned bytes_skipped = + (blocks_skipped << block_bits) - block_offset; + + *pos += bytes_skipped; + poff += bytes_skipped; + plen -= bytes_skipped; } + first = i;
/* truncate len if we find any trailing uptodate block(s) */ while (++i <= last) { if (ifs_block_is_uptodate(ifs, i)) { - plen -= (last - i + 1) * block_size; + plen -= iomap_bytes_to_truncate(*pos + plen, + block_bits, last - i + 1); last = i - 1; break; } @@ -281,7 +305,8 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end) - plen -= (last - end) * block_size; + plen -= iomap_bytes_to_truncate(*pos + plen, block_bits, + last - end); }
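iomap_bytes_to_truncate() differs from the old blocks * block_size arithmetic only when end_pos lands partway into its block. A standalone hedged restatement of the formula with two worked values, assuming 1 KiB blocks (block_bits == 10):

#include <linux/types.h>

static size_t bytes_to_truncate(loff_t end_pos, unsigned int block_bits,
				unsigned int blocks_truncated)
{
	size_t block_size = 1UL << block_bits;
	size_t block_offset = end_pos & (block_size - 1);

	if (!block_offset)
		return (size_t)blocks_truncated << block_bits;
	return ((size_t)(blocks_truncated - 1) << block_bits) + block_offset;
}

/* bytes_to_truncate(5 * 1024 + 300, 10, 3) == 2 * 1024 + 300 == 2348
 * bytes_to_truncate(5 * 1024,       10, 3) == 3 * 1024       == 3072 */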
*offp = poff; diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index aba9d5ce819d..52dd8c9c3f6f 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -674,12 +674,12 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, } goto out_free_dio; } + }
- if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) { - ret = sb_init_dio_done_wq(inode->i_sb); - if (ret < 0) - goto out_free_dio; - } + if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) { + ret = sb_init_dio_done_wq(inode->i_sb); + if (ret < 0) + goto out_free_dio; }
inode_dio_begin(inode); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index c073f5fb9859..e64a7487e315 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1526,7 +1526,6 @@ static journal_t *journal_init_common(struct block_device *bdev, struct block_device *fs_dev, unsigned long long start, int len, int blocksize) { - static struct lock_class_key jbd2_trans_commit_key; journal_t *journal; int err; int n; @@ -1535,6 +1534,7 @@ static journal_t *journal_init_common(struct block_device *bdev, if (!journal) return ERR_PTR(-ENOMEM);
+ lockdep_register_key(&journal->jbd2_trans_commit_key); journal->j_blocksize = blocksize; journal->j_dev = bdev; journal->j_fs_dev = fs_dev; @@ -1565,7 +1565,7 @@ static journal_t *journal_init_common(struct block_device *bdev, journal->j_max_batch_time = 15000; /* 15ms */ atomic_set(&journal->j_reserved_credits, 0); lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle", - &jbd2_trans_commit_key, 0); + &journal->jbd2_trans_commit_key, 0);
/* The journal is marked for error until we succeed with recovery! */ journal->j_flags = JBD2_ABORT; @@ -1618,6 +1618,7 @@ static journal_t *journal_init_common(struct block_device *bdev, kfree(journal->j_wbuf); jbd2_journal_destroy_revoke(journal); journal_fail_superblock(journal); + lockdep_unregister_key(&journal->jbd2_trans_commit_key); kfree(journal); return ERR_PTR(err); } @@ -2199,6 +2200,7 @@ int jbd2_journal_destroy(journal_t *journal) crypto_free_shash(journal->j_chksum_driver); kfree(journal->j_fc_wbuf); kfree(journal->j_wbuf); + lockdep_unregister_key(&journal->jbd2_trans_commit_key); kfree(journal);
return err; @@ -2373,6 +2375,12 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat, sb->s_feature_compat |= cpu_to_be32(compat); sb->s_feature_ro_compat |= cpu_to_be32(ro); sb->s_feature_incompat |= cpu_to_be32(incompat); + /* + * Update the checksum now so that it is valid even for read-only + * filesystems where jbd2_write_superblock() doesn't get called. + */ + if (jbd2_journal_has_csum_v2or3(journal)) + sb->s_checksum = jbd2_superblock_csum(journal, sb); unlock_buffer(journal->j_sb_buffer); jbd2_journal_init_transaction_limits(journal);
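The jbd2 change above swaps the single static lock_class_key shared by all journals for a per-journal key; because that key lives in dynamically allocated memory it must be registered with lockdep and unregistered before the journal is freed. A generic hedged sketch of that pattern (illustrative structure, not the jbd2 code):

#include <linux/lockdep.h>
#include <linux/slab.h>

struct journal_like {
	struct lock_class_key commit_key;	/* one lockdep class per instance */
	struct lockdep_map commit_map;
};

static struct journal_like *journal_like_create(void)
{
	struct journal_like *j = kzalloc(sizeof(*j), GFP_KERNEL);

	if (!j)
		return NULL;
	/* Keys embedded in heap objects must be registered explicitly. */
	lockdep_register_key(&j->commit_key);
	lockdep_init_map(&j->commit_map, "example_handle", &j->commit_key, 0);
	return j;
}

static void journal_like_destroy(struct journal_like *j)
{
	lockdep_unregister_key(&j->commit_key);	/* before the memory goes away */
	kfree(j);
}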
@@ -2402,9 +2410,17 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
sb = journal->j_superblock;
+ lock_buffer(journal->j_sb_buffer); sb->s_feature_compat &= ~cpu_to_be32(compat); sb->s_feature_ro_compat &= ~cpu_to_be32(ro); sb->s_feature_incompat &= ~cpu_to_be32(incompat); + /* + * Update the checksum now so that it is valid even for read-only + * filesystems where jbd2_write_superblock() doesn't get called. + */ + if (jbd2_journal_has_csum_v2or3(journal)) + sb->s_checksum = jbd2_superblock_csum(journal, sb); + unlock_buffer(journal->j_sb_buffer); jbd2_journal_init_transaction_limits(journal); } EXPORT_SYMBOL(jbd2_journal_clear_features); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index c50bec6e5405..682e65b6db11 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -445,7 +445,7 @@ static int start_this_handle(journal_t *journal, handle_t *handle, read_unlock(&journal->j_state_lock); current->journal_info = handle;
- rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_); + rwsem_acquire_read(&journal->j_trans_commit_map, 0, 1, _THIS_IP_); jbd2_journal_free_transaction(new_transaction); /* * Ensure that no allocations done while the transaction is open are diff --git a/fs/libfs.c b/fs/libfs.c index 874324167849..028f2cf729d5 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -345,22 +345,22 @@ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry) * User space expects the directory offset value of the replaced * (new) directory entry to be unchanged after a rename. * - * Returns zero on success, a negative errno value on failure. + * Caller must have grabbed a slot for new_dentry in the maple_tree + * associated with new_dir, even if dentry is negative. */ -int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) +void simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) { struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir); struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir); long new_offset = dentry2offset(new_dentry);
- simple_offset_remove(old_ctx, old_dentry); + if (WARN_ON(!new_offset)) + return;
- if (new_offset) { - offset_set(new_dentry, 0); - return simple_offset_replace(new_ctx, old_dentry, new_offset); - } - return simple_offset_add(new_ctx, old_dentry); + simple_offset_remove(old_ctx, old_dentry); + offset_set(new_dentry, 0); + WARN_ON(simple_offset_replace(new_ctx, old_dentry, new_offset)); }
/** @@ -387,31 +387,23 @@ int simple_offset_rename_exchange(struct inode *old_dir, long new_index = dentry2offset(new_dentry); int ret;
- simple_offset_remove(old_ctx, old_dentry); - simple_offset_remove(new_ctx, new_dentry); + if (WARN_ON(!old_index || !new_index)) + return -EINVAL;
- ret = simple_offset_replace(new_ctx, old_dentry, new_index); - if (ret) - goto out_restore; + ret = mtree_store(&new_ctx->mt, new_index, old_dentry, GFP_KERNEL); + if (WARN_ON(ret)) + return ret;
- ret = simple_offset_replace(old_ctx, new_dentry, old_index); - if (ret) { - simple_offset_remove(new_ctx, old_dentry); - goto out_restore; + ret = mtree_store(&old_ctx->mt, old_index, new_dentry, GFP_KERNEL); + if (WARN_ON(ret)) { + mtree_store(&new_ctx->mt, new_index, new_dentry, GFP_KERNEL); + return ret; }
- ret = simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry); - if (ret) { - simple_offset_remove(new_ctx, old_dentry); - simple_offset_remove(old_ctx, new_dentry); - goto out_restore; - } + offset_set(old_dentry, new_index); + offset_set(new_dentry, old_index); + simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry); return 0; - -out_restore: - (void)simple_offset_replace(old_ctx, old_dentry, old_index); - (void)simple_offset_replace(new_ctx, new_dentry, new_index); - return ret; }
/** diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 8a72c418cdcc..3f633bb24623 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -96,7 +96,6 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; - struct nlm_lockowner *test_owner; __be32 rc = rpc_success;
dprintk("lockd: TEST4 called\n"); @@ -106,7 +105,6 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
- test_owner = argp->lock.fl.c.flc_owner; /* Now check for conflicting locks */ resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie); if (resp->status == nlm_drop_reply) @@ -114,7 +112,7 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) else dprintk("lockd: TEST4 status %d\n", ntohl(resp->status));
- nlmsvc_put_lockowner(test_owner); + nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rc; diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 1f2149db10f2..179de4f2d4c9 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -628,7 +628,13 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, }
mode = lock_to_openmode(&lock->fl); - error = vfs_test_lock(file->f_file[mode], &lock->fl); + locks_init_lock(&conflock->fl); + /* vfs_test_lock only uses start, end, and owner, but tests flc_file */ + conflock->fl.c.flc_file = lock->fl.c.flc_file; + conflock->fl.fl_start = lock->fl.fl_start; + conflock->fl.fl_end = lock->fl.fl_end; + conflock->fl.c.flc_owner = lock->fl.c.flc_owner; + error = vfs_test_lock(file->f_file[mode], &conflock->fl); if (error) { /* We can't currently deal with deferred test requests */ if (error == FILE_LOCK_DEFERRED) @@ -638,22 +644,19 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, goto out; }
- if (lock->fl.c.flc_type == F_UNLCK) { + if (conflock->fl.c.flc_type == F_UNLCK) { ret = nlm_granted; goto out; }
dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", - lock->fl.c.flc_type, (long long)lock->fl.fl_start, - (long long)lock->fl.fl_end); + conflock->fl.c.flc_type, (long long)conflock->fl.fl_start, + (long long)conflock->fl.fl_end); conflock->caller = "somehost"; /* FIXME */ conflock->len = strlen(conflock->caller); conflock->oh.len = 0; /* don't return OH info */ - conflock->svid = lock->fl.c.flc_pid; - conflock->fl.c.flc_type = lock->fl.c.flc_type; - conflock->fl.fl_start = lock->fl.fl_start; - conflock->fl.fl_end = lock->fl.fl_end; - locks_release_private(&lock->fl); + conflock->svid = conflock->fl.c.flc_pid; + locks_release_private(&conflock->fl);
ret = nlm_lck_denied; out: diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index a03220e66ce0..8cda2aba82d2 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -117,7 +117,6 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) struct nlm_args *argp = rqstp->rq_argp; struct nlm_host *host; struct nlm_file *file; - struct nlm_lockowner *test_owner; __be32 rc = rpc_success;
dprintk("lockd: TEST called\n"); @@ -127,8 +126,6 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
- test_owner = argp->lock.fl.c.flc_owner; - /* Now check for conflicting locks */ resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie)); if (resp->status == nlm_drop_reply) @@ -137,7 +134,7 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp) dprintk("lockd: TEST status %d vers %d\n", ntohl(resp->status), rqstp->rq_vers);
- nlmsvc_put_lockowner(test_owner); + nlmsvc_release_lockowner(&argp->lock); nlmsvc_release_host(host); nlm_release_file(file); return rc; diff --git a/fs/locks.c b/fs/locks.c index 204847628f3e..3bc5f8812160 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2190,13 +2190,21 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) /** * vfs_test_lock - test file byte range lock * @filp: The file to test lock for - * @fl: The lock to test; also used to hold result + * @fl: The byte-range in the file to test; also used to hold result * + * On entry, @fl does not contain a lock, but identifies a range (fl_start, fl_end) + * in the file (c.flc_file), and an owner (c.flc_owner) for whom existing locks + * should be ignored. c.flc_type and c.flc_flags are ignored. + * Both fl_lmops and fl_ops in @fl must be NULL. * Returns -ERRNO on failure. Indicates presence of conflicting lock by - * setting conf->fl_type to something other than F_UNLCK. + * setting fl->fl_type to something other than F_UNLCK. + * + * If vfs_test_lock() does find a lock and return it, the caller must + * use locks_free_lock() or locks_release_private() on the returned lock. */ int vfs_test_lock(struct file *filp, struct file_lock *fl) { + WARN_ON_ONCE(fl->fl_ops || fl->fl_lmops); WARN_ON_ONCE(filp != fl->c.flc_file); if (filp->f_op->lock) return filp->f_op->lock(filp, F_GETLK, fl); diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c index a74ec08f6c96..e6fbc45ec4f1 100644 --- a/fs/nfs_common/nfslocalio.c +++ b/fs/nfs_common/nfslocalio.c @@ -128,6 +128,10 @@ void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid) } EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_one_client);
+/* + * Caller is responsible for calling nfsd_net_put and + * nfsd_file_put (via nfs_to_nfsd_file_put_local). + */ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, struct rpc_clnt *rpc_clnt, const struct cred *cred, const struct nfs_fh *nfs_fh, const fmode_t fmode) @@ -139,7 +143,7 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, * Not running in nfsd context, so must safely get reference on nfsd_serv. * But the server may already be shutting down, if so disallow new localio. * uuid->net is NOT a counted reference, but rcu_read_lock() ensures that - * if uuid->net is not NULL, then calling nfsd_serv_try_get() is safe + * if uuid->net is not NULL, then calling nfsd_net_try_get() is safe * and if it succeeds we will have an implied reference to the net. * * Otherwise NFS may not have ref on NFSD and therefore cannot safely @@ -147,12 +151,12 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, */ rcu_read_lock(); net = rcu_dereference(uuid->net); - if (!net || !nfs_to->nfsd_serv_try_get(net)) { + if (!net || !nfs_to->nfsd_net_try_get(net)) { rcu_read_unlock(); return ERR_PTR(-ENXIO); } rcu_read_unlock(); - /* We have an implied reference to net thanks to nfsd_serv_try_get */ + /* We have an implied reference to net thanks to nfsd_net_try_get */ localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, cred, nfs_fh, fmode); if (IS_ERR(localio)) diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index eefe50a17c4a..bbd87cd16282 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -341,7 +341,8 @@ nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls, struct nfsd_file *file) struct block_device *bdev = file->nf_file->f_path.mnt->mnt_sb->s_bdev;
bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY, - nfsd4_scsi_pr_key(clp), 0, true); + nfsd4_scsi_pr_key(clp), + PR_EXCLUSIVE_ACCESS_REG_ONLY, true); }
const struct nfsd4_layout_ops scsi_layout_ops = { diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 3216416ca042..7369c0315a01 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1017,7 +1017,7 @@ exp_rootfh(struct net *net, struct auth_domain *clp, char *name, { struct svc_export *exp; struct path path; - struct inode *inode; + struct inode *inode __maybe_unused; struct svc_fh fh; int err; struct nfsd_net *nn = net_generic(net, nfsd_net_id); diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index d19968881855..05f0a4867673 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -391,7 +391,7 @@ nfsd_file_put(struct nfsd_file *nf) }
/** - * nfsd_file_put_local - put nfsd_file reference and arm nfsd_serv_put in caller + * nfsd_file_put_local - put nfsd_file reference and arm nfsd_net_put in caller * @nf: nfsd_file of which to put the reference * * First save the associated net to return to caller, then put diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c index f441cb9f74d5..ce6d408598c7 100644 --- a/fs/nfsd/localio.c +++ b/fs/nfsd/localio.c @@ -25,8 +25,8 @@ #include "cache.h"
static const struct nfsd_localio_operations nfsd_localio_ops = { - .nfsd_serv_try_get = nfsd_serv_try_get, - .nfsd_serv_put = nfsd_serv_put, + .nfsd_net_try_get = nfsd_net_try_get, + .nfsd_net_put = nfsd_net_put, .nfsd_open_local_fh = nfsd_open_local_fh, .nfsd_file_put_local = nfsd_file_put_local, .nfsd_file_file = nfsd_file_file, diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index a05a45bb1978..ceab4a3e503f 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -140,9 +140,10 @@ struct nfsd_net {
struct svc_info nfsd_info; #define nfsd_serv nfsd_info.serv - struct percpu_ref nfsd_serv_ref; - struct completion nfsd_serv_confirm_done; - struct completion nfsd_serv_free_done; + + struct percpu_ref nfsd_net_ref; + struct completion nfsd_net_confirm_done; + struct completion nfsd_net_free_done;
/* * clientid and stateid data for construction of net unique COPY @@ -229,8 +230,8 @@ struct nfsd_net { extern bool nfsd_support_version(int vers); extern unsigned int nfsd_net_id;
-bool nfsd_serv_try_get(struct net *net); -void nfsd_serv_put(struct net *net); +bool nfsd_net_try_get(struct net *net); +void nfsd_net_put(struct net *net);
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn); void nfsd_reset_write_verifier(struct nfsd_net *nn); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f7aa63f82bf7..eeca4329e1d0 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2989,8 +2989,10 @@ static int client_states_open(struct inode *inode, struct file *file) return -ENXIO;
ret = seq_open(file, &states_seq_ops); - if (ret) + if (ret) { + drop_client(clp); return ret; + } s = file->private_data; s->private = clp; return 0; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index e6b000a4a31a..fd81db17691a 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3362,6 +3362,11 @@ static __be32 nfsd4_encode_fattr4_suppattr_exclcreat(struct xdr_stream *xdr, u32 supp[3];
memcpy(supp, nfsd_suppattrs[resp->cstate.minorversion], sizeof(supp)); + if (!IS_POSIXACL(d_inode(args->dentry))) + supp[0] &= ~FATTR4_WORD0_ACL; + if (!args->contextsupport) + supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL; + supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0; supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1; supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 45f1bb2c6f13..cc185c00e309 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -214,32 +214,32 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change return 0; }
-bool nfsd_serv_try_get(struct net *net) __must_hold(rcu) +bool nfsd_net_try_get(struct net *net) __must_hold(rcu) { struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref)); + return (nn && percpu_ref_tryget_live(&nn->nfsd_net_ref)); }
-void nfsd_serv_put(struct net *net) __must_hold(rcu) +void nfsd_net_put(struct net *net) __must_hold(rcu) { struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- percpu_ref_put(&nn->nfsd_serv_ref); + percpu_ref_put(&nn->nfsd_net_ref); }
-static void nfsd_serv_done(struct percpu_ref *ref) +static void nfsd_net_done(struct percpu_ref *ref) { - struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref); + struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref);
- complete(&nn->nfsd_serv_confirm_done); + complete(&nn->nfsd_net_confirm_done); }
-static void nfsd_serv_free(struct percpu_ref *ref) +static void nfsd_net_free(struct percpu_ref *ref) { - struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref); + struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref);
- complete(&nn->nfsd_serv_free_done); + complete(&nn->nfsd_net_free_done); }
/* @@ -436,6 +436,10 @@ static void nfsd_shutdown_net(struct net *net)
if (!nn->nfsd_net_up) return; + + percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done); + wait_for_completion(&nn->nfsd_net_confirm_done); + nfsd_export_flush(net); nfs4_state_shutdown_net(net); nfsd_reply_cache_shutdown(nn); @@ -444,7 +448,10 @@ static void nfsd_shutdown_net(struct net *net) lockd_down(net); nn->lockd_up = false; } - percpu_ref_exit(&nn->nfsd_serv_ref); + + wait_for_completion(&nn->nfsd_net_free_done); + percpu_ref_exit(&nn->nfsd_net_ref); + nn->nfsd_net_up = false; nfsd_shutdown_generic(); } @@ -526,11 +533,6 @@ void nfsd_destroy_serv(struct net *net)
lockdep_assert_held(&nfsd_mutex);
- percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done); - wait_for_completion(&nn->nfsd_serv_confirm_done); - wait_for_completion(&nn->nfsd_serv_free_done); - /* percpu_ref_exit is called in nfsd_shutdown_net */ - spin_lock(&nfsd_notifier_lock); nn->nfsd_serv = NULL; spin_unlock(&nfsd_notifier_lock); @@ -652,12 +654,12 @@ int nfsd_create_serv(struct net *net) if (nn->nfsd_serv) return 0;
- error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free, + error = percpu_ref_init(&nn->nfsd_net_ref, nfsd_net_free, 0, GFP_KERNEL); if (error) return error; - init_completion(&nn->nfsd_serv_free_done); - init_completion(&nn->nfsd_serv_confirm_done); + init_completion(&nn->nfsd_net_free_done); + init_completion(&nn->nfsd_net_confirm_done);
if (nfsd_max_blksize == 0) nfsd_max_blksize = nfsd_get_default_max_blksize(); @@ -665,13 +667,16 @@ int nfsd_create_serv(struct net *net) serv = svc_create_pooled(nfsd_programs, ARRAY_SIZE(nfsd_programs), &nn->nfsd_svcstats, nfsd_max_blksize, nfsd); - if (serv == NULL) + if (serv == NULL) { + percpu_ref_exit(&nn->nfsd_net_ref); return -ENOMEM; + }
serv->sv_maxconn = nn->max_connections; error = svc_bind(serv, net); if (error < 0) { svc_destroy(&serv); + percpu_ref_exit(&nn->nfsd_net_ref); return error; } spin_lock(&nfsd_notifier_lock); diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index a61ada4fd920..0380fc2a63fb 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -67,7 +67,8 @@ static inline bool nfsd_attrs_valid(struct nfsd_attrs *attrs) struct iattr *iap = attrs->na_iattr;
return (iap->ia_valid || (attrs->na_seclabel && - attrs->na_seclabel->len)); + attrs->na_seclabel->len) || + attrs->na_pacl || attrs->na_dpacl); }
__be32 nfserrno (int errno); diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index f976949d2634..c3797386e08b 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -247,8 +247,15 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, /* * Include parent/name in notification either if some notification * groups require parent info or the parent is interested in this event. + * The parent interest in ACCESS/MODIFY events does not apply to special + * files, where read/write are not on the filesystem of the parent and + * events can provide an undesirable side-channel for information + * exfiltration. */ - parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS; + parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS && + !(data_type == FSNOTIFY_EVENT_PATH && + d_is_special(dentry) && + (mask & (FS_ACCESS | FS_MODIFY))); if (parent_needed || parent_interested) { /* When notifying parent, child should be passed as data */ WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type)); diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 902dc8ba878e..f1122ac5be62 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -1396,6 +1396,18 @@ static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe, return iter_file_splice_write(pipe, file, ppos, len, flags); }
+/* + * ntfs_file_fsync - file_operations::fsync + */ +static int ntfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) +{ + struct inode *inode = file_inode(file); + if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) + return -EIO; + + return generic_file_fsync(file, start, end, datasync); +} + // clang-format off const struct inode_operations ntfs_file_inode_operations = { .getattr = ntfs_getattr, @@ -1420,7 +1432,7 @@ const struct file_operations ntfs_file_operations = { .splice_write = ntfs_file_splice_write, .mmap = ntfs_file_mmap, .open = ntfs_file_open, - .fsync = generic_file_fsync, + .fsync = ntfs_file_fsync, .fallocate = ntfs_fallocate, .release = ntfs_file_release, }; diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 5491169eaa16..6b04ad5d5f51 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -2077,6 +2077,29 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, return err; }
+static struct page *ntfs_lock_new_page(struct address_space *mapping, + pgoff_t index, gfp_t gfp) +{ + struct folio *folio = __filemap_get_folio(mapping, index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); + struct page *page; + + if (IS_ERR(folio)) + return ERR_CAST(folio); + + if (!folio_test_uptodate(folio)) + return folio_file_page(folio, index); + + /* Use a temporary page to avoid data corruption */ + folio_unlock(folio); + folio_put(folio); + page = alloc_page(gfp); + if (!page) + return ERR_PTR(-ENOMEM); + __SetPageLocked(page); + return page; +} + /* * ni_readpage_cmpr * @@ -2131,9 +2154,9 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio) if (i == idx) continue;
- pg = find_or_create_page(mapping, index, gfp_mask); - if (!pg) { - err = -ENOMEM; + pg = ntfs_lock_new_page(mapping, index, gfp_mask); + if (IS_ERR(pg)) { + err = PTR_ERR(pg); goto out1; } pages[i] = pg; @@ -2232,13 +2255,13 @@ int ni_decompress_file(struct ntfs_inode *ni) for (i = 0; i < pages_per_frame; i++, index++) { struct page *pg;
- pg = find_or_create_page(mapping, index, gfp_mask); - if (!pg) { + pg = ntfs_lock_new_page(mapping, index, gfp_mask); + if (IS_ERR(pg)) { while (i--) { unlock_page(pages[i]); put_page(pages[i]); } - err = -ENOMEM; + err = PTR_ERR(pg); goto out; } pages[i] = pg; diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index ff7f241a25b2..a1040060b081 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -980,11 +980,12 @@ static inline __le64 kernel2nt(const struct timespec64 *ts) */ static inline void nt2kernel(const __le64 tm, struct timespec64 *ts) { - u64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970; + s32 t32; + /* use signed 64 bit to support timestamps prior to epoch. xfstest 258. */ + s64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
- // WARNING: do_div changes its first argument(!) - ts->tv_nsec = do_div(t, _100ns2seconds) * 100; - ts->tv_sec = t; + ts->tv_sec = div_s64_rem(t, _100ns2seconds, &t32); + ts->tv_nsec = t32 * 100; }
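The nt2kernel() change above moves from unsigned do_div() to signed div_s64_rem() so timestamps before the Unix epoch stay representable. A small userspace illustration of the same arithmetic, assuming the usual 1601-to-1970 offset of 11644473600 seconds (lldiv() truncates toward zero, like div_s64_rem(); this is not the kernel helper itself):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define HUNDRED_NS_PER_SEC   10000000LL
#define SECS_1601_TO_1970    11644473600LL   /* assumed NT-to-Unix epoch offset */

/* Convert an NT timestamp (100ns units since 1601) to Unix sec/nsec
 * using signed division, so pre-1970 values do not wrap. */
static void nt_to_unix(int64_t nt, int64_t *sec, int32_t *nsec)
{
        int64_t t = nt - HUNDRED_NS_PER_SEC * SECS_1601_TO_1970;
        lldiv_t d = lldiv(t, HUNDRED_NS_PER_SEC);

        *sec  = d.quot;
        *nsec = (int32_t)d.rem * 100;
}

int main(void)
{
        int64_t sec;
        int32_t nsec;

        /* One second before the Unix epoch, expressed as an NT timestamp. */
        nt_to_unix(HUNDRED_NS_PER_SEC * (SECS_1601_TO_1970 - 1), &sec, &nsec);
        printf("%lld.%09d\n", (long long)sec, nsec);
        return 0;
}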
static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb) diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c index 662add939da7..66fb690d7751 100644 --- a/fs/ntfs3/run.c +++ b/fs/ntfs3/run.c @@ -984,8 +984,12 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, if (!dlcn) return -EINVAL;
- if (check_add_overflow(prev_lcn, dlcn, &lcn)) + /* Check special combination: 0 + SPARSE_LCN64. */ + if (!prev_lcn && dlcn == SPARSE_LCN64) { + lcn = SPARSE_LCN64; + } else if (check_add_overflow(prev_lcn, dlcn, &lcn)) { return -EINVAL; + } prev_lcn = lcn; } else { /* The size of 'dlcn' can't be > 8. */ diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c index 6a0f6b0a3ab2..89d126c155c7 100644 --- a/fs/ntfs3/super.c +++ b/fs/ntfs3/super.c @@ -892,6 +892,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
+ /* Set dummy blocksize to read boot_block. */ + if (!sb_min_blocksize(sb, PAGE_SIZE)) { + return -EINVAL; + } + read_boot: bh = ntfs_bread(sb, boot_block); if (!bh) diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 6ac4dcd54588..e93fc842bb20 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -1992,6 +1992,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, }
cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; + if (!le16_to_cpu(cl->cl_next_free_rec) || + le16_to_cpu(cl->cl_next_free_rec) > le16_to_cpu(cl->cl_count)) { + status = ocfs2_error(ac->ac_inode->i_sb, + "Chain allocator dinode %llu has invalid next " + "free chain record %u, but only %u total\n", + (unsigned long long)le64_to_cpu(fe->i_blkno), + le16_to_cpu(cl->cl_next_free_rec), + le16_to_cpu(cl->cl_count)); + goto bail; + }
victim = ocfs2_find_victim_chain(cl); ac->ac_chain = victim; diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 17133adfe798..ee9c95811477 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -1011,6 +1011,8 @@ static int smb3_reconfigure(struct fs_context *fc) rc = smb3_sync_session_ctx_passwords(cifs_sb, ses); if (rc) { mutex_unlock(&ses->session_mutex); + kfree_sensitive(new_password); + kfree_sensitive(new_password2); return rc; }
diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c index ecfc57508671..d3483d9c757c 100644 --- a/fs/smb/server/mgmt/tree_connect.c +++ b/fs/smb/server/mgmt/tree_connect.c @@ -78,7 +78,6 @@ ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name) tree_conn->t_state = TREE_NEW; status.tree_conn = tree_conn; atomic_set(&tree_conn->refcount, 1); - init_waitqueue_head(&tree_conn->refcount_q);
ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn, KSMBD_DEFAULT_GFP)); @@ -100,14 +99,8 @@ ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon) { - /* - * Checking waitqueue to releasing tree connect on - * tree disconnect. waitqueue_active is safe because it - * uses atomic operation for condition. - */ - if (!atomic_dec_return(&tcon->refcount) && - waitqueue_active(&tcon->refcount_q)) - wake_up(&tcon->refcount_q); + if (atomic_dec_and_test(&tcon->refcount)) + kfree(tcon); }
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess, @@ -119,14 +112,11 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess, xa_erase(&sess->tree_conns, tree_conn->id); write_unlock(&sess->tree_conns_lock);
- if (!atomic_dec_and_test(&tree_conn->refcount)) - wait_event(tree_conn->refcount_q, - atomic_read(&tree_conn->refcount) == 0); - ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id); ksmbd_release_tree_conn_id(sess, tree_conn->id); ksmbd_share_config_put(tree_conn->share_conf); - kfree(tree_conn); + if (atomic_dec_and_test(&tree_conn->refcount)) + kfree(tree_conn); return ret; }
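The tree-connect change above drops the waitqueue handshake in favour of plain reference counting: whichever path performs the final atomic_dec_and_test() frees the object, and nobody blocks waiting for the count to reach zero. A userspace sketch of that idiom using C11 atomics (demo_* names are invented, not ksmbd code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_conn {
        atomic_int refcount;
};

static struct demo_conn *demo_conn_new(void)
{
        struct demo_conn *c = malloc(sizeof(*c));

        if (c)
                atomic_init(&c->refcount, 1);   /* creator holds the first ref */
        return c;
}

static void demo_conn_get(struct demo_conn *c)
{
        atomic_fetch_add(&c->refcount, 1);
}

static void demo_conn_put(struct demo_conn *c)
{
        /* atomic_dec_and_test() equivalent: true only for the final put */
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(c);
        }
}

int main(void)
{
        struct demo_conn *c = demo_conn_new();

        demo_conn_get(c);       /* e.g. an in-flight request */
        demo_conn_put(c);       /* request completes */
        demo_conn_put(c);       /* disconnect drops the initial reference */
        return 0;
}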
diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h index a42cdd051041..f0023d86716f 100644 --- a/fs/smb/server/mgmt/tree_connect.h +++ b/fs/smb/server/mgmt/tree_connect.h @@ -33,7 +33,6 @@ struct ksmbd_tree_connect { int maximal_access; bool posix_extensions; atomic_t refcount; - wait_queue_head_t refcount_q; unsigned int t_state; };
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c index 00805aed0b07..66198ed26aec 100644 --- a/fs/smb/server/mgmt/user_session.c +++ b/fs/smb/server/mgmt/user_session.c @@ -325,8 +325,10 @@ struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn, sess = ksmbd_session_lookup(conn, id); if (!sess && conn->binding) sess = ksmbd_session_lookup_slowpath(id); - if (sess && sess->state != SMB2_SESSION_VALID) + if (sess && sess->state != SMB2_SESSION_VALID) { + ksmbd_user_session_put(sess); sess = NULL; + } return sess; }
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index cd42d2581266..e2cde9723001 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -2190,7 +2190,6 @@ int smb2_tree_disconnect(struct ksmbd_work *work) goto err_out; }
- WARN_ON_ONCE(atomic_dec_and_test(&tcon->refcount)); tcon->t_state = TREE_DISCONNECTED; write_unlock(&sess->tree_conns_lock);
@@ -2200,8 +2199,6 @@ int smb2_tree_disconnect(struct ksmbd_work *work) goto err_out; }
- work->tcon = NULL; - rsp->StructureSize = cpu_to_le16(4); err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_tree_disconnect_rsp)); @@ -2366,7 +2363,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len, int rc = 0; unsigned int next = 0;
- if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + + if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + 1 + le16_to_cpu(eabuf->EaValueLength)) return -EINVAL;
@@ -2443,7 +2440,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len, break; }
- if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + + if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + 1 + le16_to_cpu(eabuf->EaValueLength)) { rc = -EINVAL; break; @@ -4929,8 +4926,10 @@ static int get_file_all_info(struct ksmbd_work *work,
ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); - if (ret) + if (ret) { + kfree(filename); return ret; + }
ksmbd_debug(SMB, "filename = %s\n", filename); delete_pending = ksmbd_inode_pending_delete(fp); @@ -8107,7 +8106,7 @@ int smb2_ioctl(struct ksmbd_work *work) id = req->VolatileFileId;
if (req->Flags != cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL)) { - rsp->hdr.Status = STATUS_NOT_SUPPORTED; + ret = -EOPNOTSUPP; goto out; }
@@ -8127,8 +8126,9 @@ int smb2_ioctl(struct ksmbd_work *work) case FSCTL_DFS_GET_REFERRALS: case FSCTL_DFS_GET_REFERRALS_EX: /* Not support DFS yet */ + ret = -EOPNOTSUPP; rsp->hdr.Status = STATUS_FS_DRIVER_REQUIRED; - goto out; + goto out2; case FSCTL_CREATE_OR_GET_OBJECT_ID: { struct file_object_buf_type1_ioctl_rsp *obj_buf; @@ -8418,8 +8418,10 @@ int smb2_ioctl(struct ksmbd_work *work) rsp->hdr.Status = STATUS_BUFFER_TOO_SMALL; else if (ret < 0 || rsp->hdr.Status == 0) rsp->hdr.Status = STATUS_INVALID_PARAMETER; + +out2: smb2_set_err_rsp(work); - return 0; + return ret; }
/** diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 9d38a651431c..eec509d3a7fa 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -333,6 +333,9 @@ static int check_lock_range(struct file *filp, loff_t start, loff_t end, struct file_lock_context *ctx = locks_inode_context(file_inode(filp)); int error = 0;
+ if (start == end) + return 0; + if (!ctx || list_empty_careful(&ctx->flc_posix)) return 0;
@@ -839,7 +842,7 @@ int ksmbd_vfs_truncate(struct ksmbd_work *work, if (size < inode->i_size) { err = check_lock_range(filp, size, inode->i_size - 1, WRITE); - } else { + } else if (size > inode->i_size) { err = check_lock_range(filp, inode->i_size, size - 1, WRITE); } diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c index dfed6fce8904..6ef116585af6 100644 --- a/fs/smb/server/vfs_cache.c +++ b/fs/smb/server/vfs_cache.c @@ -112,40 +112,62 @@ int ksmbd_query_inode_status(struct dentry *dentry)
read_lock(&inode_hash_lock); ci = __ksmbd_inode_lookup(dentry); - if (ci) { - ret = KSMBD_INODE_STATUS_OK; - if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS)) - ret = KSMBD_INODE_STATUS_PENDING_DELETE; - atomic_dec(&ci->m_count); - } read_unlock(&inode_hash_lock); + if (!ci) + return ret; + + down_read(&ci->m_lock); + if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS)) + ret = KSMBD_INODE_STATUS_PENDING_DELETE; + else + ret = KSMBD_INODE_STATUS_OK; + up_read(&ci->m_lock); + + atomic_dec(&ci->m_count); return ret; }
bool ksmbd_inode_pending_delete(struct ksmbd_file *fp) { - return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS)); + struct ksmbd_inode *ci = fp->f_ci; + int ret; + + down_read(&ci->m_lock); + ret = (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS)); + up_read(&ci->m_lock); + + return ret; }
void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp) { - fp->f_ci->m_flags |= S_DEL_PENDING; + struct ksmbd_inode *ci = fp->f_ci; + + down_write(&ci->m_lock); + ci->m_flags |= S_DEL_PENDING; + up_write(&ci->m_lock); }
void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp) { - fp->f_ci->m_flags &= ~S_DEL_PENDING; + struct ksmbd_inode *ci = fp->f_ci; + + down_write(&ci->m_lock); + ci->m_flags &= ~S_DEL_PENDING; + up_write(&ci->m_lock); }
void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp, int file_info) { - if (ksmbd_stream_fd(fp)) { - fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM; - return; - } + struct ksmbd_inode *ci = fp->f_ci;
- fp->f_ci->m_flags |= S_DEL_ON_CLS; + down_write(&ci->m_lock); + if (ksmbd_stream_fd(fp)) + ci->m_flags |= S_DEL_ON_CLS_STREAM; + else + ci->m_flags |= S_DEL_ON_CLS; + up_write(&ci->m_lock); }
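The vfs_cache changes above wrap every read and update of ci->m_flags in the inode's m_lock so concurrent read-modify-write cycles on the flag word cannot be lost. A hedged userspace analogue using a pthread rwlock in place of the kernel rw_semaphore (the demo type and flag names are illustrative only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define S_DEL_PENDING  (1u << 0)
#define S_DEL_ON_CLS   (1u << 1)

struct demo_inode {
        pthread_rwlock_t lock;
        unsigned int flags;
};

static struct demo_inode ci = { .lock = PTHREAD_RWLOCK_INITIALIZER };

static bool demo_pending_delete(struct demo_inode *di)
{
        bool ret;

        pthread_rwlock_rdlock(&di->lock);       /* readers can run concurrently */
        ret = di->flags & (S_DEL_PENDING | S_DEL_ON_CLS);
        pthread_rwlock_unlock(&di->lock);
        return ret;
}

static void demo_set_pending_delete(struct demo_inode *di)
{
        pthread_rwlock_wrlock(&di->lock);       /* writers are exclusive */
        di->flags |= S_DEL_PENDING;
        pthread_rwlock_unlock(&di->lock);
}

int main(void)
{
        demo_set_pending_delete(&ci);
        printf("pending delete: %d\n", demo_pending_delete(&ci));
        return 0;
}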
static void ksmbd_inode_hash(struct ksmbd_inode *ci) @@ -257,27 +279,41 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp) struct file *filp;
filp = fp->filp; - if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) { - ci->m_flags &= ~S_DEL_ON_CLS_STREAM; - err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp), - &filp->f_path, - fp->stream.name, - true); - if (err) - pr_err("remove xattr failed : %s\n", - fp->stream.name); + + if (ksmbd_stream_fd(fp)) { + bool remove_stream_xattr = false; + + down_write(&ci->m_lock); + if (ci->m_flags & S_DEL_ON_CLS_STREAM) { + ci->m_flags &= ~S_DEL_ON_CLS_STREAM; + remove_stream_xattr = true; + } + up_write(&ci->m_lock); + + if (remove_stream_xattr) { + err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp), + &filp->f_path, + fp->stream.name, + true); + if (err) + pr_err("remove xattr failed : %s\n", + fp->stream.name); + } }
if (atomic_dec_and_test(&ci->m_count)) { + bool do_unlink = false; + down_write(&ci->m_lock); if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) { ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING); - up_write(&ci->m_lock); - ksmbd_vfs_unlink(filp); - down_write(&ci->m_lock); + do_unlink = true; } up_write(&ci->m_lock);
+ if (do_unlink) + ksmbd_vfs_unlink(filp); + ksmbd_inode_free(ci); } } diff --git a/fs/xfs/scrub/attr_repair.c b/fs/xfs/scrub/attr_repair.c index c7eb94069caf..09d63aa10314 100644 --- a/fs/xfs/scrub/attr_repair.c +++ b/fs/xfs/scrub/attr_repair.c @@ -333,7 +333,6 @@ xrep_xattr_salvage_remote_attr( .attr_filter = ent->flags & XFS_ATTR_NSP_ONDISK_MASK, .namelen = rentry->namelen, .name = rentry->name, - .value = ab->value, .valuelen = be32_to_cpu(rentry->valuelen), }; unsigned int namesize; @@ -363,6 +362,7 @@ xrep_xattr_salvage_remote_attr( error = -EDEADLOCK; if (error) return error; + args.value = ab->value;
/* Look up the remote value and stash it for reconstruction. */ error = xfs_attr3_leaf_getvalue(leaf_bp, &args); diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c index f683b7a9323f..e0811f9e78dc 100644 --- a/fs/xfs/xfs_attr_item.c +++ b/fs/xfs/xfs_attr_item.c @@ -739,7 +739,7 @@ xfs_attr_recover_work( struct xfs_attri_log_item *attrip = ATTRI_ITEM(lip); struct xfs_attr_intent *attr; struct xfs_mount *mp = lip->li_log->l_mp; - struct xfs_inode *ip; + struct xfs_inode *ip = NULL; struct xfs_da_args *args; struct xfs_trans *tp; struct xfs_trans_res resv; diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 47549cfa61cd..f4b9cd959b68 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -900,6 +900,7 @@ xfs_buf_item_init( map_size = DIV_ROUND_UP(chunks, NBWORD);
if (map_size > XFS_BLF_DATAMAP_SIZE) { + xfs_buf_item_free_format(bip); kmem_cache_free(xfs_buf_item_cache, bip); xfs_err(mp, "buffer item dirty bitmap (%u uints) too small to reflect %u bytes!", diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 3212b5bf3fb3..ee2530fabeeb 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -1134,7 +1134,7 @@ xfs_qm_quotacheck_dqadjust(
error = xfs_dquot_attach_buf(NULL, dqp); if (error) - return error; + goto out_unlock;
trace_xfs_dqadjust(dqp);
@@ -1164,8 +1164,9 @@ xfs_qm_quotacheck_dqadjust( }
dqp->q_flags |= XFS_DQFLAG_DIRTY; +out_unlock: xfs_qm_dqput(dqp); - return 0; + return error; }
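The xfs_qm fix above routes the early error through the out_unlock label so the dquot is released on failure as well as success. A tiny, hedged illustration of that single-exit unwind pattern (the demo_* helpers are made up and simply simulate a failing allocation):

#include <stdio.h>
#include <errno.h>

struct demo_dquot { int held; };

static int demo_attach_buf(struct demo_dquot *dq) { return -ENOMEM; /* simulated failure */ }
static void demo_put(struct demo_dquot *dq) { dq->held = 0; puts("released"); }

static int demo_adjust(struct demo_dquot *dq)
{
        int error;

        dq->held = 1;                   /* acquire a hold on the object */

        error = demo_attach_buf(dq);
        if (error)
                goto out_unlock;        /* was: return error (leaked the hold) */

        /* ... adjust counters ... */

out_unlock:
        demo_put(dq);                   /* release on success and failure alike */
        return error;
}

int main(void)
{
        struct demo_dquot dq = { 0 };

        printf("demo_adjust() = %d\n", demo_adjust(&dq));
        return 0;
}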
/* diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h index 513837632b7d..d7891d08f67a 100644 --- a/include/drm/drm_buddy.h +++ b/include/drm/drm_buddy.h @@ -10,6 +10,7 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/rbtree.h>
#include <drm/drm_print.h>
@@ -53,7 +54,11 @@ struct drm_buddy_block { * a list, if so desired. As soon as the block is freed with * drm_buddy_free* ownership is given back to the mm. */ - struct list_head link; + union { + struct rb_node rb; + struct list_head link; + }; + struct list_head tmp_link; };
@@ -68,7 +73,7 @@ struct drm_buddy_block { */ struct drm_buddy { /* Maintain a free list for each order. */ - struct list_head *free_list; + struct rb_root **free_trees;
/* * Maintain explicit binary tree(s) to track the allocation of the @@ -94,7 +99,7 @@ struct drm_buddy { };
static inline u64 -drm_buddy_block_offset(struct drm_buddy_block *block) +drm_buddy_block_offset(const struct drm_buddy_block *block) { return block->header & DRM_BUDDY_HEADER_OFFSET; } diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index eaac5e665892..e9e7c9d14376 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -333,6 +333,12 @@ struct drm_edid_ident { const char *name; };
+#define DRM_EDID_IDENT_INIT(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id, _name) \ +{ \ + .panel_id = drm_edid_encode_panel_id(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id), \ + .name = _name, \ +} + #define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
/* Short Audio Descriptor */ diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 5ca2d5699620..b9f19da37b08 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -97,27 +97,6 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, list_add(&page->lru, &balloon->pages); }
-/* - * balloon_page_delete - delete a page from balloon's page list and clear - * the page->private assignement accordingly. - * @page : page to be released from balloon's page list - * - * Caller must ensure the page is locked and the spin_lock protecting balloon - * pages list is held before deleting a page from the balloon device. - */ -static inline void balloon_page_delete(struct page *page) -{ - __ClearPageOffline(page); - __ClearPageMovable(page); - set_page_private(page, 0); - /* - * No touch page.lru field once @page has been isolated - * because VM is using the field. - */ - if (!PageIsolated(page)) - list_del(&page->lru); -} - /* * balloon_page_device - get the b_dev_info descriptor for the balloon device * that enqueues the given page. @@ -141,12 +120,6 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, list_add(&page->lru, &balloon->pages); }
-static inline void balloon_page_delete(struct page *page) -{ - __ClearPageOffline(page); - list_del(&page->lru); -} - static inline gfp_t balloon_mapping_gfp_mask(void) { return GFP_HIGHUSER; @@ -154,6 +127,22 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
#endif /* CONFIG_BALLOON_COMPACTION */
+/* + * balloon_page_finalize - prepare a balloon page that was removed from the + * balloon list for release to the page allocator + * @page: page to be released to the page allocator + * + * Caller must ensure that the page is locked. + */ +static inline void balloon_page_finalize(struct page *page) +{ + if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) { + __ClearPageMovable(page); + set_page_private(page, 0); + } + __ClearPageOffline(page); +} + /* * balloon_page_push - insert a page into a page list. * @head : pointer to list diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index beb2a1e1bac5..8e41eff78f42 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -13,6 +13,19 @@
#ifndef __ASSEMBLY__
+/* + * C23 introduces "auto" as a standard way to define type-inferred + * variables, but "auto" has been a (useless) keyword even since K&R C, + * so it has always been "namespace reserved." + * + * Until at some future time we require C23 support, we need the gcc + * extension __auto_type, but there is no reason to put that elsewhere + * in the source code. + */ +#if __STDC_VERSION__ < 202311L +# define auto __auto_type +#endif + /* * Skipped when running bindgen due to a libclang issue; * see https://github.com/rust-lang/rust-bindgen/issues/2244. diff --git a/include/linux/fs.h b/include/linux/fs.h index 37a01c9d9658..87720e1b5419 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3446,7 +3446,7 @@ struct offset_ctx { void simple_offset_init(struct offset_ctx *octx); int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry); void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry); -int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, +void simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); int simple_offset_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 0bd581003cd5..60de63e46b33 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -44,6 +44,7 @@ struct gen_pool; * @nr: The number of zeroed bits we're looking for * @data: optional additional data used by the callback * @pool: the pool being allocated from + * @start_addr: start address of memory chunk */ typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6caaa62d2b1f..1e186449dd04 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -337,6 +337,29 @@ static inline int hrtimer_callback_running(struct hrtimer *timer) return timer->base->running == timer; }
+/** + * hrtimer_update_function - Update the timer's callback function + * @timer: Timer to update + * @function: New callback function + * + * Only safe to call if the timer is not enqueued. Can be called in the callback function if the + * timer is not enqueued at the same time (see the comments above HRTIMER_STATE_ENQUEUED). + */ +static inline void hrtimer_update_function(struct hrtimer *timer, + enum hrtimer_restart (*function)(struct hrtimer *)) +{ +#ifdef CONFIG_PROVE_LOCKING + guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock); + + if (WARN_ON_ONCE(hrtimer_is_queued(timer))) + return; + + if (WARN_ON_ONCE(!function)) + return; +#endif + timer->function = function; +} + /* Forward a hrtimer so it expires after now: */ extern u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 8aef9bb6ad57..cf1282f819ea 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1268,6 +1268,12 @@ struct journal_s */ struct lockdep_map j_trans_commit_map; #endif + /** + * @jbd2_trans_commit_key: + * + * "struct lock_class_key" for @j_trans_commit_map + */ + struct lock_class_key jbd2_trans_commit_key;
/** * @j_fc_cleanup_callback: diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6bbfc8aa42e8..4a54449dbfad 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -28,6 +28,7 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t; #define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u) #define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u) #define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u) +#define KASAN_VMALLOC_KEEP_TAG ((__force kasan_vmalloc_flags_t)0x08u)
#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply exsiting page range */ #define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */ @@ -607,6 +608,16 @@ static __always_inline void kasan_poison_vmalloc(const void *start, __kasan_poison_vmalloc(start, size); }
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags); +static __always_inline void +kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags) +{ + if (kasan_enabled()) + __kasan_unpoison_vmap_areas(vms, nr_vms, flags); +} + #else /* CONFIG_KASAN_VMALLOC */
static inline void kasan_populate_early_vm_area_shadow(void *start, @@ -631,6 +642,11 @@ static inline void *kasan_unpoison_vmalloc(const void *start, static inline void kasan_poison_vmalloc(const void *start, unsigned long size) { }
+static __always_inline void +kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags) +{ } + #endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h index 9202f4b24343..e1b8018ba639 100644 --- a/include/linux/nfslocalio.h +++ b/include/linux/nfslocalio.h @@ -47,8 +47,8 @@ nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *, const fmode_t) __must_hold(rcu);
struct nfsd_localio_operations { - bool (*nfsd_serv_try_get)(struct net *); - void (*nfsd_serv_put)(struct net *); + bool (*nfsd_net_try_get)(struct net *); + void (*nfsd_net_put)(struct net *); struct nfsd_file *(*nfsd_open_local_fh)(struct net *, struct auth_domain *, struct rpc_clnt *, @@ -69,12 +69,12 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *, static inline void nfs_to_nfsd_net_put(struct net *net) { /* - * Once reference to nfsd_serv is dropped, NFSD could be - * unloaded, so ensure safe return from nfsd_file_put_local() - * by always taking RCU. + * Once reference to net (and associated nfsd_serv) is dropped, NFSD + * could be unloaded, so ensure safe return from nfsd_net_put() by + * always taking RCU. */ rcu_read_lock(); - nfs_to->nfsd_serv_put(net); + nfs_to->nfsd_net_put(net); rcu_read_unlock(); }
diff --git a/include/linux/reset.h b/include/linux/reset.h index 514ddf003efc..4b31d683776e 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -2,6 +2,7 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_
+#include <linux/bits.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/types.h> diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 5e0dd47a0412..cfb89989b369 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -1024,7 +1024,7 @@ struct sdw_stream_runtime { int m_rt_count; };
-struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name); +struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name, enum sdw_stream_type type); void sdw_release_stream(struct sdw_stream_runtime *stream);
int sdw_compute_params(struct sdw_bus *bus); diff --git a/include/linux/tpm.h b/include/linux/tpm.h index a3d8305e88a5..117e0f620d52 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -26,7 +26,9 @@ #include <crypto/aes.h>
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ -#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE + +#define TPM2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE +#define TPM2_MAX_PCR_BANKS 8
struct tpm_chip; struct trusted_key_payload; @@ -68,7 +70,7 @@ enum tpm2_curves {
struct tpm_digest { u16 alg_id; - u8 digest[TPM_MAX_DIGEST_SIZE]; + u8 digest[TPM2_MAX_DIGEST_SIZE]; } __packed;
struct tpm_bank_info { @@ -188,7 +190,7 @@ struct tpm_chip { unsigned int groups_cnt;
u32 nr_allocated_banks; - struct tpm_bank_info *allocated_banks; + struct tpm_bank_info allocated_banks[TPM2_MAX_PCR_BANKS]; #ifdef CONFIG_ACPI acpi_handle acpi_dev_handle; char ppi_version[TPM_PPI_VERSION_LEN + 1]; diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h index 1b861f2100b6..41b1c72745ca 100644 --- a/include/linux/tty_port.h +++ b/include/linux/tty_port.h @@ -235,7 +235,7 @@ bool tty_port_carrier_raised(struct tty_port *port); void tty_port_raise_dtr_rts(struct tty_port *port); void tty_port_lower_dtr_rts(struct tty_port *port); void tty_port_hangup(struct tty_port *port); -void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); +void __tty_port_tty_hangup(struct tty_port *port, bool check_clocal, bool async); void tty_port_tty_wakeup(struct tty_port *port); int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, struct file *filp); @@ -254,4 +254,23 @@ static inline int tty_port_users(struct tty_port *port) return port->count + port->blocked_open; }
+/** + * tty_port_tty_hangup - helper to hang up a tty asynchronously + * @port: tty port + * @check_clocal: hang only ttys with %CLOCAL unset? + */ +static inline void tty_port_tty_hangup(struct tty_port *port, bool check_clocal) +{ + __tty_port_tty_hangup(port, check_clocal, true); +} + +/** + * tty_port_tty_vhangup - helper to hang up a tty synchronously + * @port: tty port + */ +static inline void tty_port_tty_vhangup(struct tty_port *port) +{ + __tty_port_tty_hangup(port, false, false); +} + #endif diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 99da27c032d7..f889be76eaff 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -102,6 +102,13 @@ struct vfio_pci_core_device { struct rw_semaphore memory_lock; };
+enum vfio_pci_io_width { + VFIO_PCI_IO_WIDTH_1 = 1, + VFIO_PCI_IO_WIDTH_2 = 2, + VFIO_PCI_IO_WIDTH_4 = 4, + VFIO_PCI_IO_WIDTH_8 = 8, +}; + /* Will be exported for vfio pci drivers usage */ int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev, unsigned int type, unsigned int subtype, @@ -137,7 +144,8 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, void __iomem *io, char __user *buf, loff_t off, size_t count, size_t x_start, - size_t x_end, bool iswrite); + size_t x_end, bool iswrite, + enum vfio_pci_io_width max_width); bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, loff_t reg_start, size_t reg_cnt, loff_t *buf_offset, diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 0af330cf91c3..2e55a13ed3bb 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -192,8 +192,7 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx); * other instances to take control of the device. * * This function has to be called only after &v4l2_m2m_ops->device_run - * callback has been called on the driver. To prevent recursion, it should - * not be called directly from the &v4l2_m2m_ops->device_run callback though. + * callback has been called on the driver. */ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, struct v4l2_m2m_ctx *m2m_ctx); diff --git a/include/net/ip.h b/include/net/ip.h index 5f0f1215d2f9..c65ca2765e29 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -470,12 +470,14 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { const struct rtable *rt = dst_rtable(dst); + const struct net_device *dev; unsigned int mtu, res; struct net *net;
rcu_read_lock();
- net = dev_net_rcu(dst_dev(dst)); + dev = dst_dev_rcu(dst); + net = dev_net_rcu(dev); if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) || ip_mtu_locked(dst) || !forwarding) { @@ -489,7 +491,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, if (mtu) goto out;
- mtu = READ_ONCE(dst_dev(dst)->mtu); + mtu = READ_ONCE(dev->mtu);
if (unlikely(ip_mtu_locked(dst))) { if (rt->rt_uses_gateway && mtu > 576) diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 6dbdf60b342f..59f48ca3abdf 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -274,7 +274,7 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb) unsigned int mtu;
if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) { - mtu = READ_ONCE(dst->dev->mtu); + mtu = READ_ONCE(dst_dev(dst)->mtu); mtu -= lwtunnel_headroom(dst->lwtstate, mtu); } else { mtu = dst_mtu(dst); @@ -337,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
mtu = IPV6_MIN_MTU; rcu_read_lock(); - idev = __in6_dev_get(dst->dev); + idev = __in6_dev_get(dst_dev_rcu(dst)); if (idev) mtu = READ_ONCE(idev->cnf.mtu6); rcu_read_unlock(); diff --git a/include/net/route.h b/include/net/route.h index 232b7bf55ba2..cbb4d5523062 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -369,7 +369,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst) const struct net *net;
rcu_read_lock(); - net = dev_net_rcu(dst_dev(dst)); + net = dst_dev_net_rcu(dst); hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); rcu_read_unlock(); } diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 4a8a4a63e99c..05f01ad0bfd9 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -1281,6 +1281,7 @@ struct drm_xe_exec { /** @exec_queue_id: Exec queue ID for the batch buffer */ __u32 exec_queue_id;
+#define DRM_XE_MAX_SYNCS 1024 /** @num_syncs: Amount of struct drm_xe_sync in array. */ __u32 num_syncs;
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 5fd5b4cf75ca..e0fe5160d8bf 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -38,6 +38,7 @@ #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) #define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3) #define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4) +#define MPTCP_PM_ADDR_FLAGS_MASK GENMASK(4, 0)
struct mptcp_info { __u8 mptcpi_subflows; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 68439eb0dc8f..adf2b0a1bb59 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2421,6 +2421,9 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer) goto out_wake; }
+ /* any generated CQE posted past this time should wake us up */ + iowq->cq_tail = iowq->cq_min_tail; + iowq->t.function = io_cqring_timer_wakeup; hrtimer_set_expires(timer, iowq->timeout); return HRTIMER_RESTART; diff --git a/io_uring/openclose.c b/io_uring/openclose.c index e3357dfa14ca..cea2d8600d8e 100644 --- a/io_uring/openclose.c +++ b/io_uring/openclose.c @@ -70,13 +70,13 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe open->filename = NULL; return ret; } + req->flags |= REQ_F_NEED_CLEANUP;
open->file_slot = READ_ONCE(sqe->file_index); if (open->file_slot && (open->how.flags & O_CLOEXEC)) return -EINVAL;
open->nofile = rlimit(RLIMIT_NOFILE); - req->flags |= REQ_F_NEED_CLEANUP; if (io_openat_force_async(open)) req->flags |= REQ_F_FORCE_ASYNC; return 0; diff --git a/io_uring/poll.c b/io_uring/poll.c index bfdb537572f7..c833fd18d0ef 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -1038,12 +1038,17 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED); /* successfully updated, don't complete poll request */ - if (!ret2 || ret2 == -EIOCBQUEUED) + if (ret2 == IOU_ISSUE_SKIP_COMPLETE) goto out; + /* request completed as part of the update, complete it */ + else if (ret2 == IOU_OK) + goto complete; }
- req_set_fail(preq); io_req_set_res(preq, -ECANCELED, 0); +complete: + if (preq->cqe.res < 0) + req_set_fail(preq); preq->io_task_work.func = io_req_task_complete; io_req_task_work_add(preq); out: diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index a9a0ca605d4a..9e4bf061bb83 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -103,8 +103,11 @@ static char kallsyms_get_symbol_type(unsigned int off) { /* * Get just the first code, look it up in the token table, - * and return the first char from this token. + * and return the first char from this token. If MSB of length + * is 1, it is a "big" symbol, so needs an additional byte. */ + if (kallsyms_names[off] & 0x80) + off++; return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]]; }
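The kallsyms fix above accounts for "big" symbols, whose length prefix has the most significant bit set and spills into a second byte, shifting the start of the name data by one. A standalone sketch of decoding such a length prefix, assuming the two-byte form keeps the low 7 bits in the first byte (illustrative only, not the kernel's decoder):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Return the encoded length and, via *skip, how many prefix bytes to
 * step over before the symbol data starts. */
static size_t decode_len(const uint8_t *p, size_t *skip)
{
        if (p[0] & 0x80) {              /* "big" symbol: two-byte length */
                *skip = 2;
                return (p[0] & 0x7f) | ((size_t)p[1] << 7);
        }
        *skip = 1;                      /* short form: single length byte */
        return p[0];
}

int main(void)
{
        const uint8_t small[] = { 0x05, 't', 'a', 'b', 'c', 'd' };
        const uint8_t big[]   = { 0x85, 0x01, 't' };    /* 0x85,0x01 -> 133 */
        size_t skip, len;

        len = decode_len(small, &skip);
        printf("short form: len=%zu, data starts at offset %zu\n", len, skip);

        len = decode_len(big, &skip);
        printf("big form:   len=%zu, data starts at offset %zu\n", len, skip);
        return 0;
}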
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 3c21c31796db..077e078032e0 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -90,8 +90,14 @@ static struct klp_func *klp_find_func(struct klp_object *obj, struct klp_func *func;
klp_for_each_func(obj, func) { + /* + * Besides identical old_sympos, also consider old_sympos + * of 0 and 1 are identical. + */ if ((strcmp(old_func->old_name, func->old_name) == 0) && - (old_func->old_sympos == func->old_sympos)) { + ((old_func->old_sympos == func->old_sympos) || + (old_func->old_sympos == 0 && func->old_sympos == 1) || + (old_func->old_sympos == 1 && func->old_sympos == 0))) { return func; } } diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 95baa12a1029..59d7b4f48c08 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -165,12 +165,13 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, * cpudl_clear - remove a CPU from the cpudl max-heap * @cp: the cpudl max-heap context * @cpu: the target CPU + * @online: the online state of the deadline runqueue * * Notes: assumes cpu_rq(cpu)->lock is locked * * Returns: (void) */ -void cpudl_clear(struct cpudl *cp, int cpu) +void cpudl_clear(struct cpudl *cp, int cpu, bool online) { int old_idx, new_cpu; unsigned long flags; @@ -183,7 +184,7 @@ void cpudl_clear(struct cpudl *cp, int cpu) if (old_idx == IDX_INVALID) { /* * Nothing to remove if old_idx was invalid. - * This could happen if a rq_offline_dl is + * This could happen if rq_online_dl or rq_offline_dl is * called for a CPU without -dl tasks running. */ } else { @@ -194,9 +195,12 @@ void cpudl_clear(struct cpudl *cp, int cpu) cp->elements[new_cpu].idx = old_idx; cp->elements[cpu].idx = IDX_INVALID; cpudl_heapify(cp, old_idx); - - cpumask_set_cpu(cpu, cp->free_cpus); } + if (likely(online)) + __cpumask_set_cpu(cpu, cp->free_cpus); + else + __cpumask_clear_cpu(cpu, cp->free_cpus); + raw_spin_unlock_irqrestore(&cp->lock, flags); }
@@ -227,7 +231,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl) cp->elements[new_idx].cpu = cpu; cp->elements[cpu].idx = new_idx; cpudl_heapify_up(cp, new_idx); - cpumask_clear_cpu(cpu, cp->free_cpus); + __cpumask_clear_cpu(cpu, cp->free_cpus); } else { cp->elements[old_idx].dl = dl; cpudl_heapify(cp, old_idx); @@ -236,26 +240,6 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl) raw_spin_unlock_irqrestore(&cp->lock, flags); }
-/* - * cpudl_set_freecpu - Set the cpudl.free_cpus - * @cp: the cpudl max-heap context - * @cpu: rd attached CPU - */ -void cpudl_set_freecpu(struct cpudl *cp, int cpu) -{ - cpumask_set_cpu(cpu, cp->free_cpus); -} - -/* - * cpudl_clear_freecpu - Clear the cpudl.free_cpus - * @cp: the cpudl max-heap context - * @cpu: rd attached CPU - */ -void cpudl_clear_freecpu(struct cpudl *cp, int cpu) -{ - cpumask_clear_cpu(cpu, cp->free_cpus); -} - /* * cpudl_init - initialize the cpudl structure * @cp: the cpudl max-heap context diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h index 0adeda93b5fb..ecff718d94ae 100644 --- a/kernel/sched/cpudeadline.h +++ b/kernel/sched/cpudeadline.h @@ -18,9 +18,7 @@ struct cpudl { #ifdef CONFIG_SMP int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask); void cpudl_set(struct cpudl *cp, int cpu, u64 dl); -void cpudl_clear(struct cpudl *cp, int cpu); +void cpudl_clear(struct cpudl *cp, int cpu, bool online); int cpudl_init(struct cpudl *cp); -void cpudl_set_freecpu(struct cpudl *cp, int cpu); -void cpudl_clear_freecpu(struct cpudl *cp, int cpu); void cpudl_cleanup(struct cpudl *cp); #endif /* CONFIG_SMP */ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 6ec66fef3f91..abd0fb2d839c 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1852,7 +1852,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) if (!dl_rq->dl_nr_running) { dl_rq->earliest_dl.curr = 0; dl_rq->earliest_dl.next = 0; - cpudl_clear(&rq->rd->cpudl, rq->cpu); + cpudl_clear(&rq->rd->cpudl, rq->cpu, rq->online); cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); } else { struct rb_node *leftmost = rb_first_cached(&dl_rq->root); @@ -2950,9 +2950,10 @@ static void rq_online_dl(struct rq *rq) if (rq->dl.overloaded) dl_set_overload(rq);
- cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu); if (rq->dl.dl_nr_running > 0) cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); + else + cpudl_clear(&rq->rd->cpudl, rq->cpu, true); }
/* Assumes rq->lock is held */ @@ -2961,8 +2962,7 @@ static void rq_offline_dl(struct rq *rq) if (rq->dl.overloaded) dl_clear_overload(rq);
- cpudl_clear(&rq->rd->cpudl, rq->cpu); - cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu); + cpudl_clear(&rq->rd->cpudl, rq->cpu, false); }
void __init init_sched_dl_class(void) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 9815f9a0cd59..72958b31549f 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -804,7 +804,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) { - s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread; + s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread; struct sched_entity *last, *first, *root; struct rq *rq = cpu_rq(cpu); unsigned long flags; @@ -827,15 +827,15 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) last = __pick_last_entity(cfs_rq); if (last) right_vruntime = last->vruntime; - min_vruntime = cfs_rq->min_vruntime; + zero_vruntime = cfs_rq->zero_vruntime; raw_spin_rq_unlock_irqrestore(rq, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_deadline", SPLIT_NS(left_deadline)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_vruntime", SPLIT_NS(left_vruntime)); - SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", - SPLIT_NS(min_vruntime)); + SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "zero_vruntime", + SPLIT_NS(zero_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime", SPLIT_NS(avg_vruntime(cfs_rq))); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime", diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index ad1d438b3085..9f03255ec910 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1057,6 +1057,14 @@ static struct scx_dispatch_q *find_user_dsq(u64 dsq_id) return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params); }
+static const struct sched_class *scx_setscheduler_class(struct task_struct *p) +{ + if (p->sched_class == &stop_sched_class) + return &stop_sched_class; + + return __setscheduler_class(p->policy, p->prio); +} + /* * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate @@ -1668,6 +1676,30 @@ static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta) WRITE_ONCE(dsq->nr, dsq->nr + delta); }
+static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p, + u64 enq_flags) +{ + struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); + bool preempt = false; + + /* + * If @rq is in balance, the CPU is already vacant and looking for the + * next task to run. No need to preempt or trigger resched after moving + * @p into its local DSQ. + */ + if (rq->scx.flags & SCX_RQ_IN_BALANCE) + return; + + if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && + rq->curr->sched_class == &ext_sched_class) { + rq->curr->scx.slice = 0; + preempt = true; + } + + if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class)) + resched_curr(rq); +} + static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, u64 enq_flags) { @@ -1765,22 +1797,10 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, if (enq_flags & SCX_ENQ_CLEAR_OPSS) atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
- if (is_local) { - struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); - bool preempt = false; - - if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && - rq->curr->sched_class == &ext_sched_class) { - rq->curr->scx.slice = 0; - preempt = true; - } - - if (preempt || sched_class_above(&ext_sched_class, - rq->curr->sched_class)) - resched_curr(rq); - } else { + if (is_local) + local_dsq_post_enq(dsq, p, enq_flags); + else raw_spin_unlock(&dsq->lock); - } }
static void task_unlink_from_dsq(struct task_struct *p, @@ -2247,6 +2267,8 @@ static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
dsq_mod_nr(dst_dsq, 1); p->scx.dsq = dst_dsq; + + local_dsq_post_enq(dst_dsq, p, enq_flags); }
#ifdef CONFIG_SMP @@ -4653,8 +4675,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work) scx_task_iter_start(&sti); while ((p = scx_task_iter_next_locked(&sti))) { const struct sched_class *old_class = p->sched_class; - const struct sched_class *new_class = - __setscheduler_class(p->policy, p->prio); + const struct sched_class *new_class = scx_setscheduler_class(p); struct sched_enq_and_set_ctx ctx;
if (old_class != new_class && p->se.sched_delayed) @@ -5368,8 +5389,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link) scx_task_iter_start(&sti); while ((p = scx_task_iter_next_locked(&sti))) { const struct sched_class *old_class = p->sched_class; - const struct sched_class *new_class = - __setscheduler_class(p->policy, p->prio); + const struct sched_class *new_class = scx_setscheduler_class(p); struct sched_enq_and_set_ctx ctx;
if (!tryget_task_struct(p)) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 62b8c7e914eb..22dc54aab8dd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -553,7 +553,7 @@ static inline bool entity_before(const struct sched_entity *a,
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) { - return (s64)(se->vruntime - cfs_rq->min_vruntime); + return (s64)(se->vruntime - cfs_rq->zero_vruntime); }
#define __node_2_se(node) \ @@ -605,13 +605,13 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) * * Which we track using: * - * v0 := cfs_rq->min_vruntime + * v0 := cfs_rq->zero_vruntime * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime * \Sum w_i := cfs_rq->avg_load * - * Since min_vruntime is a monotonic increasing variable that closely tracks - * the per-task service, these deltas: (v_i - v), will be in the order of the - * maximal (virtual) lag induced in the system due to quantisation. + * Since zero_vruntime closely tracks the per-task service, these + * deltas: (v_i - v), will be in the order of the maximal (virtual) lag + * induced in the system due to quantisation. * * Also, we use scale_load_down() to reduce the size. * @@ -670,7 +670,7 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq) avg = div_s64(avg, load); }
- return cfs_rq->min_vruntime + avg; + return cfs_rq->zero_vruntime + avg; }
/* @@ -736,7 +736,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime) load += weight; }
- return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load; + return avg >= (s64)(vruntime - cfs_rq->zero_vruntime) * load; }
int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) @@ -744,42 +744,14 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) return vruntime_eligible(cfs_rq, se->vruntime); }
-static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime) +static void update_zero_vruntime(struct cfs_rq *cfs_rq) { - u64 min_vruntime = cfs_rq->min_vruntime; - /* - * open coded max_vruntime() to allow updating avg_vruntime - */ - s64 delta = (s64)(vruntime - min_vruntime); - if (delta > 0) { - avg_vruntime_update(cfs_rq, delta); - min_vruntime = vruntime; - } - return min_vruntime; -} + u64 vruntime = avg_vruntime(cfs_rq); + s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
-static void update_min_vruntime(struct cfs_rq *cfs_rq) -{ - struct sched_entity *se = __pick_root_entity(cfs_rq); - struct sched_entity *curr = cfs_rq->curr; - u64 vruntime = cfs_rq->min_vruntime; - - if (curr) { - if (curr->on_rq) - vruntime = curr->vruntime; - else - curr = NULL; - } - - if (se) { - if (!curr) - vruntime = se->min_vruntime; - else - vruntime = min_vruntime(vruntime, se->min_vruntime); - } + avg_vruntime_update(cfs_rq, delta);
- /* ensure we never gain time by being placed backwards. */ - cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime); + cfs_rq->zero_vruntime = vruntime; }
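To see what the new bookkeeping computes, here is a toy, plain-C rendering of the formula from the comment above: avg_vruntime() is zero_vruntime plus the load-weighted mean of the stored keys, and update_zero_vruntime() re-centres zero_vruntime on exactly that average. scale_load_down() and the rounding done in the real kernel code are omitted; this is a sketch for this note, not kernel code.

#include <stdint.h>
#include <stdio.h>

struct toy_se { int64_t vruntime; int64_t weight; };

int main(void)
{
        struct toy_se rq[] = { { 1000, 1024 }, { 1300, 2048 }, { 1600, 512 } };
        int64_t zero_vruntime = 1000;   /* arbitrary reference point */
        int64_t avg = 0, load = 0;
        unsigned int i;

        for (i = 0; i < sizeof(rq) / sizeof(rq[0]); i++) {
                /* entity_key(): vruntime relative to zero_vruntime */
                avg  += (rq[i].vruntime - zero_vruntime) * rq[i].weight;
                load += rq[i].weight;
        }

        /* avg_vruntime(): zero_vruntime + \Sum key_i * w_i / \Sum w_i */
        printf("avg_vruntime = %lld\n", (long long)(zero_vruntime + avg / load));
        return 0;
}

With these numbers the weighted key average is 921600/3584, so it prints avg_vruntime = 1257; update_zero_vruntime() would then set zero_vruntime to that value and pass the same delta to avg_vruntime_update() so the cached sum stays consistent.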
static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq) @@ -852,6 +824,7 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity, static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { avg_vruntime_add(cfs_rq, se); + update_zero_vruntime(cfs_rq); se->min_vruntime = se->vruntime; se->min_slice = se->slice; rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, @@ -863,6 +836,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, &min_vruntime_cb); avg_vruntime_sub(cfs_rq, se); + update_zero_vruntime(cfs_rq); }
struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq) @@ -1243,7 +1217,6 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->vruntime += calc_delta_fair(delta_exec, curr); resched = update_deadline(cfs_rq, curr); - update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) { struct task_struct *p = task_of(curr); @@ -3937,15 +3910,6 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, update_load_add(&cfs_rq->load, se->load.weight); if (!curr) __enqueue_entity(cfs_rq, se); - - /* - * The entity's vruntime has been adjusted, so let's check - * whether the rq-wide min_vruntime needs updated too. Since - * the calculations above require stable min_vruntime rather - * than up-to-date one, we do the update at the end of the - * reweight process. - */ - update_min_vruntime(cfs_rq); } }
@@ -5614,15 +5578,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_cfs_group(se);
- /* - * Now advance min_vruntime if @se was the entity holding it back, - * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be - * put back on, and if we advance min_vruntime, we'll be placed back - * further than we started -- i.e. we'll be penalized. - */ - if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) - update_min_vruntime(cfs_rq); - if (flags & DEQUEUE_DELAYED) finish_delayed_dequeue_entity(se);
@@ -9165,7 +9120,6 @@ static void yield_task_fair(struct rq *rq) if (entity_eligible(cfs_rq, se)) { se->vruntime = se->deadline; se->deadline += calc_delta_fair(se->slice, se); - update_min_vruntime(cfs_rq); } }
@@ -12238,14 +12192,8 @@ static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost) /* * Track max cost of a domain to make sure to not delay the * next wakeup on the CPU. - * - * sched_balance_newidle() bumps the cost whenever newidle - * balance fails, and we don't want things to grow out of - * control. Use the sysctl_sched_migration_cost as the upper - * limit, plus a litle extra to avoid off by ones. */ - sd->max_newidle_lb_cost = - min(cost, sysctl_sched_migration_cost + 200); + sd->max_newidle_lb_cost = cost; sd->last_decay_max_lb_cost = jiffies; } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) { /* @@ -12950,17 +12898,10 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
t1 = sched_clock_cpu(this_cpu); domain_cost = t1 - t0; + update_newidle_cost(sd, domain_cost); + curr_cost += domain_cost; t0 = t1; - - /* - * Failing newidle means it is not effective; - * bump the cost so we end up doing less of it. - */ - if (!pulled_task) - domain_cost = (3 * sd->max_newidle_lb_cost) / 2; - - update_newidle_cost(sd, domain_cost); }
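With the failure bump and the sysctl_sched_migration_cost clamp removed above, max_newidle_lb_cost is left tracking only what was actually measured: it is raised to any larger observed cost and, per the time_after() branch this hunk keeps, decays once nothing has raised it for a while. A toy model of that remaining behaviour in plain C; the kernel's decay branch is not shown in this hunk, so the rate below is purely illustrative:

#include <stdint.h>

struct toy_domain {
        uint64_t max_cost;      /* most expensive newidle balance observed */
        uint64_t last_raise;    /* when the maximum was last raised */
};

static void toy_update_cost(struct toy_domain *sd, uint64_t cost,
                            uint64_t now, uint64_t period)
{
        if (cost > sd->max_cost) {
                /* new maximum: remember it and when it happened */
                sd->max_cost = cost;
                sd->last_raise = now;
        } else if (now > sd->last_raise + period) {
                /* quiet for a whole period: let the memory decay */
                sd->max_cost -= sd->max_cost / 4;       /* illustrative rate */
                sd->last_raise = now;
        }
}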
/* @@ -13106,7 +13047,7 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr) }
/* - * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed. + * se_fi_update - Update the cfs_rq->zero_vruntime_fi in a CFS hierarchy if needed. */ static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq, bool forceidle) @@ -13120,7 +13061,7 @@ static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq, cfs_rq->forceidle_seq = fi_seq; }
- cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime; + cfs_rq->zero_vruntime_fi = cfs_rq->zero_vruntime; } }
@@ -13173,11 +13114,11 @@ bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
/* * Find delta after normalizing se's vruntime with its cfs_rq's - * min_vruntime_fi, which would have been updated in prior calls + * zero_vruntime_fi, which would have been updated in prior calls * to se_fi_update(). */ delta = (s64)(sea->vruntime - seb->vruntime) + - (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi); + (s64)(cfs_rqb->zero_vruntime_fi - cfs_rqa->zero_vruntime_fi);
return delta > 0; } @@ -13415,7 +13356,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) void init_cfs_rq(struct cfs_rq *cfs_rq) { cfs_rq->tasks_timeline = RB_ROOT_CACHED; - cfs_rq->min_vruntime = (u64)(-(1LL << 20)); + cfs_rq->zero_vruntime = (u64)(-(1LL << 20)); #ifdef CONFIG_SMP raw_spin_lock_init(&cfs_rq->removed.lock); #endif diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 6ad6717084ed..c437a1502623 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1895,6 +1895,26 @@ static int find_lowest_rq(struct task_struct *task) return -1; }
+static struct task_struct *pick_next_pushable_task(struct rq *rq) +{ + struct task_struct *p; + + if (!has_pushable_tasks(rq)) + return NULL; + + p = plist_first_entry(&rq->rt.pushable_tasks, + struct task_struct, pushable_tasks); + + BUG_ON(rq->cpu != task_cpu(p)); + BUG_ON(task_current(rq, p)); + BUG_ON(p->nr_cpus_allowed <= 1); + + BUG_ON(!task_on_rq_queued(p)); + BUG_ON(!rt_task(p)); + + return p; +} + /* Will lock the rq it finds */ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) { @@ -1925,18 +1945,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) /* * We had to unlock the run queue. In * the mean time, task could have - * migrated already or had its affinity changed. - * Also make sure that it wasn't scheduled on its rq. + * migrated already or had its affinity changed, + * therefore check if the task is still at the + * head of the pushable tasks list. * It is possible the task was scheduled, set * "migrate_disabled" and then got preempted, so we must * check the task migration disable flag here too. */ - if (unlikely(task_rq(task) != rq || + if (unlikely(is_migration_disabled(task) || !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) || - task_on_cpu(rq, task) || - !rt_task(task) || - is_migration_disabled(task) || - !task_on_rq_queued(task))) { + task != pick_next_pushable_task(rq))) {
double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; @@ -1956,26 +1974,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) return lowest_rq; }
-static struct task_struct *pick_next_pushable_task(struct rq *rq) -{ - struct task_struct *p; - - if (!has_pushable_tasks(rq)) - return NULL; - - p = plist_first_entry(&rq->rt.pushable_tasks, - struct task_struct, pushable_tasks); - - BUG_ON(rq->cpu != task_cpu(p)); - BUG_ON(task_current(rq, p)); - BUG_ON(p->nr_cpus_allowed <= 1); - - BUG_ON(!task_on_rq_queued(p)); - BUG_ON(!rt_task(p)); - - return p; -} - /* * If the current CPU has more than one RT task, see if the non * running task can migrate over to a CPU that is running a task diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index cf541c4502d9..6070331772ea 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -660,10 +660,10 @@ struct cfs_rq { s64 avg_vruntime; u64 avg_load;
- u64 min_vruntime; + u64 zero_vruntime; #ifdef CONFIG_SCHED_CORE unsigned int forceidle_seq; - u64 min_vruntime_fi; + u64 zero_vruntime_fi; #endif
struct rb_root_cached tasks_timeline; diff --git a/kernel/scs.c b/kernel/scs.c index d7809affe740..772488afd5b9 100644 --- a/kernel/scs.c +++ b/kernel/scs.c @@ -135,7 +135,7 @@ static void scs_check_usage(struct task_struct *tsk) if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE)) return;
- for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) { + for (p = task_scs(tsk); p < __scs_magic(task_scs(tsk)); ++p) { if (!READ_ONCE_NOCHECK(*p)) break; used += sizeof(*p); diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index 988a4c4ba97b..910da0e4531a 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -943,6 +943,7 @@ void fgraph_init_ops(struct ftrace_ops *dst_ops, mutex_init(&dst_ops->local_hash.regex_lock); INIT_LIST_HEAD(&dst_ops->subop_list); dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED; + dst_ops->private = src_ops->private; } #endif } @@ -1285,6 +1286,13 @@ int register_ftrace_graph(struct fgraph_ops *gops)
ftrace_graph_active++;
+ /* Always save the function, and reset at unregistering */ + gops->saved_func = gops->entryfunc; +#ifdef CONFIG_DYNAMIC_FTRACE + if (ftrace_pids_enabled(&gops->ops)) + gops->entryfunc = fgraph_pid_func; +#endif + if (ftrace_graph_active == 2) ftrace_graph_disable_direct(true);
@@ -1304,8 +1312,6 @@ int register_ftrace_graph(struct fgraph_ops *gops) } else { init_task_vars(gops->idx); } - /* Always save the function, and reset at unregistering */ - gops->saved_func = gops->entryfunc;
ret = ftrace_startup_subops(&graph_ops, &gops->ops, command); if (!ret) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index dbea76058863..a3d7067eae65 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -689,6 +689,8 @@ int trace_event_reg(struct trace_event_call *call,
#ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: + if (!call->class->perf_probe) + return -ENODEV; return tracepoint_probe_register(call->tp, call->class->perf_probe, call); diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 1b9e32f6442f..9f3fcbfc4cd7 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -382,7 +382,6 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, n_u64++; } else { trace_seq_printf(s, print_fmt, se->fields[i]->name, - STR_VAR_LEN_MAX, (char *)&entry->fields[n_u64].as_u64, i == se->n_fields - 1 ? "" : " "); n_u64 += STR_VAR_LEN_MAX / sizeof(u64); diff --git a/lib/idr.c b/lib/idr.c index da36054c3ca0..aad464c36369 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -40,6 +40,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR))) idr->idr_rt.xa_flags |= IDR_RT_MARKER; + if (max < base) + return -ENOSPC;
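One reading of the added check, inferred from the diff rather than from a changelog: idr_alloc_u32() takes its lower bound from the idr's base (set with idr_init_base()), so a @max below that base describes an empty range, and failing with -ENOSPC up front keeps the later base-relative arithmetic from running on an underflowed window. A hypothetical caller that now fails cleanly:

static int example_alloc(struct idr *idr, void *obj)
{
        u32 id = 0;

        /*
         * If @idr was initialised with idr_init_base(idr, 100), an upper
         * bound of 50 describes an empty window; with the hunk above this
         * now returns -ENOSPC instead of searching a wrapped range.
         */
        return idr_alloc_u32(idr, obj, &id, 50, GFP_KERNEL);
}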
id = (id < base) ? 0 : id - base; radix_tree_iter_init(&iter, id); diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 6597ebea8ae2..009704f257f1 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -93,13 +93,8 @@ size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, if (!trylock_page(page)) continue;
- if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) && - PageIsolated(page)) { - /* raced with isolation */ - unlock_page(page); - continue; - } - balloon_page_delete(page); + list_del(&page->lru); + balloon_page_finalize(page); __count_vm_event(BALLOON_DEFLATE); list_add(&page->lru, pages); unlock_page(page); diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h index cf22e09a3507..d7737f5df668 100644 --- a/mm/damon/tests/core-kunit.h +++ b/mm/damon/tests/core-kunit.h @@ -20,11 +20,17 @@ static void damon_test_regions(struct kunit *test) struct damon_target *t;
r = damon_new_region(1, 2); + if (!r) + kunit_skip(test, "region alloc fail"); KUNIT_EXPECT_EQ(test, 1ul, r->ar.start); KUNIT_EXPECT_EQ(test, 2ul, r->ar.end); KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
t = damon_new_target(); + if (!t) { + damon_free_region(r); + kunit_skip(test, "target alloc fail"); + } KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
damon_add_region(r, t); @@ -52,7 +58,14 @@ static void damon_test_target(struct kunit *test) struct damon_ctx *c = damon_new_ctx(); struct damon_target *t;
+ if (!c) + kunit_skip(test, "ctx alloc fail"); + t = damon_new_target(); + if (!t) { + damon_destroy_ctx(c); + kunit_skip(test, "target alloc fail"); + } KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
damon_add_target(c, t); @@ -84,8 +97,15 @@ static void damon_test_aggregate(struct kunit *test) struct damon_region *r; int it, ir;
+ if (!ctx) + kunit_skip(test, "ctx alloc fail"); + for (it = 0; it < 3; it++) { t = damon_new_target(); + if (!t) { + damon_destroy_ctx(ctx); + kunit_skip(test, "target alloc fail"); + } damon_add_target(ctx, t); }
@@ -93,6 +113,10 @@ static void damon_test_aggregate(struct kunit *test) damon_for_each_target(t, ctx) { for (ir = 0; ir < 3; ir++) { r = damon_new_region(saddr[it][ir], eaddr[it][ir]); + if (!r) { + damon_destroy_ctx(ctx); + kunit_skip(test, "region alloc fail"); + } r->nr_accesses = accesses[it][ir]; r->nr_accesses_bp = accesses[it][ir] * 10000; damon_add_region(r, t); @@ -124,8 +148,19 @@ static void damon_test_split_at(struct kunit *test) struct damon_target *t; struct damon_region *r, *r_new;
+ if (!c) + kunit_skip(test, "ctx alloc fail"); t = damon_new_target(); + if (!t) { + damon_destroy_ctx(c); + kunit_skip(test, "target alloc fail"); + } r = damon_new_region(0, 100); + if (!r) { + damon_destroy_ctx(c); + damon_free_target(t); + kunit_skip(test, "region alloc fail"); + } r->nr_accesses_bp = 420000; r->nr_accesses = 42; r->last_nr_accesses = 15; @@ -153,11 +188,21 @@ static void damon_test_merge_two(struct kunit *test) int i;
t = damon_new_target(); + if (!t) + kunit_skip(test, "target alloc fail"); r = damon_new_region(0, 100); + if (!r) { + damon_free_target(t); + kunit_skip(test, "region alloc fail"); + } r->nr_accesses = 10; r->nr_accesses_bp = 100000; damon_add_region(r, t); r2 = damon_new_region(100, 300); + if (!r2) { + damon_free_target(t); + kunit_skip(test, "second region alloc fail"); + } r2->nr_accesses = 20; r2->nr_accesses_bp = 200000; damon_add_region(r2, t); @@ -203,8 +248,14 @@ static void damon_test_merge_regions_of(struct kunit *test) int i;
t = damon_new_target(); + if (!t) + kunit_skip(test, "target alloc fail"); for (i = 0; i < ARRAY_SIZE(sa); i++) { r = damon_new_region(sa[i], ea[i]); + if (!r) { + damon_free_target(t); + kunit_skip(test, "region alloc fail"); + } r->nr_accesses = nrs[i]; r->nr_accesses_bp = nrs[i] * 10000; damon_add_region(r, t); @@ -227,15 +278,35 @@ static void damon_test_split_regions_of(struct kunit *test) struct damon_target *t; struct damon_region *r;
+ if (!c) + kunit_skip(test, "ctx alloc fail"); t = damon_new_target(); + if (!t) { + damon_destroy_ctx(c); + kunit_skip(test, "target alloc fail"); + } r = damon_new_region(0, 22); + if (!r) { + damon_destroy_ctx(c); + damon_free_target(t); + kunit_skip(test, "region alloc fail"); + } damon_add_region(r, t); damon_split_regions_of(t, 2); KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u); damon_free_target(t);
t = damon_new_target(); + if (!t) { + damon_destroy_ctx(c); + kunit_skip(test, "second target alloc fail"); + } r = damon_new_region(0, 220); + if (!r) { + damon_destroy_ctx(c); + damon_free_target(t); + kunit_skip(test, "second region alloc fail"); + } damon_add_region(r, t); damon_split_regions_of(t, 4); KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u); @@ -249,6 +320,9 @@ static void damon_test_ops_registration(struct kunit *test) struct damon_operations ops = {.id = DAMON_OPS_VADDR}, bak; bool need_cleanup = false;
+ if (!c) + kunit_skip(test, "ctx alloc fail"); + /* DAMON_OPS_VADDR is registered only if CONFIG_DAMON_VADDR is set */ if (!damon_is_registered_ops(DAMON_OPS_VADDR)) { bak.id = DAMON_OPS_VADDR; @@ -294,13 +368,26 @@ static void damon_test_ops_registration(struct kunit *test) static void damon_test_set_regions(struct kunit *test) { struct damon_target *t = damon_new_target(); - struct damon_region *r1 = damon_new_region(4, 16); - struct damon_region *r2 = damon_new_region(24, 32); + struct damon_region *r1, *r2; struct damon_addr_range range = {.start = 8, .end = 28}; unsigned long expects[] = {8, 16, 16, 24, 24, 28}; int expect_idx = 0; struct damon_region *r;
+ if (!t) + kunit_skip(test, "target alloc fail"); + r1 = damon_new_region(4, 16); + if (!r1) { + damon_free_target(t); + kunit_skip(test, "region alloc fail"); + } + r2 = damon_new_region(24, 32); + if (!r2) { + damon_free_target(t); + damon_free_region(r1); + kunit_skip(test, "second region alloc fail"); + } + damon_add_region(r1, t); damon_add_region(r2, t); damon_set_regions(t, &range, 1); @@ -342,6 +429,9 @@ static void damon_test_update_monitoring_result(struct kunit *test) struct damon_attrs new_attrs; struct damon_region *r = damon_new_region(3, 7);
+ if (!r) + kunit_skip(test, "region alloc fail"); + r->nr_accesses = 15; r->nr_accesses_bp = 150000; r->age = 20; @@ -375,6 +465,9 @@ static void damon_test_set_attrs(struct kunit *test) .sample_interval = 5000, .aggr_interval = 100000,}; struct damon_attrs invalid_attrs;
+ if (!c) + kunit_skip(test, "ctx alloc fail"); + KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &valid_attrs), 0);
invalid_attrs = valid_attrs; @@ -412,6 +505,8 @@ static void damos_test_new_filter(struct kunit *test) struct damos_filter *filter;
filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true); + if (!filter) + kunit_skip(test, "filter alloc fail"); KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON); KUNIT_EXPECT_EQ(test, filter->matching, true); KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list); diff --git a/mm/damon/tests/sysfs-kunit.h b/mm/damon/tests/sysfs-kunit.h index 7b5c7b307da9..ce7218469f20 100644 --- a/mm/damon/tests/sysfs-kunit.h +++ b/mm/damon/tests/sysfs-kunit.h @@ -45,16 +45,41 @@ static void damon_sysfs_test_add_targets(struct kunit *test) struct damon_ctx *ctx;
sysfs_targets = damon_sysfs_targets_alloc(); + if (!sysfs_targets) + kunit_skip(test, "sysfs_targets alloc fail"); sysfs_targets->nr = 1; sysfs_targets->targets_arr = kmalloc_array(1, sizeof(*sysfs_targets->targets_arr), GFP_KERNEL); + if (!sysfs_targets->targets_arr) { + kfree(sysfs_targets); + kunit_skip(test, "targets_arr alloc fail"); + }
sysfs_target = damon_sysfs_target_alloc(); + if (!sysfs_target) { + kfree(sysfs_targets->targets_arr); + kfree(sysfs_targets); + kunit_skip(test, "sysfs_target alloc fail"); + } sysfs_target->pid = __damon_sysfs_test_get_any_pid(12, 100); sysfs_target->regions = damon_sysfs_regions_alloc(); + if (!sysfs_target->regions) { + kfree(sysfs_targets->targets_arr); + kfree(sysfs_targets); + kfree(sysfs_target); + kunit_skip(test, "sysfs_regions alloc fail"); + } + sysfs_targets->targets_arr[0] = sysfs_target;
ctx = damon_new_ctx(); + if (!ctx) { + kfree(sysfs_targets->targets_arr); + kfree(sysfs_targets); + kfree(sysfs_target->regions); + kfree(sysfs_target); + kunit_skip(test, "ctx alloc fail"); + }
damon_sysfs_add_targets(ctx, sysfs_targets); KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx)); diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h index a149e354bb26..ed2845d9f9a9 100644 --- a/mm/damon/tests/vaddr-kunit.h +++ b/mm/damon/tests/vaddr-kunit.h @@ -136,8 +136,14 @@ static void damon_do_test_apply_three_regions(struct kunit *test, int i;
t = damon_new_target(); + if (!t) + kunit_skip(test, "target alloc fail"); for (i = 0; i < nr_regions / 2; i++) { r = damon_new_region(regions[i * 2], regions[i * 2 + 1]); + if (!r) { + damon_destroy_target(t); + kunit_skip(test, "region alloc fail"); + } damon_add_region(r, t); }
@@ -250,7 +256,16 @@ static void damon_test_split_evenly_fail(struct kunit *test, unsigned long start, unsigned long end, unsigned int nr_pieces) { struct damon_target *t = damon_new_target(); - struct damon_region *r = damon_new_region(start, end); + struct damon_region *r; + + if (!t) + kunit_skip(test, "target alloc fail"); + + r = damon_new_region(start, end); + if (!r) { + damon_free_target(t); + kunit_skip(test, "region alloc fail"); + }
damon_add_region(r, t); KUNIT_EXPECT_EQ(test, @@ -269,10 +284,17 @@ static void damon_test_split_evenly_succ(struct kunit *test, unsigned long start, unsigned long end, unsigned int nr_pieces) { struct damon_target *t = damon_new_target(); - struct damon_region *r = damon_new_region(start, end); + struct damon_region *r; unsigned long expected_width = (end - start) / nr_pieces; unsigned long i = 0;
+ if (!t) + kunit_skip(test, "target alloc fail"); + r = damon_new_region(start, end); + if (!r) { + damon_free_target(t); + kunit_skip(test, "region alloc fail"); + } damon_add_region(r, t); KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, nr_pieces), 0); diff --git a/mm/kasan/common.c b/mm/kasan/common.c index ed4873e18c75..c49b8520b364 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -28,6 +28,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/bug.h> +#include <linux/vmalloc.h>
#include "kasan.h" #include "../slab.h" @@ -559,3 +560,34 @@ bool __kasan_check_byte(const void *address, unsigned long ip) } return true; } + +#ifdef CONFIG_KASAN_VMALLOC +void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags) +{ + unsigned long size; + void *addr; + int area; + u8 tag; + + /* + * If KASAN_VMALLOC_KEEP_TAG was set at this point, all vms[] pointers + * would be unpoisoned with the KASAN_TAG_KERNEL which would disable + * KASAN checks down the line. + */ + if (WARN_ON_ONCE(flags & KASAN_VMALLOC_KEEP_TAG)) + return; + + size = vms[0]->size; + addr = vms[0]->addr; + vms[0]->addr = __kasan_unpoison_vmalloc(addr, size, flags); + tag = get_tag(vms[0]->addr); + + for (area = 1 ; area < nr_vms ; area++) { + size = vms[area]->size; + addr = set_tag(vms[area]->addr, tag); + vms[area]->addr = + __kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG); + } +} +#endif diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 9958ebc15d38..faf0d954a296 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -345,7 +345,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, return (void *)start; }
- tag = kasan_random_tag(); + tag = (flags & KASAN_VMALLOC_KEEP_TAG) ? get_tag(start) : kasan_random_tag(); start = set_tag(start, tag);
/* Unpoison and initialize memory up to size. */ diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 88d1c9dcb507..b18cfa5e2cb7 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -561,7 +561,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, !(flags & KASAN_VMALLOC_PROT_NORMAL)) return (void *)start;
- start = set_tag(start, kasan_random_tag()); + if (unlikely(!(flags & KASAN_VMALLOC_KEEP_TAG))) + start = set_tag(start, kasan_random_tag()); + kasan_unpoison(start, size, false); return (void *)start; } diff --git a/mm/ksm.c b/mm/ksm.c index 1601e36a819d..15f558f69093 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2704,8 +2704,14 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) spin_unlock(&ksm_mmlist_lock);
mm_slot_free(mm_slot_cache, mm_slot); + /* + * Only clear MMF_VM_MERGEABLE; do not clear MMF_VM_MERGE_ANY. + * An MMF_VM_MERGE_ANY mm may only just have been added to the + * ksm_mm_slot list, while its process has not started running + * yet or has not yet performed mmap/brk to create any anonymous + * VMAs. + */ clear_bit(MMF_VM_MERGEABLE, &mm->flags); - clear_bit(MMF_VM_MERGE_ANY, &mm->flags); mmap_read_unlock(mm); mmdrop(mm); } else { @@ -2820,8 +2826,16 @@ void ksm_add_vma(struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm;
- if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) + if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) { __ksm_add_vma(vma); + /* + * Generally, the flags here always include MMF_VM_MERGEABLE. + * However, in rare cases, ksmd may have cleared it after a scan + * cycle in which it found no mergeable VMA. + */ + if (unlikely(!test_bit(MMF_VM_MERGEABLE, &mm->flags))) + __ksm_enter(mm); + } }
static void ksm_add_vmas(struct mm_struct *mm) diff --git a/mm/page_owner.c b/mm/page_owner.c index 2d6360eaccbb..f92cec789eee 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -933,7 +933,7 @@ static const struct file_operations page_owner_stack_operations = { .open = page_owner_stack_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_private, };
static int page_owner_threshold_get(void *data, u64 *val) diff --git a/mm/shmem.c b/mm/shmem.c index 7e07188e8269..0c3113b5b5aa 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3749,6 +3749,7 @@ static int shmem_rename2(struct mnt_idmap *idmap, { struct inode *inode = d_inode(old_dentry); int they_are_dirs = S_ISDIR(inode->i_mode); + bool had_offset = false; int error;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) @@ -3761,16 +3762,23 @@ static int shmem_rename2(struct mnt_idmap *idmap, if (!simple_empty(new_dentry)) return -ENOTEMPTY;
+ error = simple_offset_add(shmem_get_offset_ctx(new_dir), new_dentry); + if (error == -EBUSY) + had_offset = true; + else if (unlikely(error)) + return error; + if (flags & RENAME_WHITEOUT) { error = shmem_whiteout(idmap, old_dir, old_dentry); - if (error) + if (error) { + if (!had_offset) + simple_offset_remove(shmem_get_offset_ctx(new_dir), + new_dentry); return error; + } }
- error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry); - if (error) - return error; - + simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry); if (d_really_is_positive(new_dentry)) { (void) shmem_unlink(new_dir, new_dentry); if (they_are_dirs) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 3519c4e4f841..98e3671e9467 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -4118,7 +4118,9 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) */ if (size <= alloced_size) { kasan_unpoison_vmalloc(p + old_size, size - old_size, - KASAN_VMALLOC_PROT_NORMAL); + KASAN_VMALLOC_PROT_NORMAL | + KASAN_VMALLOC_VM_ALLOC | + KASAN_VMALLOC_KEEP_TAG); /* * No need to zero memory here, as unused memory will have * already been zeroed at initial allocation time or during @@ -4810,9 +4812,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ - for (area = 0; area < nr_vms; area++) - vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, - vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); + kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
kfree(vas); return vms; diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index d60996352722..e1ab395277f3 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -438,7 +438,6 @@ static int __rfcomm_release_dev(void __user *arg) { struct rfcomm_dev_req req; struct rfcomm_dev *dev; - struct tty_struct *tty;
if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; @@ -464,11 +463,7 @@ static int __rfcomm_release_dev(void __user *arg) rfcomm_dlc_close(dev->dlc, 0);
/* Shut down TTY synchronously before freeing rfcomm_dev */ - tty = tty_port_tty_get(&dev->port); - if (tty) { - tty_vhangup(tty); - tty_kref_put(tty); - } + tty_port_tty_vhangup(&dev->port);
if (!test_bit(RFCOMM_TTY_OWNED, &dev->status)) tty_port_put(&dev->port); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 741b0b8c4bab..a2e59108a5dc 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -247,6 +247,7 @@ struct net_bridge_vlan { * struct net_bridge_vlan_group * * @vlan_hash: VLAN entry rhashtable + * @tunnel_hash: Hash table to map from tunnel key ID (e.g. VXLAN VNI) to VLAN * @vlan_list: sorted VLAN entry list * @num_vlans: number of total VLAN entries * @pvid: PVID VLAN id diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c index 6651a8dc62e0..d4d63586053a 100644 --- a/net/caif/cffrml.c +++ b/net/caif/cffrml.c @@ -92,8 +92,15 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) len = le16_to_cpu(tmp);
/* Subtract for FCS on length if FCS is not used. */ - if (!this->dofcs) + if (!this->dofcs) { + if (len < 2) { + ++cffrml_rcv_error; + pr_err("Invalid frame length (%d)\n", len); + cfpkt_destroy(pkt); + return -EPROTO; + } len -= 2; + }
if (cfpkt_setlen(pkt, len) < 0) { ++cffrml_rcv_error; diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index d245fa508e1c..f5f60deb680a 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -806,51 +806,49 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) ceph_decode_need(p, end, len, bad); pool_end = *p + len;
+ ceph_decode_need(p, end, 4 + 4 + 4, bad); pi->type = ceph_decode_8(p); pi->size = ceph_decode_8(p); pi->crush_ruleset = ceph_decode_8(p); pi->object_hash = ceph_decode_8(p); - pi->pg_num = ceph_decode_32(p); pi->pgp_num = ceph_decode_32(p);
- *p += 4 + 4; /* skip lpg* */ - *p += 4; /* skip last_change */ - *p += 8 + 4; /* skip snap_seq, snap_epoch */ + /* lpg*, last_change, snap_seq, snap_epoch */ + ceph_decode_skip_n(p, end, 8 + 4 + 8 + 4, bad);
/* skip snaps */ - num = ceph_decode_32(p); + ceph_decode_32_safe(p, end, num, bad); while (num--) { - *p += 8; /* snapid key */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* snapid key, pool snap (with versions) */ + ceph_decode_skip_n(p, end, 8 + 2, bad); + ceph_decode_skip_string(p, end, bad); }
- /* skip removed_snaps */ - num = ceph_decode_32(p); - *p += num * (8 + 8); + /* removed_snaps */ + ceph_decode_skip_map(p, end, 64, 64, bad);
+ ceph_decode_need(p, end, 8 + 8 + 4, bad); *p += 8; /* skip auid */ pi->flags = ceph_decode_64(p); *p += 4; /* skip crash_replay_interval */
if (ev >= 7) - pi->min_size = ceph_decode_8(p); + ceph_decode_8_safe(p, end, pi->min_size, bad); else pi->min_size = pi->size - pi->size / 2;
if (ev >= 8) - *p += 8 + 8; /* skip quota_max_* */ + /* quota_max_* */ + ceph_decode_skip_n(p, end, 8 + 8, bad);
if (ev >= 9) { - /* skip tiers */ - num = ceph_decode_32(p); - *p += num * 8; + /* tiers */ + ceph_decode_skip_set(p, end, 64, bad);
+ ceph_decode_need(p, end, 8 + 1 + 8 + 8, bad); *p += 8; /* skip tier_of */ *p += 1; /* skip cache_mode */ - pi->read_tier = ceph_decode_64(p); pi->write_tier = ceph_decode_64(p); } else { @@ -858,86 +856,76 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) pi->write_tier = -1; }
- if (ev >= 10) { - /* skip properties */ - num = ceph_decode_32(p); - while (num--) { - len = ceph_decode_32(p); - *p += len; /* key */ - len = ceph_decode_32(p); - *p += len; /* val */ - } - } + if (ev >= 10) + /* properties */ + ceph_decode_skip_map(p, end, string, string, bad);
if (ev >= 11) { - /* skip hit_set_params */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* hit_set_params (with versions) */ + ceph_decode_skip_n(p, end, 2, bad); + ceph_decode_skip_string(p, end, bad);
- *p += 4; /* skip hit_set_period */ - *p += 4; /* skip hit_set_count */ + /* hit_set_period, hit_set_count */ + ceph_decode_skip_n(p, end, 4 + 4, bad); }
if (ev >= 12) - *p += 4; /* skip stripe_width */ + /* stripe_width */ + ceph_decode_skip_32(p, end, bad);
- if (ev >= 13) { - *p += 8; /* skip target_max_bytes */ - *p += 8; /* skip target_max_objects */ - *p += 4; /* skip cache_target_dirty_ratio_micro */ - *p += 4; /* skip cache_target_full_ratio_micro */ - *p += 4; /* skip cache_min_flush_age */ - *p += 4; /* skip cache_min_evict_age */ - } + if (ev >= 13) + /* target_max_*, cache_target_*, cache_min_* */ + ceph_decode_skip_n(p, end, 16 + 8 + 8, bad);
- if (ev >= 14) { - /* skip erasure_code_profile */ - len = ceph_decode_32(p); - *p += len; - } + if (ev >= 14) + /* erasure_code_profile */ + ceph_decode_skip_string(p, end, bad);
/* * last_force_op_resend_preluminous, will be overridden if the * map was encoded with RESEND_ON_SPLIT */ if (ev >= 15) - pi->last_force_request_resend = ceph_decode_32(p); + ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad); else pi->last_force_request_resend = 0;
if (ev >= 16) - *p += 4; /* skip min_read_recency_for_promote */ + /* min_read_recency_for_promote */ + ceph_decode_skip_32(p, end, bad);
if (ev >= 17) - *p += 8; /* skip expected_num_objects */ + /* expected_num_objects */ + ceph_decode_skip_64(p, end, bad);
if (ev >= 19) - *p += 4; /* skip cache_target_dirty_high_ratio_micro */ + /* cache_target_dirty_high_ratio_micro */ + ceph_decode_skip_32(p, end, bad);
if (ev >= 20) - *p += 4; /* skip min_write_recency_for_promote */ + /* min_write_recency_for_promote */ + ceph_decode_skip_32(p, end, bad);
if (ev >= 21) - *p += 1; /* skip use_gmt_hitset */ + /* use_gmt_hitset */ + ceph_decode_skip_8(p, end, bad);
if (ev >= 22) - *p += 1; /* skip fast_read */ + /* fast_read */ + ceph_decode_skip_8(p, end, bad);
- if (ev >= 23) { - *p += 4; /* skip hit_set_grade_decay_rate */ - *p += 4; /* skip hit_set_search_last_n */ - } + if (ev >= 23) + /* hit_set_grade_decay_rate, hit_set_search_last_n */ + ceph_decode_skip_n(p, end, 4 + 4, bad);
if (ev >= 24) { - /* skip opts */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* opts (with versions) */ + ceph_decode_skip_n(p, end, 2, bad); + ceph_decode_skip_string(p, end, bad); }
if (ev >= 25) - pi->last_force_request_resend = ceph_decode_32(p); + ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad);
/* ignore the rest */
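The decode_pool() conversion above is one pattern applied throughout: every field that used to be skipped with a bare '*p += n' is now consumed via the bounds-checked helpers from <linux/ceph/decode.h>, which bail out to the 'bad' label rather than walking past 'end'. A minimal illustration of that pattern with an invented field layout (only the helper macros are the real ones):

static int example_decode(void **p, void *end)
{
        u32 count;
        u8 version;

        ceph_decode_8_safe(p, end, version, bad);       /* bounds-checked read */
        ceph_decode_32_safe(p, end, count, bad);

        /* skip a map<u32,string> we do not care about, still bounds-checked */
        ceph_decode_skip_map(p, end, 32, string, bad);

        while (count--)
                ceph_decode_skip_string(p, end, bad);   /* length-prefixed blob */

        return 0;

bad:
        return -EINVAL;
}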
diff --git a/net/core/sock.c b/net/core/sock.c index 1781f3a642b4..97cc796a1d33 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2524,7 +2524,7 @@ void sk_free_unlock_clone(struct sock *sk) } EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
-static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst) +static u32 sk_dst_gso_max_size(struct sock *sk, const struct net_device *dev) { bool is_ipv6 = false; u32 max_size; @@ -2534,8 +2534,8 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst) !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)); #endif /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */ - max_size = is_ipv6 ? READ_ONCE(dst_dev(dst)->gso_max_size) : - READ_ONCE(dst_dev(dst)->gso_ipv4_max_size); + max_size = is_ipv6 ? READ_ONCE(dev->gso_max_size) : + READ_ONCE(dev->gso_ipv4_max_size); if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk)) max_size = GSO_LEGACY_MAX_SIZE;
@@ -2544,9 +2544,12 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { + const struct net_device *dev; u32 max_segs = 1;
- sk->sk_route_caps = dst_dev(dst)->features; + rcu_read_lock(); + dev = dst_dev_rcu(dst); + sk->sk_route_caps = dev->features; if (sk_is_tcp(sk)) { struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2562,13 +2565,14 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; - sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst); + sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dev); /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ - max_segs = max_t(u32, READ_ONCE(dst_dev(dst)->gso_max_segs), 1); + max_segs = max_t(u32, READ_ONCE(dev->gso_max_segs), 1); } } sk->sk_gso_max_segs = max_segs; sk_dst_set(sk, dst); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(sk_setup_caps);
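sk_setup_caps() above shows the conversion that repeats through the ipv6 hunks later in this patch: rather than dereferencing dst->dev directly, the device is resolved once through dst_dev()/dst_dev_rcu(), and where the caller holds no reference of its own the accesses stay inside an RCU read-side section. A minimal sketch of a reader following that pattern (the function itself is made up for this note):

static u32 example_read_mtu(const struct dst_entry *dst)
{
        const struct net_device *dev;
        u32 mtu;

        rcu_read_lock();
        dev = dst_dev_rcu(dst);         /* device only valid inside this section */
        mtu = READ_ONCE(dev->mtu);
        rcu_read_unlock();

        return mtu;
}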
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index ac3a252969cb..97599e0d5a1d 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -366,16 +366,10 @@ static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst) { - struct device_node *ethernet; - struct net_device *conduit; struct dsa_port *cpu_dp;
cpu_dp = dsa_tree_find_first_cpu(dst); - ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0); - conduit = of_find_net_device_by_node(ethernet); - of_node_put(ethernet); - - return conduit; + return cpu_dp->conduit; }
/* Assign the default CPU port (the first one in the tree) to all ports of the diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 8b9692c35e70..67fba88f6098 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -2231,7 +2231,10 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) return -ENOMEM; WARN_ON_ONCE(!ret);
- gstrings.len = ret; + if (gstrings.len && gstrings.len != ret) + gstrings.len = 0; + else + gstrings.len = ret;
if (gstrings.len) { data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN)); @@ -2353,10 +2356,13 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) if (copy_from_user(&stats, useraddr, sizeof(stats))) return -EFAULT;
- stats.n_stats = n_stats; + if (stats.n_stats && stats.n_stats != n_stats) + stats.n_stats = 0; + else + stats.n_stats = n_stats;
- if (n_stats) { - data = vzalloc(array_size(n_stats, sizeof(u64))); + if (stats.n_stats) { + data = vzalloc(array_size(stats.n_stats, sizeof(u64))); if (!data) return -ENOMEM; ops->get_ethtool_stats(dev, &stats, data); @@ -2368,7 +2374,9 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) if (copy_to_user(useraddr, &stats, sizeof(stats))) goto out; useraddr += sizeof(stats); - if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64)))) + if (stats.n_stats && + copy_to_user(useraddr, data, + array_size(stats.n_stats, sizeof(u64)))) goto out; ret = 0;
@@ -2404,6 +2412,10 @@ static int ethtool_get_phy_stats_phydev(struct phy_device *phydev, return -EOPNOTSUPP;
n_stats = phy_ops->get_sset_count(phydev); + if (stats->n_stats && stats->n_stats != n_stats) { + stats->n_stats = 0; + return 0; + }
ret = ethtool_vzalloc_stats_array(n_stats, data); if (ret) @@ -2424,6 +2436,10 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev, return -EOPNOTSUPP;
n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS); + if (stats->n_stats && stats->n_stats != n_stats) { + stats->n_stats = 0; + return 0; + }
ret = ethtool_vzalloc_stats_array(n_stats, data); if (ret) @@ -2460,7 +2476,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) }
useraddr += sizeof(stats); - if (copy_to_user(useraddr, data, array_size(stats.n_stats, sizeof(u64)))) + if (stats.n_stats && + copy_to_user(useraddr, data, + array_size(stats.n_stats, sizeof(u64)))) ret = -EFAULT;
out: diff --git a/net/handshake/request.c b/net/handshake/request.c index 94d5cef3e048..5df102534a59 100644 --- a/net/handshake/request.c +++ b/net/handshake/request.c @@ -277,6 +277,8 @@ int handshake_req_submit(struct socket *sock, struct handshake_req *req, out_unlock: spin_unlock(&hn->hn_lock); out_err: + /* Restore original destructor so socket teardown still runs on failure */ + req->hr_sk->sk_destruct = req->hr_odestruct; trace_handshake_submit_err(net, req, req->hr_sk, ret); handshake_req_destroy(req); return ret; @@ -325,7 +327,11 @@ bool handshake_req_cancel(struct sock *sk)
hn = handshake_pernet(net); if (hn && remove_pending(hn, req)) { - /* Request hadn't been accepted */ + /* Request hadn't been accepted - mark cancelled */ + if (test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) { + trace_handshake_cancel_busy(net, req, sk); + return false; + } goto out_true; } if (test_and_set_bit(HANDSHAKE_F_REQ_COMPLETED, &req->hr_flags)) { diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 386aba50930a..acbd77ce6afc 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -682,9 +682,14 @@ struct net_device *hsr_get_port_ndev(struct net_device *ndev, struct hsr_priv *hsr = netdev_priv(ndev); struct hsr_port *port;
+ rcu_read_lock(); hsr_for_each_port(hsr, port) - if (port->type == pt) + if (port->type == pt) { + dev_hold(port->dev); + rcu_read_unlock(); return port->dev; + } + rcu_read_unlock(); return NULL; } EXPORT_SYMBOL(hsr_get_port_ndev); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index ace4e355d164..fa97405c517c 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -205,6 +205,8 @@ struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame, __pskb_copy(frame->skb_prp, skb_headroom(frame->skb_prp), GFP_ATOMIC); + if (!frame->skb_std) + return NULL; } else { /* Unexpected */ WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n", diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index cc86031d2050..658f26d9a9ec 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2059,10 +2059,11 @@ int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) continue; }
- /* Do not flush error routes if network namespace is - * not being dismantled + /* When not flushing the entire table, skip error + * routes that are not marked for deletion. */ - if (!flush_all && fib_props[fa->fa_type].error) { + if (!flush_all && fib_props[fa->fa_type].error && + !(fi->fib_flags & RTNH_F_DEAD)) { slen = fa->fa_slen; continue; } diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index a247bb93908b..f5cc02ea3092 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -1342,7 +1342,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb, /* At this point new_end aligns to 4n, so (new_end & 4) pads to 8n */ pad = ((new_end & 4) + (end & 7)) & 7; len_delta = new_end - (int)end + pad; - ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); + ret_val = skb_cow(skb, + skb_headroom(skb) + (len_delta > 0 ? len_delta : 0)); if (ret_val < 0) return ret_val;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 6789623b2b0d..20f77fdfb73d 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -306,7 +306,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { - __IP6_INC_STATS(dev_net(dst->dev), idev, + __IP6_INC_STATS(dev_net(dst_dev(dst)), idev, IPSTATS_MIB_INHDRERRORS); fail_and_free: kfree_skb(skb); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 4d14ab7f7e99..8117c1784596 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -196,6 +196,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, struct flowi6 *fl6, bool apply_ratelimit) { struct net *net = sock_net(sk); + struct net_device *dev; struct dst_entry *dst; bool res = false;
@@ -208,10 +209,11 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, * this lookup should be more aggressive (not longer than timeout). */ dst = ip6_route_output(net, sk, fl6); + dev = dst_dev(dst); if (dst->error) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); - } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) { + } else if (dev && (dev->flags & IFF_LOOPBACK)) { res = true; } else { struct rt6_info *rt = dst_rt6_info(dst); diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 7d574f5132e2..7bb9edc5c28c 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -70,7 +70,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb) */
memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = orig_dst->dev->ifindex; + fl6.flowi6_oif = dst_dev(orig_dst)->ifindex; fl6.flowi6_iif = LOOPBACK_IFINDEX; fl6.daddr = *rt6_nexthop(dst_rt6_info(orig_dst), &ip6h->daddr); diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c index 647dd8417c6c..9e4774771023 100644 --- a/net/ipv6/ioam6_iptunnel.c +++ b/net/ipv6/ioam6_iptunnel.c @@ -328,7 +328,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb, if (has_tunsrc) memcpy(&hdr->saddr, tunsrc, sizeof(*tunsrc)); else - ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr, + ipv6_dev_get_saddr(net, dst_dev(dst), &hdr->daddr, IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
skb_postpush_rcsum(skb, hdr, len); @@ -338,7 +338,8 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL; + struct dst_entry *orig_dst = skb_dst(skb); + struct dst_entry *dst = NULL; struct ioam6_lwt *ilwt; int err = -EINVAL; u32 pkt_cnt; @@ -346,7 +347,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (skb->protocol != htons(ETH_P_IPV6)) goto drop;
- ilwt = ioam6_lwt_state(dst->lwtstate); + ilwt = ioam6_lwt_state(orig_dst->lwtstate);
/* Check for insertion frequency (i.e., "k over n" insertions) */ pkt_cnt = atomic_fetch_inc(&ilwt->pkt_cnt); @@ -354,7 +355,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto out;
local_bh_disable(); - cache_dst = dst_cache_get(&ilwt->cache); + dst = dst_cache_get(&ilwt->cache); local_bh_enable();
switch (ilwt->mode) { @@ -364,7 +365,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP) goto out;
- err = ioam6_do_inline(net, skb, &ilwt->tuninfo, cache_dst); + err = ioam6_do_inline(net, skb, &ilwt->tuninfo, dst); if (unlikely(err)) goto drop;
@@ -374,7 +375,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) /* Encapsulation (ip6ip6) */ err = ioam6_do_encap(net, skb, &ilwt->tuninfo, ilwt->has_tunsrc, &ilwt->tunsrc, - &ilwt->tundst, cache_dst); + &ilwt->tundst, dst); if (unlikely(err)) goto drop;
@@ -392,7 +393,7 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; }
- if (unlikely(!cache_dst)) { + if (unlikely(!dst)) { struct ipv6hdr *hdr = ipv6_hdr(skb); struct flowi6 fl6;
@@ -403,20 +404,20 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = hdr->nexthdr;
- cache_dst = ip6_route_output(net, NULL, &fl6); - if (cache_dst->error) { - err = cache_dst->error; + dst = ip6_route_output(net, NULL, &fl6); + if (dst->error) { + err = dst->error; goto drop; }
/* cache only if we don't create a dst reference loop */ - if (dst->lwtstate != cache_dst->lwtstate) { + if (orig_dst->lwtstate != dst->lwtstate) { local_bh_disable(); - dst_cache_set_ip6(&ilwt->cache, cache_dst, &fl6.saddr); + dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr); local_bh_enable(); }
- err = skb_cow_head(skb, LL_RESERVED_SPACE(cache_dst->dev)); + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst))); if (unlikely(err)) goto drop; } @@ -424,16 +425,16 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) /* avoid lwtunnel_output() reentry loop when destination is the same * after transformation (e.g., with the inline mode) */ - if (dst->lwtstate != cache_dst->lwtstate) { + if (orig_dst->lwtstate != dst->lwtstate) { skb_dst_drop(skb); - skb_dst_set(skb, cache_dst); + skb_dst_set(skb, dst); return dst_output(net, sk, skb); } out: - dst_release(cache_dst); - return dst->lwtstate->orig_output(net, sk, skb); + dst_release(dst); + return orig_dst->lwtstate->orig_output(net, sk, skb); drop: - dst_release(cache_dst); + dst_release(dst); kfree_skb(skb); return err; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 68e9a41eed49..0b736627ebaf 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1084,9 +1084,11 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, htonl(atomic_fetch_inc(&t->o_seqno)));
/* TooBig packet may have updated dst->dev's mtu */ - if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) - dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false); - + if (!t->parms.collect_md && dst) { + mtu = READ_ONCE(dst_dev(dst)->mtu); + if (dst_mtu(dst) > mtu) + dst->ops->update_pmtu(dst, NULL, skb, mtu, false); + } err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, NEXTHDR_GRE); if (err != 0) { @@ -1395,9 +1397,16 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev, { struct ip6_tnl *t = netdev_priv(dev); struct ipv6hdr *ipv6h; + int needed; __be16 *p;
- ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h)); + needed = t->hlen + sizeof(*ipv6h); + if (skb_headroom(skb) < needed && + pskb_expand_head(skb, HH_DATA_ALIGN(needed - skb_headroom(skb)), + 0, GFP_ATOMIC)) + return -needed; + + ipv6h = skb_push(skb, needed); ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb, t->fl.u.ip6.flowlabel, true, &t->fl.u.ip6)); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f0e5431c2d46..24b68e99636e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -60,7 +60,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - struct net_device *dev = dst->dev; + struct net_device *dev = dst_dev(dst); struct inet6_dev *idev = ip6_dst_idev(dst); unsigned int hh_len = LL_RESERVED_SPACE(dev); const struct in6_addr *daddr, *nexthop; @@ -271,7 +271,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, const struct ipv6_pinfo *np = inet6_sk(sk); struct in6_addr *first_hop = &fl6->daddr; struct dst_entry *dst = skb_dst(skb); - struct net_device *dev = dst->dev; + struct net_device *dev = dst_dev(dst); struct inet6_dev *idev = ip6_dst_idev(dst); struct hop_jumbo_hdr *hop_jumbo; int hoplen = sizeof(*hop_jumbo); @@ -503,7 +503,8 @@ int ip6_forward(struct sk_buff *skb) struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); - struct net *net = dev_net(dst->dev); + struct net *net = dev_net(dst_dev(dst)); + struct net_device *dev; struct inet6_dev *idev; SKB_DR(reason); u32 mtu; @@ -591,12 +592,12 @@ int ip6_forward(struct sk_buff *skb) goto drop; } dst = skb_dst(skb); - + dev = dst_dev(dst); /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. */ - if (IP6CB(skb)->iif == dst->dev->ifindex && + if (IP6CB(skb)->iif == dev->ifindex && opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct inet_peer *peer; @@ -644,7 +645,7 @@ int ip6_forward(struct sk_buff *skb)
if (ip6_pkt_too_big(skb, mtu)) { /* Again, force OUTPUT device used as source address */ - skb->dev = dst->dev; + skb->dev = dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); __IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS); __IP6_INC_STATS(net, ip6_dst_idev(dst), @@ -653,7 +654,7 @@ int ip6_forward(struct sk_buff *skb) return -EMSGSIZE; }
- if (skb_cow(skb, dst->dev->hard_header_len)) { + if (skb_cow(skb, dev->hard_header_len)) { __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; @@ -666,7 +667,7 @@ int ip6_forward(struct sk_buff *skb) hdr->hop_limit--;
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, - net, NULL, skb, skb->dev, dst->dev, + net, NULL, skb, skb->dev, dev, ip6_forward_finish);
error: @@ -1093,7 +1094,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk, #ifdef CONFIG_IPV6_SUBTREES ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) || #endif - (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) { + (fl6->flowi6_oif && fl6->flowi6_oif != dst_dev(dst)->ifindex)) { dst_release(dst); dst = NULL; } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b72ca1034906..6450ecf0d0a7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1179,7 +1179,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, ndst = dst; }
- tdev = dst->dev; + tdev = dst_dev(dst);
if (tdev == dev) { DEV_STATS_INC(dev, collisions); @@ -1255,7 +1255,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, /* Calculate max headroom for all the headers and adjust * needed_headroom if necessary. */ - max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr) + max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr) + dst->header_len + t->hlen; ip_tunnel_adj_headroom(dev, max_headroom);
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index c99053189ea8..2acf1bb93fc0 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c @@ -168,7 +168,7 @@ struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb, netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr); return ERR_PTR(-ENETUNREACH); } - if (dst->dev == dev) { /* is this necessary? */ + if (dst_dev(dst) == dev) { /* is this necessary? */ netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr); dst_release(dst); return ERR_PTR(-ELOOP); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 012350469144..fd6f76e36e80 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -497,7 +497,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) (const struct in6_addr *)&x->id.daddr)) goto tx_err_link_failure;
- tdev = dst->dev; + tdev = dst_dev(dst);
if (tdev == dev) { DEV_STATS_INC(dev, collisions); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 8699d1a188dc..d961e6c2d09d 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -473,6 +473,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr, { struct icmp6hdr *icmp6h = icmp6_hdr(skb); struct dst_entry *dst = skb_dst(skb); + struct net_device *dev; struct inet6_dev *idev; struct net *net; struct sock *sk; @@ -507,11 +508,12 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
- idev = __in6_dev_get(dst->dev); + dev = dst_dev(dst); + idev = __in6_dev_get(dev); IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, - net, sk, skb, NULL, dst->dev, + net, sk, skb, NULL, dev, dst_output); if (!err) { ICMP6MSGOUT_INC_STATS(net, idev, type); diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index 0c39c77fe8a8..a8bb04bd94cf 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c @@ -38,7 +38,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb, } skb_dst_drop(skb); skb_dst_set(skb, dst); - skb->dev = dst->dev; + skb->dev = dst_dev(dst); skb->protocol = htons(ETH_P_IPV6);
return true; diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 806d4b5dd1e6..90a178dd24aa 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -105,7 +105,7 @@ int ip6_dst_hoplimit(struct dst_entry *dst) { int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); if (hoplimit == 0) { - struct net_device *dev = dst->dev; + struct net_device *dev = dst_dev(dst); struct inet6_dev *idev;
rcu_read_lock(); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 22866444efc0..aeac45af3a22 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -228,13 +228,13 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst, const struct rt6_info *rt = dst_rt6_info(dst);
return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any), - dst->dev, skb, daddr); + dst_dev(dst), skb, daddr); }
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) { const struct rt6_info *rt = dst_rt6_info(dst); - struct net_device *dev = dst->dev; + struct net_device *dev = dst_dev(dst);
daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr); if (!daddr) @@ -1470,7 +1470,18 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net,
p = this_cpu_ptr(res->nh->rt6i_pcpu); prev = cmpxchg(p, NULL, pcpu_rt); - BUG_ON(prev); + if (unlikely(prev)) { + /* + * Another task on this CPU already installed a pcpu_rt. + * This can happen on PREEMPT_RT where preemption is possible. + * Free our allocation and return the existing one. + */ + WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RT)); + + dst_dev_put(&pcpu_rt->dst); + dst_release(&pcpu_rt->dst); + return prev; + }
if (res->f6i->fib6_destroying) { struct fib6_info *from; @@ -2934,7 +2945,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
if (res.f6i->nh) { struct fib6_nh_match_arg arg = { - .dev = dst->dev, + .dev = dst_dev(dst), .gw = &rt6->rt6i_gateway, };
@@ -3229,7 +3240,7 @@ EXPORT_SYMBOL_GPL(ip6_sk_redirect);
static unsigned int ip6_default_advmss(const struct dst_entry *dst) { - struct net_device *dev = dst->dev; + struct net_device *dev = dst_dev(dst); unsigned int mtu = dst_mtu(dst); struct net *net;
@@ -4253,7 +4264,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
if (res.f6i->nh) { struct fib6_nh_match_arg arg = { - .dev = dst->dev, + .dev = dst_dev(dst), .gw = &rt->rt6i_gateway, };
@@ -4540,13 +4551,14 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg) static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes) { struct dst_entry *dst = skb_dst(skb); - struct net *net = dev_net(dst->dev); + struct net_device *dev = dst_dev(dst); + struct net *net = dev_net(dev); struct inet6_dev *idev; SKB_DR(reason); int type;
if (netif_is_l3_master(skb->dev) || - dst->dev == net->loopback_dev) + dev == net->loopback_dev) idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif)); else idev = ip6_dst_idev(dst); @@ -5764,11 +5776,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, * each as a nexthop within RTA_MULTIPATH. */ if (rt6) { + struct net_device *dev; + if (rt6_flags & RTF_GATEWAY && nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway)) goto nla_put_failure;
- if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex)) + dev = dst_dev(dst); + if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex)) goto nla_put_failure;
if (dst->lwtstate && diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index eccfa4203e96..c7942cf65567 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -242,7 +242,7 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb) local_bh_enable(); }
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst))); if (unlikely(err)) goto drop; } @@ -297,7 +297,7 @@ static int rpl_input(struct sk_buff *skb) local_bh_enable(); }
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst))); if (unlikely(err)) goto drop; } else { diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 51583461ae29..27918fc0c972 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -128,7 +128,8 @@ static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto, struct dst_entry *cache_dst) { struct dst_entry *dst = skb_dst(skb); - struct net *net = dev_net(dst->dev); + struct net_device *dev = dst_dev(dst); + struct net *net = dev_net(dev); struct ipv6hdr *hdr, *inner_hdr; struct ipv6_sr_hdr *isrh; int hdrlen, tot_len, err; @@ -181,7 +182,7 @@ static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, isrh->nexthdr = proto;
hdr->daddr = isrh->segments[isrh->first_segment]; - set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr); + set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
#ifdef CONFIG_IPV6_SEG6_HMAC if (sr_has_hmac(isrh)) { @@ -212,7 +213,8 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb, { __u8 first_seg = osrh->first_segment; struct dst_entry *dst = skb_dst(skb); - struct net *net = dev_net(dst->dev); + struct net_device *dev = dst_dev(dst); + struct net *net = dev_net(dev); struct ipv6hdr *hdr, *inner_hdr; int hdrlen = ipv6_optlen(osrh); int red_tlv_offset, tlv_offset; @@ -270,7 +272,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb, if (skip_srh) { hdr->nexthdr = proto;
- set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr); + set_tun_src(net, dev, &hdr->daddr, &hdr->saddr); goto out; }
@@ -306,7 +308,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
srcaddr: isrh->nexthdr = proto; - set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr); + set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
#ifdef CONFIG_IPV6_SEG6_HMAC if (unlikely(!skip_srh && sr_has_hmac(isrh))) { @@ -507,7 +509,7 @@ static int seg6_input_core(struct net *net, struct sock *sk, local_bh_enable(); }
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst))); if (unlikely(err)) goto drop; } else { @@ -518,7 +520,7 @@ static int seg6_input_core(struct net *net, struct sock *sk, if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled)) return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, dev_net(skb->dev), NULL, skb, NULL, - skb_dst(skb)->dev, seg6_input_finish); + skb_dst_dev(skb), seg6_input_finish);
return seg6_input_finish(dev_net(skb->dev), NULL, skb); drop: @@ -593,7 +595,7 @@ static int seg6_output_core(struct net *net, struct sock *sk, local_bh_enable(); }
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst))); if (unlikely(err)) goto drop; } @@ -603,7 +605,7 @@ static int seg6_output_core(struct net *net, struct sock *sk,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled)) return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, - NULL, skb_dst(skb)->dev, dst_output); + NULL, dst_dev(dst), dst_output);
return dst_output(net, sk, skb); drop: diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index e445a0a45568..bb196d451a55 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -310,7 +310,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, if (!local_delivery) dev_flags |= IFF_LOOPBACK;
- if (dst && (dst->dev->flags & dev_flags) && !dst->error) { + if (dst && (dst_dev(dst)->flags & dev_flags) && !dst->error) { dst_release(dst); dst = NULL; } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 2df4df75f195..0abb687fd58d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1140,7 +1140,6 @@ ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
size = sizeof(*new) + new_head_len + new_tail_len;
- /* new or old multiple BSSID elements? */ if (params->mbssid_ies) { mbssid = params->mbssid_ies; size += struct_size(new->mbssid_ies, elem, mbssid->cnt); @@ -1150,15 +1149,6 @@ ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, } size += ieee80211_get_mbssid_beacon_len(mbssid, rnr, mbssid->cnt); - } else if (old && old->mbssid_ies) { - mbssid = old->mbssid_ies; - size += struct_size(new->mbssid_ies, elem, mbssid->cnt); - if (old && old->rnr_ies) { - rnr = old->rnr_ies; - size += struct_size(new->rnr_ies, elem, rnr->cnt); - } - size += ieee80211_get_mbssid_beacon_len(mbssid, rnr, - mbssid->cnt); }
new = kzalloc(size, GFP_KERNEL); diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 42329ae21c46..0b4ab3c816da 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1409,7 +1409,8 @@ int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, }
if (tb[MPTCP_PM_ADDR_ATTR_FLAGS]) - entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]); + entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]) & + MPTCP_PM_ADDR_FLAGS_MASK;
if (tb[MPTCP_PM_ADDR_ATTR_PORT]) entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index ed29132fd48e..aab3c96ecd1c 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1637,7 +1637,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) struct mptcp_sendmsg_info info = { .flags = flags, }; - bool do_check_data_fin = false; + bool copied = false; int push_count = 1;
while (mptcp_send_head(sk) && (push_count > 0)) { @@ -1679,7 +1679,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) push_count--; continue; } - do_check_data_fin = true; + copied = true; } } } @@ -1688,11 +1688,14 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) if (ssk) mptcp_push_release(ssk, &info);
- /* ensure the rtx timer is running */ - if (!mptcp_rtx_timer_pending(sk)) - mptcp_reset_rtx_timer(sk); - if (do_check_data_fin) + /* Avoid scheduling the rtx timer if no data has been pushed; the timer + * will be updated on positive acks by __mptcp_cleanup_una(). + */ + if (copied) { + if (!mptcp_rtx_timer_pending(sk)) + mptcp_reset_rtx_timer(sk); mptcp_check_send_data_fin(sk); + } }
static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first) @@ -2746,10 +2749,13 @@ static void __mptcp_retrans(struct sock *sk)
/* * make the whole retrans decision, xmit, disallow - * fallback atomic + * fallback atomic, note that we can't retrans even + * when an infinite fallback is in progress, i.e. new + * subflows are disallowed. */ spin_lock_bh(&msk->fallback_lock); - if (__mptcp_check_fallback(msk)) { + if (__mptcp_check_fallback(msk) || + !msk->allow_subflows) { spin_unlock_bh(&msk->fallback_lock); release_sock(ssk); goto clear_scheduled; diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 014f07740369..fa2db17f6298 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -409,6 +409,9 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, return -1;
err_unreach: + if (!skb->dev) + skb->dev = skb_dst(skb)->dev; + dst_link_failure(skb); return -1; } diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index b84cfb5616df..3c1b155f7a0e 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -172,14 +172,14 @@ static int __nf_conncount_add(struct net *net, struct nf_conn *found_ct; unsigned int collect = 0; bool refcounted = false; + int err = 0;
if (!get_ct_or_tuple_from_skb(net, skb, l3num, &ct, &tuple, &zone, &refcounted)) return -ENOENT;
if (ct && nf_ct_is_confirmed(ct)) { - if (refcounted) - nf_ct_put(ct); - return -EEXIST; + err = -EEXIST; + goto out_put; }
if ((u32)jiffies == list->last_gc) @@ -231,12 +231,16 @@ static int __nf_conncount_add(struct net *net, }
add_new_node: - if (WARN_ON_ONCE(list->count > INT_MAX)) - return -EOVERFLOW; + if (WARN_ON_ONCE(list->count > INT_MAX)) { + err = -EOVERFLOW; + goto out_put; + }
conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); - if (conn == NULL) - return -ENOMEM; + if (conn == NULL) { + err = -ENOMEM; + goto out_put; + }
conn->tuple = tuple; conn->zone = *zone; @@ -249,7 +253,7 @@ static int __nf_conncount_add(struct net *net, out_put: if (refcounted) nf_ct_put(ct); - return 0; + return err; }
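The __nf_conncount_add() rework above routes every early return through the out_put label so the conntrack reference taken at entry is dropped exactly once. Sketched with hypothetical get_ref()/put_ref() helpers (not the netfilter API), the pattern is:

/* Illustrative only: single-exit error handling with a conditional
 * reference drop, mirroring the goto out_put structure used above.
 */
static int do_add(struct thing *t)
{
	bool referenced = get_ref(t);	/* may or may not take a reference */
	int err = 0;

	if (already_confirmed(t)) {
		err = -EEXIST;
		goto out_put;
	}

	if (!alloc_node(t)) {
		err = -ENOMEM;
		goto out_put;
	}

out_put:
	if (referenced)
		put_ref(t);
	return err;
}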
int nf_conncount_add_skb(struct net *net, @@ -446,11 +450,10 @@ insert_tree(struct net *net,
rb_link_node_rcu(&rbconn->node, parent, rbnode); rb_insert_color(&rbconn->node, root); - - if (refcounted) - nf_ct_put(ct); } out_unlock: + if (refcounted) + nf_ct_put(ct); spin_unlock_bh(&nf_conncount_locks[hash]); return count; } diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 02f10a46fab7..746acd124ea2 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -298,25 +298,13 @@ nf_nat_used_tuple_new(const struct nf_conntrack_tuple *tuple,
ct = nf_ct_tuplehash_to_ctrack(thash);
- /* NB: IP_CT_DIR_ORIGINAL should be impossible because - * nf_nat_used_tuple() handles origin collisions. - * - * Handle remote chance other CPU confirmed its ct right after. - */ - if (thash->tuple.dst.dir != IP_CT_DIR_REPLY) - goto out; - /* clashing connection subject to NAT? Retry with new tuple. */ if (READ_ONCE(ct->status) & uses_nat) goto out;
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, - &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple) && - nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, - &ignored_ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) { + &ignored_ct->tuplehash[IP_CT_DIR_REPLY].tuple)) taken = false; - goto out; - } out: nf_ct_put(ct); return taken; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index e1c617b48888..b4741fb33798 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -11211,21 +11211,10 @@ static int nft_validate_register_store(const struct nft_ctx *ctx, enum nft_data_types type, unsigned int len) { - int err; - switch (reg) { case NFT_REG_VERDICT: if (type != NFT_DATA_VERDICT) return -EINVAL; - - if (data != NULL && - (data->verdict.code == NFT_GOTO || - data->verdict.code == NFT_JUMP)) { - err = nft_chain_validate(ctx, data->verdict.chain); - if (err < 0) - return err; - } - break; default: if (type != NFT_DATA_VALUE) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index a1b373b99f7b..58a6ad7ed7a4 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -22,6 +22,7 @@ #include <net/netfilter/nf_conntrack_timeout.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_expect.h> +#include <net/netfilter/nf_conntrack_seqadj.h>
struct nft_ct_helper_obj { struct nf_conntrack_helper *helper4; @@ -1173,6 +1174,10 @@ static void nft_ct_helper_obj_eval(struct nft_object *obj, if (help) { rcu_assign_pointer(help->helper, to_assign); set_bit(IPS_HELPER_BIT, &ct->status); + + if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct)) + if (!nfct_seqadj_ext_add(ct)) + regs->verdict.code = NF_DROP; } }
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c index 5e531394a724..2b3cbceb0b52 100644 --- a/net/netrom/nr_out.c +++ b/net/netrom/nr_out.c @@ -43,8 +43,10 @@ void nr_output(struct sock *sk, struct sk_buff *skb) frontlen = skb_headroom(skb);
while (skb->len > 0) { - if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL) + if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL) { + kfree_skb(skb); return; + }
skb_reserve(skbn, frontlen);
diff --git a/net/nfc/core.c b/net/nfc/core.c index e58dc6405054..eebe9b511e0e 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -1154,6 +1154,7 @@ EXPORT_SYMBOL(nfc_register_device); void nfc_unregister_device(struct nfc_dev *dev) { int rc; + struct rfkill *rfk = NULL;
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
@@ -1164,13 +1165,17 @@ void nfc_unregister_device(struct nfc_dev *dev)
device_lock(&dev->dev); if (dev->rfkill) { - rfkill_unregister(dev->rfkill); - rfkill_destroy(dev->rfkill); + rfk = dev->rfkill; dev->rfkill = NULL; } dev->shutting_down = true; device_unlock(&dev->dev);
+ if (rfk) { + rfkill_unregister(rfk); + rfkill_destroy(rfk); + } + if (dev->ops->check_presence) { del_timer_sync(&dev->check_pres_timer); cancel_work_sync(&dev->check_pres_work); diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index e3359e15aa2e..7d5490ea23e1 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2802,13 +2802,20 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, return err; }
-static bool validate_push_nsh(const struct nlattr *attr, bool log) +static bool validate_push_nsh(const struct nlattr *a, bool log) { + struct nlattr *nsh_key = nla_data(a); struct sw_flow_match match; struct sw_flow_key key;
+ /* There must be one and only one NSH header. */ + if (!nla_ok(nsh_key, nla_len(a)) || + nla_total_size(nla_len(nsh_key)) != nla_len(a) || + nla_type(nsh_key) != OVS_KEY_ATTR_NSH) + return false; + ovs_match_init(&match, &key, true, NULL); - return !nsh_key_put_from_nlattr(attr, &match, false, true, log); + return !nsh_key_put_from_nlattr(nsh_key, &match, false, true, log); }
/* Return false if there are any non-masked bits set. @@ -3388,7 +3395,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, return -EINVAL; } mac_proto = MAC_PROTO_NONE; - if (!validate_push_nsh(nla_data(a), log)) + if (!validate_push_nsh(a, log)) return -EINVAL; break;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 91a11067e458..6574f9bcdc02 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -160,10 +160,19 @@ void ovs_netdev_detach_dev(struct vport *vport)
static void netdev_destroy(struct vport *vport) { - rtnl_lock(); - if (netif_is_ovs_port(vport->dev)) - ovs_netdev_detach_dev(vport); - rtnl_unlock(); + /* When called from ovs_dp_notify_wq() after a dp_device_event(), the + * port has already been detached, so we can avoid taking the RTNL by + * checking this first. + */ + if (netif_is_ovs_port(vport->dev)) { + rtnl_lock(); + /* Check again while holding the lock to ensure we don't race + * with the netdev notifier and detach twice. + */ + if (netif_is_ovs_port(vport->dev)) + ovs_netdev_detach_dev(vport); + rtnl_unlock(); + }
call_rcu(&vport->rcu, vport_netdev_free); } diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index b8078b42f5de..1676c9f4ab84 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -205,7 +205,7 @@ static void rose_kill_by_device(struct net_device *dev) spin_unlock_bh(&rose_list_lock);
for (i = 0; i < cnt; i++) { - sk = array[cnt]; + sk = array[i]; rose = rose_sk(sk); lock_sock(sk); spin_lock_bh(&rose_list_lock); diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 82635dd2cfa5..306e046276d4 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -652,7 +652,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch);
for (i = nbands; i < oldbands; i++) { - if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) + if (cl_is_active(&q->classes[i])) list_del_init(&q->classes[i].alist); qdisc_purge_queue(q->classes[i].qdisc); } @@ -664,6 +664,10 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, q->classes[i].deficit = quanta[i]; } } + for (i = q->nstrict; i < nstrict; i++) { + if (cl_is_active(&q->classes[i])) + list_del_init(&q->classes[i].alist); + } WRITE_ONCE(q->nstrict, nstrict); memcpy(q->prio2band, priomap, sizeof(priomap));
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 2d5ac2b3d526..fb362707f20e 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1083,7 +1083,8 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp, }
length = min_t(unsigned int, inlen, (char *)xdr->end - (char *)xdr->p); - memcpy(page_address(in_token->pages[0]), xdr->p, length); + if (length) + memcpy(page_address(in_token->pages[0]), xdr->p, length); inlen -= length;
to_offs = length; diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 40797114d50a..8156dc20b852 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -841,6 +841,9 @@ static int svc_rdma_copy_inline_range(struct svc_rqst *rqstp, for (page_no = 0; page_no < numpages; page_no++) { unsigned int page_len;
+ if (head->rc_curpage >= ARRAY_SIZE(rqstp->rq_pages)) + return -EINVAL; + page_len = min_t(unsigned int, remaining, PAGE_SIZE - head->rc_pageoff);
@@ -848,7 +851,7 @@ static int svc_rdma_copy_inline_range(struct svc_rqst *rqstp, head->rc_page_count++;
dst = page_address(rqstp->rq_pages[head->rc_curpage]); - memcpy(dst + head->rc_curpage, src + offset, page_len); + memcpy((unsigned char *)dst + head->rc_pageoff, src + offset, page_len);
head->rc_readbytes += page_len; head->rc_pageoff += page_len; @@ -860,7 +863,7 @@ static int svc_rdma_copy_inline_range(struct svc_rqst *rqstp, offset += page_len; }
- return -EINVAL; + return 0; }
/** diff --git a/net/wireless/core.c b/net/wireless/core.c index dc207a8986c7..6bb8a7037d24 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1343,6 +1343,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
cfg80211_pmsr_wdev_down(wdev);
+ cfg80211_stop_radar_detection(wdev); cfg80211_stop_background_radar_detection(wdev);
switch (wdev->iftype) { diff --git a/net/wireless/core.h b/net/wireless/core.h index 3b3e3cd7027a..d4b26cbe3342 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -483,6 +483,7 @@ cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rde struct wireless_dev *wdev, struct cfg80211_chan_def *chandef);
+void cfg80211_stop_radar_detection(struct wireless_dev *wdev); void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev);
void cfg80211_background_cac_done_wk(struct work_struct *work); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index d1a66410b9c5..26319522c7ab 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -1271,6 +1271,25 @@ cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rde return 0; }
+void cfg80211_stop_radar_detection(struct wireless_dev *wdev) +{ + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + int link_id; + + for_each_valid_link(wdev, link_id) { + struct cfg80211_chan_def chandef; + + if (!wdev->links[link_id].cac_started) + continue; + + chandef = *wdev_chandef(wdev, link_id); + rdev_end_cac(rdev, wdev->netdev, link_id); + nl80211_radar_notify(rdev, &chandef, NL80211_RADAR_CAC_ABORTED, + wdev->netdev, GFP_KERNEL); + } +} + void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev) { struct wiphy *wiphy = wdev->wiphy; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index e0d3c713538b..d8250ae17d94 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -913,7 +913,7 @@ void __cfg80211_connect_result(struct net_device *dev,
ssid_len = min(ssid->datalen, IEEE80211_MAX_SSID_LEN); memcpy(wdev->u.client.ssid, ssid->data, ssid_len); - wdev->u.client.ssid_len = ssid->datalen; + wdev->u.client.ssid_len = ssid_len; break; } rcu_read_unlock(); diff --git a/net/wireless/util.c b/net/wireless/util.c index b115489a846f..6aff651a9b68 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1230,28 +1230,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, dev->ieee80211_ptr->use_4addr = false; rdev_set_qos_map(rdev, dev, NULL);
- switch (otype) { - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_P2P_GO: - cfg80211_stop_ap(rdev, dev, -1, true); - break; - case NL80211_IFTYPE_ADHOC: - cfg80211_leave_ibss(rdev, dev, false); - break; - case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_P2P_CLIENT: - cfg80211_disconnect(rdev, dev, - WLAN_REASON_DEAUTH_LEAVING, true); - break; - case NL80211_IFTYPE_MESH_POINT: - /* mesh should be handled? */ - break; - case NL80211_IFTYPE_OCB: - cfg80211_leave_ocb(rdev, dev); - break; - default: - break; - } + cfg80211_leave(rdev, dev->ieee80211_ptr);
cfg80211_process_rdev_events(rdev); cfg80211_mlme_purge_registrations(dev->ieee80211_ptr); diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c index 328c6e60f024..3bb791e0b05e 100644 --- a/samples/ftrace/ftrace-direct-modify.c +++ b/samples/ftrace/ftrace-direct-modify.c @@ -176,8 +176,8 @@ asm ( " st.d $t0, $sp, 0\n" " st.d $ra, $sp, 8\n" " bl my_direct_func1\n" -" ld.d $t0, $sp, 0\n" -" ld.d $ra, $sp, 8\n" +" ld.d $ra, $sp, 0\n" +" ld.d $t0, $sp, 8\n" " addi.d $sp, $sp, 16\n" " jr $t0\n" " .size my_tramp1, .-my_tramp1\n" @@ -189,8 +189,8 @@ asm ( " st.d $t0, $sp, 0\n" " st.d $ra, $sp, 8\n" " bl my_direct_func2\n" -" ld.d $t0, $sp, 0\n" -" ld.d $ra, $sp, 8\n" +" ld.d $ra, $sp, 0\n" +" ld.d $t0, $sp, 8\n" " addi.d $sp, $sp, 16\n" " jr $t0\n" " .size my_tramp2, .-my_tramp2\n" diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c index f943e40d57fd..cc7f42999c00 100644 --- a/samples/ftrace/ftrace-direct-multi-modify.c +++ b/samples/ftrace/ftrace-direct-multi-modify.c @@ -199,8 +199,8 @@ asm ( " move $a0, $t0\n" " bl my_direct_func1\n" " ld.d $a0, $sp, 0\n" -" ld.d $t0, $sp, 8\n" -" ld.d $ra, $sp, 16\n" +" ld.d $ra, $sp, 8\n" +" ld.d $t0, $sp, 16\n" " addi.d $sp, $sp, 32\n" " jr $t0\n" " .size my_tramp1, .-my_tramp1\n" @@ -215,8 +215,8 @@ asm ( " move $a0, $t0\n" " bl my_direct_func2\n" " ld.d $a0, $sp, 0\n" -" ld.d $t0, $sp, 8\n" -" ld.d $ra, $sp, 16\n" +" ld.d $ra, $sp, 8\n" +" ld.d $t0, $sp, 16\n" " addi.d $sp, $sp, 32\n" " jr $t0\n" " .size my_tramp2, .-my_tramp2\n" diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c index aed6df2927ce..1cebee87c5a6 100644 --- a/samples/ftrace/ftrace-direct-multi.c +++ b/samples/ftrace/ftrace-direct-multi.c @@ -131,8 +131,8 @@ asm ( " move $a0, $t0\n" " bl my_direct_func\n" " ld.d $a0, $sp, 0\n" -" ld.d $t0, $sp, 8\n" -" ld.d $ra, $sp, 16\n" +" ld.d $ra, $sp, 8\n" +" ld.d $t0, $sp, 16\n" " addi.d $sp, $sp, 32\n" " jr $t0\n" " .size my_tramp, .-my_tramp\n" diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c index 6ff546a5d7eb..81e6fb874d8a 100644 --- a/samples/ftrace/ftrace-direct-too.c +++ b/samples/ftrace/ftrace-direct-too.c @@ -143,8 +143,8 @@ asm ( " ld.d $a0, $sp, 0\n" " ld.d $a1, $sp, 8\n" " ld.d $a2, $sp, 16\n" -" ld.d $t0, $sp, 24\n" -" ld.d $ra, $sp, 32\n" +" ld.d $ra, $sp, 24\n" +" ld.d $t0, $sp, 32\n" " addi.d $sp, $sp, 48\n" " jr $t0\n" " .size my_tramp, .-my_tramp\n" diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c index ef0945670e1e..7b0ff59f650f 100644 --- a/samples/ftrace/ftrace-direct.c +++ b/samples/ftrace/ftrace-direct.c @@ -124,8 +124,8 @@ asm ( " st.d $ra, $sp, 16\n" " bl my_direct_func\n" " ld.d $a0, $sp, 0\n" -" ld.d $t0, $sp, 8\n" -" ld.d $ra, $sp, 16\n" +" ld.d $ra, $sp, 8\n" +" ld.d $t0, $sp, 16\n" " addi.d $sp, $sp, 32\n" " jr $t0\n" " .size my_tramp, .-my_tramp\n" diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 2c5c1a214f3b..6e07023b5442 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -449,18 +449,6 @@ ifneq ($(userprogs),) include $(srctree)/scripts/Makefile.userprogs endif
-ifneq ($(need-dtbslist)$(dtb-y)$(dtb-)$(filter %.dtb %.dtb.o %.dtbo.o,$(targets)),) -include $(srctree)/scripts/Makefile.dtbs -endif - -# Build -# --------------------------------------------------------------------------- - -$(obj)/: $(if $(KBUILD_BUILTIN), $(targets-for-builtin)) \ - $(if $(KBUILD_MODULES), $(targets-for-modules)) \ - $(subdir-ym) $(always-y) - @: - # Single targets # ---------------------------------------------------------------------------
@@ -490,6 +478,20 @@ FORCE: targets += $(filter-out $(single-subdir-goals), $(MAKECMDGOALS)) targets := $(filter-out $(PHONY), $(targets))
+# Now that targets is fully known, include dtb rules if needed +ifneq ($(need-dtbslist)$(dtb-y)$(dtb-)$(filter %.dtb %.dtb.o %.dtbo.o,$(targets)),) +include $(srctree)/scripts/Makefile.dtbs +endif + +# Build +# Needs to be after the include of Makefile.dtbs, which updates always-y +# --------------------------------------------------------------------------- + +$(obj)/: $(if $(KBUILD_BUILTIN), $(targets-for-builtin)) \ + $(if $(KBUILD_MODULES), $(targets-for-modules)) \ + $(subdir-ym) $(always-y) + @: + # Read all saved command lines and dependencies for the $(targets) we # may be building above, using $(if_changed{,_dep}). As an # optimization, we don't need to read them if the target does not diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst index d97720943189..5dd52788a042 100644 --- a/scripts/Makefile.modinst +++ b/scripts/Makefile.modinst @@ -100,7 +100,7 @@ endif # Don't stop modules_install even if we can't sign external modules. # ifeq ($(filter pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),) -sig-key := $(if $(wildcard $(CONFIG_MODULE_SIG_KEY)),,$(srctree)/)$(CONFIG_MODULE_SIG_KEY) +sig-key := $(if $(wildcard $(CONFIG_MODULE_SIG_KEY)),,$(objtree)/)$(CONFIG_MODULE_SIG_KEY) else sig-key := $(CONFIG_MODULE_SIG_KEY) endif diff --git a/scripts/faddr2line b/scripts/faddr2line index 1fa6beef9f97..477b6d2aa317 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line @@ -107,14 +107,19 @@ find_dir_prefix() {
run_readelf() { local objfile=$1 - local out=$(${READELF} --file-header --section-headers --symbols --wide $objfile) + local tmpfile + tmpfile=$(mktemp) + + ${READELF} --file-header --section-headers --symbols --wide "$objfile" > "$tmpfile"
# This assumes that readelf first prints the file header, then the section headers, then the symbols. # Note: It seems that GNU readelf does not prefix section headers with the "There are X section headers" # line when multiple options are given, so let's also match with the "Section Headers:" line. - ELF_FILEHEADER=$(echo "${out}" | sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/q;p') - ELF_SECHEADERS=$(echo "${out}" | sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/,$p' | sed -n '/Symbol table .* contains [0-9]* entries:/q;p') - ELF_SYMS=$(echo "${out}" | sed -n '/Symbol table .* contains [0-9]* entries:/,$p') + ELF_FILEHEADER=$(sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/q;p' "$tmpfile") + ELF_SECHEADERS=$(sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/,$p' "$tmpfile" | sed -n '/Symbol table .* contains [0-9]* entries:/q;p') + ELF_SYMS=$(sed -n '/Symbol table .* contains [0-9]* entries:/,$p' "$tmpfile") + + rm -f -- "$tmpfile" }
check_vmlinux() { diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c index 024be262702f..76a86f8a8d6c 100644 --- a/security/keys/trusted-keys/trusted_tpm2.c +++ b/security/keys/trusted-keys/trusted_tpm2.c @@ -387,6 +387,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip, struct trusted_key_options *options, u32 *blob_handle) { + u8 *blob_ref __free(kfree) = NULL; struct tpm_buf buf; unsigned int private_len; unsigned int public_len; @@ -400,6 +401,9 @@ static int tpm2_load_cmd(struct tpm_chip *chip, /* old form */ blob = payload->blob; payload->old_format = 1; + } else { + /* Bind for cleanup: */ + blob_ref = blob; }
/* new format carries keyhandle but old format doesn't */ @@ -464,8 +468,6 @@ static int tpm2_load_cmd(struct tpm_chip *chip, (__be32 *) &buf.data[TPM_HEADER_SIZE]);
out: - if (blob != payload->blob) - kfree(blob); tpm_buf_destroy(&buf);
if (rc > 0) diff --git a/sound/isa/wavefront/wavefront_midi.c b/sound/isa/wavefront/wavefront_midi.c index ead8cbe638de..b9593b481396 100644 --- a/sound/isa/wavefront/wavefront_midi.c +++ b/sound/isa/wavefront/wavefront_midi.c @@ -113,7 +113,6 @@ static void snd_wavefront_midi_output_write(snd_wavefront_card_t *card) { snd_wavefront_midi_t *midi = &card->wavefront.midi; snd_wavefront_mpu_id mpu; - unsigned long flags; unsigned char midi_byte; int max = 256, mask = 1; int timeout; @@ -142,11 +141,9 @@ static void snd_wavefront_midi_output_write(snd_wavefront_card_t *card) break; } - spin_lock_irqsave (&midi->virtual, flags); - if ((midi->mode[midi->output_mpu] & MPU401_MODE_OUTPUT) == 0) { - spin_unlock_irqrestore (&midi->virtual, flags); + guard(spinlock_irqsave)(&midi->virtual); + if ((midi->mode[midi->output_mpu] & MPU401_MODE_OUTPUT) == 0) goto __second; - } if (output_ready (midi)) { if (snd_rawmidi_transmit(midi->substream_output[midi->output_mpu], &midi_byte, 1) == 1) { if (!midi->isvirtual || @@ -160,14 +157,11 @@ static void snd_wavefront_midi_output_write(snd_wavefront_card_t *card) del_timer(&midi->timer); } midi->mode[midi->output_mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; - spin_unlock_irqrestore (&midi->virtual, flags); goto __second; } } else { - spin_unlock_irqrestore (&midi->virtual, flags); return; } - spin_unlock_irqrestore (&midi->virtual, flags); }
__second: @@ -185,15 +179,13 @@ static void snd_wavefront_midi_output_write(snd_wavefront_card_t *card) break; } - spin_lock_irqsave (&midi->virtual, flags); + guard(spinlock_irqsave)(&midi->virtual); if (!midi->isvirtual) mask = 0; mpu = midi->output_mpu ^ mask; mask = 0; /* don't invert the value from now */ - if ((midi->mode[mpu] & MPU401_MODE_OUTPUT) == 0) { - spin_unlock_irqrestore (&midi->virtual, flags); + if ((midi->mode[mpu] & MPU401_MODE_OUTPUT) == 0) return; - } if (snd_rawmidi_transmit_empty(midi->substream_output[mpu])) goto __timer; if (output_ready (midi)) { @@ -215,20 +207,16 @@ static void snd_wavefront_midi_output_write(snd_wavefront_card_t *card) del_timer(&midi->timer); } midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; - spin_unlock_irqrestore (&midi->virtual, flags); return; } } else { - spin_unlock_irqrestore (&midi->virtual, flags); return; } - spin_unlock_irqrestore (&midi->virtual, flags); } }
static int snd_wavefront_midi_input_open(struct snd_rawmidi_substream *substream) { - unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu;
@@ -243,17 +231,15 @@ static int snd_wavefront_midi_input_open(struct snd_rawmidi_substream *substream if (!midi) return -EIO;
- spin_lock_irqsave (&midi->open, flags); + guard(spinlock_irqsave)(&midi->open); midi->mode[mpu] |= MPU401_MODE_INPUT; midi->substream_input[mpu] = substream; - spin_unlock_irqrestore (&midi->open, flags);
return 0; }
static int snd_wavefront_midi_output_open(struct snd_rawmidi_substream *substream) { - unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu;
@@ -268,17 +254,15 @@ static int snd_wavefront_midi_output_open(struct snd_rawmidi_substream *substrea if (!midi) return -EIO;
- spin_lock_irqsave (&midi->open, flags); + guard(spinlock_irqsave)(&midi->open); midi->mode[mpu] |= MPU401_MODE_OUTPUT; midi->substream_output[mpu] = substream; - spin_unlock_irqrestore (&midi->open, flags);
return 0; }
static int snd_wavefront_midi_input_close(struct snd_rawmidi_substream *substream) { - unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu;
@@ -293,16 +277,15 @@ static int snd_wavefront_midi_input_close(struct snd_rawmidi_substream *substrea if (!midi) return -EIO;
- spin_lock_irqsave (&midi->open, flags); + guard(spinlock_irqsave)(&midi->open); + midi->substream_input[mpu] = NULL; midi->mode[mpu] &= ~MPU401_MODE_INPUT; - spin_unlock_irqrestore (&midi->open, flags);
return 0; }
static int snd_wavefront_midi_output_close(struct snd_rawmidi_substream *substream) { - unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu;
@@ -317,15 +300,14 @@ static int snd_wavefront_midi_output_close(struct snd_rawmidi_substream *substre if (!midi) return -EIO;
- spin_lock_irqsave (&midi->open, flags); + guard(spinlock_irqsave)(&midi->open); + midi->substream_output[mpu] = NULL; midi->mode[mpu] &= ~MPU401_MODE_OUTPUT; - spin_unlock_irqrestore (&midi->open, flags); return 0; }
static void snd_wavefront_midi_input_trigger(struct snd_rawmidi_substream *substream, int up) { - unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu;
@@ -341,30 +323,27 @@ static void snd_wavefront_midi_input_trigger(struct snd_rawmidi_substream *subst if (!midi) return;
- spin_lock_irqsave (&midi->virtual, flags); + guard(spinlock_irqsave)(&midi->virtual); if (up) { midi->mode[mpu] |= MPU401_MODE_INPUT_TRIGGER; } else { midi->mode[mpu] &= ~MPU401_MODE_INPUT_TRIGGER; } - spin_unlock_irqrestore (&midi->virtual, flags); }
static void snd_wavefront_midi_output_timer(struct timer_list *t) { snd_wavefront_midi_t *midi = from_timer(midi, t, timer); snd_wavefront_card_t *card = midi->timer_card; - unsigned long flags; - spin_lock_irqsave (&midi->virtual, flags); - mod_timer(&midi->timer, 1 + jiffies); - spin_unlock_irqrestore (&midi->virtual, flags); + scoped_guard(spinlock_irqsave, &midi->virtual) { + mod_timer(&midi->timer, 1 + jiffies); + } snd_wavefront_midi_output_write(card); }
static void snd_wavefront_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) { - unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu;
@@ -380,22 +359,22 @@ static void snd_wavefront_midi_output_trigger(struct snd_rawmidi_substream *subs if (!midi) return;
- spin_lock_irqsave (&midi->virtual, flags); - if (up) { - if ((midi->mode[mpu] & MPU401_MODE_OUTPUT_TRIGGER) == 0) { - if (!midi->istimer) { - timer_setup(&midi->timer, - snd_wavefront_midi_output_timer, - 0); - mod_timer(&midi->timer, 1 + jiffies); + scoped_guard(spinlock_irqsave, &midi->virtual) { + if (up) { + if ((midi->mode[mpu] & MPU401_MODE_OUTPUT_TRIGGER) == 0) { + if (!midi->istimer) { + timer_setup(&midi->timer, + snd_wavefront_midi_output_timer, + 0); + mod_timer(&midi->timer, 1 + jiffies); + } + midi->istimer++; + midi->mode[mpu] |= MPU401_MODE_OUTPUT_TRIGGER; } - midi->istimer++; - midi->mode[mpu] |= MPU401_MODE_OUTPUT_TRIGGER; + } else { + midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; } - } else { - midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; } - spin_unlock_irqrestore (&midi->virtual, flags);
if (up) snd_wavefront_midi_output_write((snd_wavefront_card_t *)substream->rmidi->card->private_data); @@ -405,7 +384,6 @@ void snd_wavefront_midi_interrupt (snd_wavefront_card_t *card)
{ - unsigned long flags; snd_wavefront_midi_t *midi; static struct snd_rawmidi_substream *substream = NULL; static int mpu = external_mpu; @@ -419,37 +397,37 @@ snd_wavefront_midi_interrupt (snd_wavefront_card_t *card) return; }
- spin_lock_irqsave (&midi->virtual, flags); - while (--max) { - - if (input_avail (midi)) { - byte = read_data (midi); - - if (midi->isvirtual) { - if (byte == WF_EXTERNAL_SWITCH) { - substream = midi->substream_input[external_mpu]; - mpu = external_mpu; - } else if (byte == WF_INTERNAL_SWITCH) { - substream = midi->substream_output[internal_mpu]; + scoped_guard(spinlock_irqsave, &midi->virtual) { + while (--max) { + + if (input_avail(midi)) { + byte = read_data(midi); + + if (midi->isvirtual) { + if (byte == WF_EXTERNAL_SWITCH) { + substream = midi->substream_input[external_mpu]; + mpu = external_mpu; + } else if (byte == WF_INTERNAL_SWITCH) { + substream = midi->substream_output[internal_mpu]; + mpu = internal_mpu; + } /* else just leave it as it is */ + } else { + substream = midi->substream_input[internal_mpu]; mpu = internal_mpu; - } /* else just leave it as it is */ - } else { - substream = midi->substream_input[internal_mpu]; - mpu = internal_mpu; - } + }
- if (substream == NULL) { - continue; - } + if (substream == NULL) { + continue; + }
- if (midi->mode[mpu] & MPU401_MODE_INPUT_TRIGGER) { - snd_rawmidi_receive(substream, &byte, 1); + if (midi->mode[mpu] & MPU401_MODE_INPUT_TRIGGER) { + snd_rawmidi_receive(substream, &byte, 1); + } + } else { + break; } - } else { - break; } - } - spin_unlock_irqrestore (&midi->virtual, flags); + }
snd_wavefront_midi_output_write(card); } @@ -471,13 +449,10 @@ void snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *card)
{ - unsigned long flags; - - spin_lock_irqsave (&card->wavefront.midi.virtual, flags); + guard(spinlock_irqsave)(&card->wavefront.midi.virtual); // snd_wavefront_midi_input_close (card->ics2115_external_rmidi); // snd_wavefront_midi_output_close (card->ics2115_external_rmidi); card->wavefront.midi.isvirtual = 0; - spin_unlock_irqrestore (&card->wavefront.midi.virtual, flags); }
int diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c index 9eaab9ca4f95..0d78533e1cfd 100644 --- a/sound/isa/wavefront/wavefront_synth.c +++ b/sound/isa/wavefront/wavefront_synth.c @@ -1741,10 +1741,10 @@ snd_wavefront_internal_interrupt (snd_wavefront_card_t *card) return; }
- spin_lock(&dev->irq_lock); - dev->irq_ok = 1; - dev->irq_cnt++; - spin_unlock(&dev->irq_lock); + scoped_guard(spinlock, &dev->irq_lock) { + dev->irq_ok = 1; + dev->irq_cnt++; + } wake_up(&dev->interrupt_sleeper); }
@@ -1796,11 +1796,11 @@ wavefront_should_cause_interrupt (snd_wavefront_t *dev, wait_queue_entry_t wait;
init_waitqueue_entry(&wait, current); - spin_lock_irq(&dev->irq_lock); - add_wait_queue(&dev->interrupt_sleeper, &wait); - dev->irq_ok = 0; - outb (val,port); - spin_unlock_irq(&dev->irq_lock); + scoped_guard(spinlock_irq, &dev->irq_lock) { + add_wait_queue(&dev->interrupt_sleeper, &wait); + dev->irq_ok = 0; + outb(val, port); + } while (!dev->irq_ok && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); barrier(); diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c index d68bf7591d90..e115b9bd7ce3 100644 --- a/sound/pci/hda/cs35l41_hda.c +++ b/sound/pci/hda/cs35l41_hda.c @@ -1865,6 +1865,8 @@ static int cs35l41_hda_read_acpi(struct cs35l41_hda *cs35l41, const char *hid, i
cs35l41->dacpi = adev; physdev = get_device(acpi_get_first_physical_node(adev)); + if (!physdev) + return -ENODEV;
sub = acpi_get_subsystem_id(ACPI_HANDLE(physdev)); if (IS_ERR(sub)) diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c index 494460746614..7531e89e35da 100644 --- a/sound/pcmcia/pdaudiocf/pdaudiocf.c +++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c @@ -131,7 +131,13 @@ static int snd_pdacf_probe(struct pcmcia_device *link) link->config_index = 1; link->config_regs = PRESENT_OPTION;
- return pdacf_config(link); + err = pdacf_config(link); + if (err < 0) { + card_list[i] = NULL; + snd_card_free(card); + return err; + } + return 0; }
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c index d2d5f64d63b4..e1f5b8cfeef0 100644 --- a/sound/pcmcia/vx/vxpocket.c +++ b/sound/pcmcia/vx/vxpocket.c @@ -284,7 +284,13 @@ static int vxpocket_probe(struct pcmcia_device *p_dev)
vxp->p_dev = p_dev;
- return vxpocket_config(p_dev); + err = vxpocket_config(p_dev); + if (err < 0) { + card_alloc &= ~(1 << i); + snd_card_free(card); + return err; + } + return 0; }
static void vxpocket_detach(struct pcmcia_device *link) diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c index fb1ab335a4c1..e2e12dbc8cf2 100644 --- a/sound/soc/codecs/ak4458.c +++ b/sound/soc/codecs/ak4458.c @@ -790,16 +790,12 @@ static int ak4458_i2c_probe(struct i2c_client *i2c)
pm_runtime_enable(&i2c->dev); regcache_cache_only(ak4458->regmap, true); - ak4458_reset(ak4458, false);
return 0; }
static void ak4458_i2c_remove(struct i2c_client *i2c) { - struct ak4458_priv *ak4458 = i2c_get_clientdata(i2c); - - ak4458_reset(ak4458, true); pm_runtime_disable(&i2c->dev); }
diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c index 74e69572796b..ae1a3be26837 100644 --- a/sound/soc/codecs/lpass-tx-macro.c +++ b/sound/soc/codecs/lpass-tx-macro.c @@ -2474,7 +2474,8 @@ static const struct tx_macro_data lpass_ver_9_2 = { };
static const struct tx_macro_data lpass_ver_10_sm6115 = { - .flags = LPASS_MACRO_FLAG_HAS_NPL_CLOCK, + .flags = LPASS_MACRO_FLAG_HAS_NPL_CLOCK | + LPASS_MACRO_FLAG_RESET_SWR, .ver = LPASS_VER_10_0_0, .extra_widgets = tx_macro_dapm_widgets_v9_2, .extra_widgets_num = ARRAY_SIZE(tx_macro_dapm_widgets_v9_2), diff --git a/sound/soc/codecs/wcd939x-sdw.c b/sound/soc/codecs/wcd939x-sdw.c index fca95777a75a..e42b0b23b75a 100644 --- a/sound/soc/codecs/wcd939x-sdw.c +++ b/sound/soc/codecs/wcd939x-sdw.c @@ -1480,12 +1480,18 @@ static int wcd9390_probe(struct sdw_slave *pdev, const struct sdw_device_id *id)
ret = component_add(dev, &wcd939x_sdw_component_ops); if (ret) - return ret; + goto err_free_regmap;
/* Set suspended until aggregate device is bind */ pm_runtime_set_suspended(dev);
return 0; + +err_free_regmap: + if (wcd->regmap) + regmap_exit(wcd->regmap); + + return ret; }
static int wcd9390_remove(struct sdw_slave *pdev) diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c index 1530e98df165..75a029a696ac 100644 --- a/sound/soc/qcom/qdsp6/q6adm.c +++ b/sound/soc/qcom/qdsp6/q6adm.c @@ -109,11 +109,75 @@ static struct q6copp *q6adm_find_copp(struct q6adm *adm, int port_idx,
}
+static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct q6copp *copp, + struct apr_pkt *pkt, uint32_t rsp_opcode) +{ + struct device *dev = adm->dev; + uint32_t opcode = pkt->hdr.opcode; + int ret; + + mutex_lock(&adm->lock); + copp->result.opcode = 0; + copp->result.status = 0; + ret = apr_send_pkt(adm->apr, pkt); + if (ret < 0) { + dev_err(dev, "Failed to send APR packet\n"); + ret = -EINVAL; + goto err; + } + + /* Wait for the callback with copp id */ + if (rsp_opcode) + ret = wait_event_timeout(copp->wait, + (copp->result.opcode == opcode) || + (copp->result.opcode == rsp_opcode), + msecs_to_jiffies(TIMEOUT_MS)); + else + ret = wait_event_timeout(copp->wait, + (copp->result.opcode == opcode), + msecs_to_jiffies(TIMEOUT_MS)); + + if (!ret) { + dev_err(dev, "ADM copp cmd timedout\n"); + ret = -ETIMEDOUT; + } else if (copp->result.status > 0) { + dev_err(dev, "DSP returned error[%d]\n", + copp->result.status); + ret = -EINVAL; + } + +err: + mutex_unlock(&adm->lock); + return ret; +} + +static int q6adm_device_close(struct q6adm *adm, struct q6copp *copp, + int port_id, int copp_idx) +{ + struct apr_pkt close; + + close.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + close.hdr.pkt_size = sizeof(close); + close.hdr.src_port = port_id; + close.hdr.dest_port = copp->id; + close.hdr.token = port_id << 16 | copp_idx; + close.hdr.opcode = ADM_CMD_DEVICE_CLOSE_V5; + + return q6adm_apr_send_copp_pkt(adm, copp, &close, 0); +} + static void q6adm_free_copp(struct kref *ref) { struct q6copp *c = container_of(ref, struct q6copp, refcount); struct q6adm *adm = c->adm; unsigned long flags; + int ret; + + ret = q6adm_device_close(adm, c, c->afe_port, c->copp_idx); + if (ret < 0) + dev_err(adm->dev, "Failed to close copp %d\n", ret);
spin_lock_irqsave(&adm->copps_list_lock, flags); clear_bit(c->copp_idx, &adm->copp_bitmap[c->afe_port]); @@ -155,13 +219,13 @@ static int q6adm_callback(struct apr_device *adev, struct apr_resp_pkt *data) switch (result->opcode) { case ADM_CMD_DEVICE_OPEN_V5: case ADM_CMD_DEVICE_CLOSE_V5: - copp = q6adm_find_copp(adm, port_idx, copp_idx); - if (!copp) - return 0; - - copp->result = *result; - wake_up(&copp->wait); - kref_put(&copp->refcount, q6adm_free_copp); + list_for_each_entry(copp, &adm->copps_list, node) { + if ((port_idx == copp->afe_port) && (copp_idx == copp->copp_idx)) { + copp->result = *result; + wake_up(&copp->wait); + break; + } + } break; case ADM_CMD_MATRIX_MAP_ROUTINGS_V5: adm->result = *result; @@ -234,65 +298,6 @@ static struct q6copp *q6adm_alloc_copp(struct q6adm *adm, int port_idx) return c; }
-static int q6adm_apr_send_copp_pkt(struct q6adm *adm, struct q6copp *copp, - struct apr_pkt *pkt, uint32_t rsp_opcode) -{ - struct device *dev = adm->dev; - uint32_t opcode = pkt->hdr.opcode; - int ret; - - mutex_lock(&adm->lock); - copp->result.opcode = 0; - copp->result.status = 0; - ret = apr_send_pkt(adm->apr, pkt); - if (ret < 0) { - dev_err(dev, "Failed to send APR packet\n"); - ret = -EINVAL; - goto err; - } - - /* Wait for the callback with copp id */ - if (rsp_opcode) - ret = wait_event_timeout(copp->wait, - (copp->result.opcode == opcode) || - (copp->result.opcode == rsp_opcode), - msecs_to_jiffies(TIMEOUT_MS)); - else - ret = wait_event_timeout(copp->wait, - (copp->result.opcode == opcode), - msecs_to_jiffies(TIMEOUT_MS)); - - if (!ret) { - dev_err(dev, "ADM copp cmd timedout\n"); - ret = -ETIMEDOUT; - } else if (copp->result.status > 0) { - dev_err(dev, "DSP returned error[%d]\n", - copp->result.status); - ret = -EINVAL; - } - -err: - mutex_unlock(&adm->lock); - return ret; -} - -static int q6adm_device_close(struct q6adm *adm, struct q6copp *copp, - int port_id, int copp_idx) -{ - struct apr_pkt close; - - close.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, - APR_HDR_LEN(APR_HDR_SIZE), - APR_PKT_VER); - close.hdr.pkt_size = sizeof(close); - close.hdr.src_port = port_id; - close.hdr.dest_port = copp->id; - close.hdr.token = port_id << 16 | copp_idx; - close.hdr.opcode = ADM_CMD_DEVICE_CLOSE_V5; - - return q6adm_apr_send_copp_pkt(adm, copp, &close, 0); -} - static struct q6copp *q6adm_find_matching_copp(struct q6adm *adm, int port_id, int topology, int mode, int rate, @@ -567,15 +572,6 @@ EXPORT_SYMBOL_GPL(q6adm_matrix_map); */ int q6adm_close(struct device *dev, struct q6copp *copp) { - struct q6adm *adm = dev_get_drvdata(dev->parent); - int ret = 0; - - ret = q6adm_device_close(adm, copp, copp->afe_port, copp->copp_idx); - if (ret < 0) { - dev_err(adm->dev, "Failed to close copp %d\n", ret); - return ret; - } - kref_put(&copp->refcount, q6adm_free_copp);
return 0; diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c index 2cd522108221..01299bad456d 100644 --- a/sound/soc/qcom/qdsp6/q6apm-dai.c +++ b/sound/soc/qcom/qdsp6/q6apm-dai.c @@ -85,6 +85,7 @@ static const struct snd_pcm_hardware q6apm_dai_hardware_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | + SNDRV_PCM_INFO_NO_REWINDS | SNDRV_PCM_INFO_SYNC_APPLPTR | SNDRV_PCM_INFO_BATCH), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE), .rates = SNDRV_PCM_RATE_8000_48000, @@ -104,6 +105,7 @@ static const struct snd_pcm_hardware q6apm_dai_hardware_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | + SNDRV_PCM_INFO_NO_REWINDS | SNDRV_PCM_INFO_SYNC_APPLPTR | SNDRV_PCM_INFO_BATCH), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE), .rates = SNDRV_PCM_RATE_8000_192000, diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index a400c9a31fea..9f1c5e2676dd 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -232,13 +232,14 @@ static int q6asm_dai_prepare(struct snd_soc_component *component, prtd->pcm_count = snd_pcm_lib_period_bytes(substream); prtd->pcm_irq_pos = 0; /* rate and channels are sent to audio driver */ - if (prtd->state) { + if (prtd->state == Q6ASM_STREAM_RUNNING) { /* clear the previous setup if any */ q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE); q6asm_unmap_memory_regions(substream->stream, prtd->audio_client); q6routing_stream_close(soc_prtd->dai_link->id, substream->stream); + prtd->state = Q6ASM_STREAM_STOPPED; }
ret = q6asm_map_memory_regions(substream->stream, prtd->audio_client, @@ -402,13 +403,13 @@ static int q6asm_dai_open(struct snd_soc_component *component, }
ret = snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32); + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 480); if (ret < 0) { dev_err(dev, "constraint for period bytes step ret = %d\n", ret); } ret = snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32); + SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 480); if (ret < 0) { dev_err(dev, "constraint for buffer bytes step ret = %d\n", ret); diff --git a/sound/soc/qcom/sc7280.c b/sound/soc/qcom/sc7280.c index 230af8d7b205..bc12aa5064ce 100644 --- a/sound/soc/qcom/sc7280.c +++ b/sound/soc/qcom/sc7280.c @@ -317,7 +317,7 @@ static void sc7280_snd_shutdown(struct snd_pcm_substream *substream) struct snd_soc_card *card = rtd->card; struct sc7280_snd_data *data = snd_soc_card_get_drvdata(card); struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); - struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id]; + struct sdw_stream_runtime *sruntime = qcom_snd_sdw_get_stream(substream);
switch (cpu_dai->id) { case MI2S_PRIMARY: diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c index 916a156f991f..1c482a012920 100644 --- a/sound/soc/qcom/sc8280xp.c +++ b/sound/soc/qcom/sc8280xp.c @@ -69,7 +69,7 @@ static void sc8280xp_snd_shutdown(struct snd_pcm_substream *substream) struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); struct sc8280xp_snd_data *pdata = snd_soc_card_get_drvdata(rtd->card); - struct sdw_stream_runtime *sruntime = pdata->sruntime[cpu_dai->id]; + struct sdw_stream_runtime *sruntime = qcom_snd_sdw_get_stream(substream);
pdata->sruntime[cpu_dai->id] = NULL; sdw_release_stream(sruntime); diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c index f2eda2ff46c0..52e9ad24b5ee 100644 --- a/sound/soc/qcom/sdw.c +++ b/sound/soc/qcom/sdw.c @@ -7,6 +7,37 @@ #include <sound/soc.h> #include "sdw.h"
+static bool qcom_snd_is_sdw_dai(int id) +{ + switch (id) { + case WSA_CODEC_DMA_RX_0: + case WSA_CODEC_DMA_TX_0: + case WSA_CODEC_DMA_RX_1: + case WSA_CODEC_DMA_TX_1: + case WSA_CODEC_DMA_TX_2: + case RX_CODEC_DMA_RX_0: + case TX_CODEC_DMA_TX_0: + case RX_CODEC_DMA_RX_1: + case TX_CODEC_DMA_TX_1: + case RX_CODEC_DMA_RX_2: + case TX_CODEC_DMA_TX_2: + case RX_CODEC_DMA_RX_3: + case TX_CODEC_DMA_TX_3: + case RX_CODEC_DMA_RX_4: + case TX_CODEC_DMA_TX_4: + case RX_CODEC_DMA_RX_5: + case TX_CODEC_DMA_TX_5: + case RX_CODEC_DMA_RX_6: + case RX_CODEC_DMA_RX_7: + case SLIMBUS_0_RX...SLIMBUS_6_TX: + return true; + default: + break; + } + + return false; +} + /** * qcom_snd_sdw_startup() - Helper to start Soundwire stream for SoC audio card * @substream: The PCM substream from audio, as passed to snd_soc_ops->startup() @@ -27,7 +58,10 @@ int qcom_snd_sdw_startup(struct snd_pcm_substream *substream) struct snd_soc_dai *codec_dai; int ret, i;
- sruntime = sdw_alloc_stream(cpu_dai->name); + if (!qcom_snd_is_sdw_dai(cpu_dai->id)) + return 0; + + sruntime = sdw_alloc_stream(cpu_dai->name, SDW_STREAM_PCM); if (!sruntime) return -ENOMEM;
@@ -61,19 +95,8 @@ int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream, if (!sruntime) return 0;
- switch (cpu_dai->id) { - case WSA_CODEC_DMA_RX_0: - case WSA_CODEC_DMA_RX_1: - case RX_CODEC_DMA_RX_0: - case RX_CODEC_DMA_RX_1: - case TX_CODEC_DMA_TX_0: - case TX_CODEC_DMA_TX_1: - case TX_CODEC_DMA_TX_2: - case TX_CODEC_DMA_TX_3: - break; - default: + if (!qcom_snd_is_sdw_dai(cpu_dai->id)) return 0; - }
if (*stream_prepared) return 0; @@ -101,9 +124,7 @@ int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream, } EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct sdw_stream_runtime **psruntime) +struct sdw_stream_runtime *qcom_snd_sdw_get_stream(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_dai *codec_dai; @@ -111,21 +132,23 @@ int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream, struct sdw_stream_runtime *sruntime; int i;
- switch (cpu_dai->id) { - case WSA_CODEC_DMA_RX_0: - case RX_CODEC_DMA_RX_0: - case RX_CODEC_DMA_RX_1: - case TX_CODEC_DMA_TX_0: - case TX_CODEC_DMA_TX_1: - case TX_CODEC_DMA_TX_2: - case TX_CODEC_DMA_TX_3: - for_each_rtd_codec_dais(rtd, i, codec_dai) { - sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream); - if (sruntime != ERR_PTR(-ENOTSUPP)) - *psruntime = sruntime; - } - break; + if (!qcom_snd_is_sdw_dai(cpu_dai->id)) + return NULL; + + for_each_rtd_codec_dais(rtd, i, codec_dai) { + sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream); + if (sruntime != ERR_PTR(-ENOTSUPP)) + return sruntime; } + return NULL; +} +EXPORT_SYMBOL_GPL(qcom_snd_sdw_get_stream); + +int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct sdw_stream_runtime **psruntime) +{ + *psruntime = qcom_snd_sdw_get_stream(substream);
return 0;
@@ -138,23 +161,13 @@ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream, struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
- switch (cpu_dai->id) { - case WSA_CODEC_DMA_RX_0: - case WSA_CODEC_DMA_RX_1: - case RX_CODEC_DMA_RX_0: - case RX_CODEC_DMA_RX_1: - case TX_CODEC_DMA_TX_0: - case TX_CODEC_DMA_TX_1: - case TX_CODEC_DMA_TX_2: - case TX_CODEC_DMA_TX_3: - if (sruntime && *stream_prepared) { - sdw_disable_stream(sruntime); - sdw_deprepare_stream(sruntime); - *stream_prepared = false; - } - break; - default: - break; + if (!qcom_snd_is_sdw_dai(cpu_dai->id)) + return 0; + + if (sruntime && *stream_prepared) { + sdw_disable_stream(sruntime); + sdw_deprepare_stream(sruntime); + *stream_prepared = false; }
return 0; diff --git a/sound/soc/qcom/sdw.h b/sound/soc/qcom/sdw.h index 392e3455f1b1..b8bc5beb0522 100644 --- a/sound/soc/qcom/sdw.h +++ b/sound/soc/qcom/sdw.h @@ -10,6 +10,7 @@ int qcom_snd_sdw_startup(struct snd_pcm_substream *substream); int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream, struct sdw_stream_runtime *runtime, bool *stream_prepared); +struct sdw_stream_runtime *qcom_snd_sdw_get_stream(struct snd_pcm_substream *stream); int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct sdw_stream_runtime **psruntime); diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c index 1001fd321380..ff1ef5363983 100644 --- a/sound/soc/qcom/sm8250.c +++ b/sound/soc/qcom/sm8250.c @@ -86,7 +86,7 @@ static void sm2450_snd_shutdown(struct snd_pcm_substream *substream) struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); struct sm8250_snd_data *data = snd_soc_card_get_drvdata(rtd->card); - struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id]; + struct sdw_stream_runtime *sruntime = qcom_snd_sdw_get_stream(substream);
data->sruntime[cpu_dai->id] = NULL; sdw_release_stream(sruntime); diff --git a/sound/soc/qcom/x1e80100.c b/sound/soc/qcom/x1e80100.c index 898b5c26bf1e..3645fafcd45a 100644 --- a/sound/soc/qcom/x1e80100.c +++ b/sound/soc/qcom/x1e80100.c @@ -55,7 +55,7 @@ static void x1e80100_snd_shutdown(struct snd_pcm_substream *substream) struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); struct x1e80100_snd_data *data = snd_soc_card_get_drvdata(rtd->card); - struct sdw_stream_runtime *sruntime = data->sruntime[cpu_dai->id]; + struct sdw_stream_runtime *sruntime = qcom_snd_sdw_get_stream(substream);
data->sruntime[cpu_dai->id] = NULL; sdw_release_stream(sruntime); diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c index 4f483bfa584f..82c70a9714e5 100644 --- a/sound/soc/sh/rz-ssi.c +++ b/sound/soc/sh/rz-ssi.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/reset.h> +#include <sound/pcm_params.h> #include <sound/soc.h>
/* REGISTER OFFSET */ @@ -85,7 +86,6 @@ struct rz_ssi_stream { int fifo_sample_size; /* sample capacity of SSI FIFO */ int dma_buffer_pos; /* The address for the next DMA descriptor */ int period_counter; /* for keeping track of periods transferred */ - int sample_width; int buffer_pos; /* current frame position in the buffer */ int running; /* 0=stopped, 1=running */
@@ -132,6 +132,12 @@ struct rz_ssi_priv { bool bckp_rise; /* Bit clock polarity (SSICR.BCKP) */ bool dma_rt;
+ struct { + bool tx_active; + bool rx_active; + bool one_stream_triggered; + } dup; + /* Full duplex communication support */ struct { unsigned int rate; @@ -225,10 +231,7 @@ static inline bool rz_ssi_is_stream_running(struct rz_ssi_stream *strm) static void rz_ssi_stream_init(struct rz_ssi_stream *strm, struct snd_pcm_substream *substream) { - struct snd_pcm_runtime *runtime = substream->runtime; - rz_ssi_set_substream(strm, substream); - strm->sample_width = samples_to_bytes(runtime, 1); strm->dma_buffer_pos = 0; strm->period_counter = 0; strm->buffer_pos = 0; @@ -352,13 +355,12 @@ static int rz_ssi_start(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) bool is_full_duplex; u32 ssicr, ssifcr;
- is_full_duplex = rz_ssi_is_stream_running(&ssi->playback) || - rz_ssi_is_stream_running(&ssi->capture); + is_full_duplex = ssi->dup.tx_active && ssi->dup.rx_active; ssicr = rz_ssi_reg_readl(ssi, SSICR); ssifcr = rz_ssi_reg_readl(ssi, SSIFCR); if (!is_full_duplex) { ssifcr &= ~0xF; - } else { + } else if (ssi->dup.one_stream_triggered) { rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0); rz_ssi_set_idle(ssi); ssifcr &= ~SSIFCR_FIFO_RST; @@ -394,12 +396,16 @@ static int rz_ssi_start(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) SSISR_RUIRQ), 0);
strm->running = 1; - if (is_full_duplex) - ssicr |= SSICR_TEN | SSICR_REN; - else + if (!is_full_duplex) { ssicr |= is_play ? SSICR_TEN : SSICR_REN; - - rz_ssi_reg_writel(ssi, SSICR, ssicr); + rz_ssi_reg_writel(ssi, SSICR, ssicr); + } else if (ssi->dup.one_stream_triggered) { + ssicr |= SSICR_TEN | SSICR_REN; + rz_ssi_reg_writel(ssi, SSICR, ssicr); + ssi->dup.one_stream_triggered = false; + } else { + ssi->dup.one_stream_triggered = true; + }
return 0; } @@ -897,6 +903,30 @@ static int rz_ssi_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) return 0; }
+static int rz_ssi_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + ssi->dup.tx_active = true; + else + ssi->dup.rx_active = true; + + return 0; +} + +static void rz_ssi_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + ssi->dup.tx_active = false; + else + ssi->dup.rx_active = false; +} + static bool rz_ssi_is_valid_hw_params(struct rz_ssi_priv *ssi, unsigned int rate, unsigned int channels, unsigned int sample_width, @@ -927,9 +957,9 @@ static int rz_ssi_dai_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai); - struct rz_ssi_stream *strm = rz_ssi_stream_get(ssi, substream); unsigned int sample_bits = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min; + unsigned int sample_width = params_width(params); unsigned int channels = params_channels(params); unsigned int rate = params_rate(params);
@@ -947,21 +977,21 @@ static int rz_ssi_dai_hw_params(struct snd_pcm_substream *substream,
if (rz_ssi_is_stream_running(&ssi->playback) || rz_ssi_is_stream_running(&ssi->capture)) { - if (rz_ssi_is_valid_hw_params(ssi, rate, channels, - strm->sample_width, sample_bits)) + if (rz_ssi_is_valid_hw_params(ssi, rate, channels, sample_width, sample_bits)) return 0;
dev_err(ssi->dev, "Full duplex needs same HW params\n"); return -EINVAL; }
- rz_ssi_cache_hw_params(ssi, rate, channels, strm->sample_width, - sample_bits); + rz_ssi_cache_hw_params(ssi, rate, channels, sample_width, sample_bits);
return rz_ssi_clk_setup(ssi, rate, channels); }
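The rz-ssi change tracks which directions are open (tx_active/rx_active set in startup and cleared in shutdown) and, when both are open, defers enabling the hardware until the second stream's trigger arrives, so transmit and receive start together. A small userspace model of that gating logic follows; the ssi_* names are invented for the sketch and nothing here is the driver's actual register handling:

#include <stdbool.h>
#include <stdio.h>

struct ssi_state {
    bool tx_active;              /* set by startup(), cleared by shutdown() */
    bool rx_active;
    bool one_stream_triggered;
};

/* Called from each stream's trigger; returns true when the hardware
 * should actually be enabled on this call. */
static bool ssi_trigger(struct ssi_state *s)
{
    bool full_duplex = s->tx_active && s->rx_active;

    if (!full_duplex)
        return true;                    /* single direction: start now */

    if (s->one_stream_triggered) {
        s->one_stream_triggered = false;
        return true;                    /* second trigger: start both */
    }

    s->one_stream_triggered = true;     /* first trigger: wait for the peer */
    return false;
}

int main(void)
{
    struct ssi_state s = { .tx_active = true, .rx_active = true };

    printf("playback trigger starts hw: %d\n", ssi_trigger(&s));  /* 0 */
    printf("capture trigger starts hw:  %d\n", ssi_trigger(&s));  /* 1 */
    return 0;
}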
static const struct snd_soc_dai_ops rz_ssi_dai_ops = { + .startup = rz_ssi_startup, + .shutdown = rz_ssi_shutdown, .trigger = rz_ssi_dai_trigger, .set_fmt = rz_ssi_dai_set_fmt, .hw_params = rz_ssi_dai_hw_params, diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c index b45ee7e24f22..c46e09a568c5 100644 --- a/sound/soc/stm/stm32_sai.c +++ b/sound/soc/stm/stm32_sai.c @@ -122,30 +122,24 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client, if (!pdev) { dev_err(&sai_client->pdev->dev, "Device not found for node %pOFn\n", np_provider); - of_node_put(np_provider); return -ENODEV; }
sai_provider = platform_get_drvdata(pdev); + put_device(&pdev->dev); if (!sai_provider) { dev_err(&sai_client->pdev->dev, "SAI sync provider data not found\n"); - ret = -EINVAL; - goto error; + return -EINVAL; }
/* Configure sync client */ ret = stm32_sai_sync_conf_client(sai_client, synci); if (ret < 0) - goto error; + return ret;
/* Configure sync provider */ - ret = stm32_sai_sync_conf_provider(sai_provider, synco); - -error: - put_device(&pdev->dev); - of_node_put(np_provider); - return ret; + return stm32_sai_sync_conf_provider(sai_provider, synco); }
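The stm32_sai_set_sync() rework drops the device reference as soon as the provider's driver data has been read, instead of holding it across the whole function and unwinding through a shared error label. A loose, generic sketch of that "take a reference only for as long as the lookup needs it" pattern; the refcount here is a plain counter, not the kernel's struct device handling:

#include <stdio.h>

struct dev { int refcount; const char *drvdata; };

static struct dev provider = { .refcount = 1, .drvdata = "sai-provider" };

static struct dev *get_device_ref(void)   { provider.refcount++; return &provider; }
static void put_device_ref(struct dev *d) { d->refcount--; }

static int configure_sync(void)
{
    struct dev *d = get_device_ref();
    const char *data = d->drvdata;

    /* Drop the reference as soon as we have what we came for... */
    put_device_ref(d);

    /* ...so every later return path is free of cleanup duties. */
    if (!data)
        return -1;

    printf("configuring against %s (refcount back to %d)\n",
           data, provider.refcount);
    return 0;
}

int main(void) { return configure_sync() ? 1 : 0; }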
static int stm32_sai_probe(struct platform_device *pdev) diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index 5938d6361e1e..fb1bd9844b55 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -1453,7 +1453,8 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev, dev_err(&pdev->dev, "External synchro not supported\n"); of_node_put(args.np); - return -EINVAL; + ret = -EINVAL; + goto err_put_sync_provider; } sai->sync = SAI_SYNC_EXTERNAL;
@@ -1462,7 +1463,8 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev, (sai->synci > (SAI_GCR_SYNCIN_MAX + 1))) { dev_err(&pdev->dev, "Wrong SAI index\n"); of_node_put(args.np); - return -EINVAL; + ret = -EINVAL; + goto err_put_sync_provider; }
if (of_property_match_string(args.np, "compatible", @@ -1476,7 +1478,8 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev, if (!sai->synco) { dev_err(&pdev->dev, "Unknown SAI sub-block\n"); of_node_put(args.np); - return -EINVAL; + ret = -EINVAL; + goto err_put_sync_provider; } }
@@ -1486,13 +1489,15 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
of_node_put(args.np); sai->sai_ck = devm_clk_get(&pdev->dev, "sai_ck"); - if (IS_ERR(sai->sai_ck)) - return dev_err_probe(&pdev->dev, PTR_ERR(sai->sai_ck), - "Missing kernel clock sai_ck\n"); + if (IS_ERR(sai->sai_ck)) { + ret = dev_err_probe(&pdev->dev, PTR_ERR(sai->sai_ck), + "Missing kernel clock sai_ck\n"); + goto err_put_sync_provider; + }
ret = clk_prepare(sai->pdata->pclk); if (ret < 0) - return ret; + goto err_put_sync_provider;
if (STM_SAI_IS_F4(sai->pdata)) return 0; @@ -1501,14 +1506,23 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev, if (of_property_present(np, "#clock-cells")) { ret = stm32_sai_add_mclk_provider(sai); if (ret < 0) - return ret; + goto err_unprepare_pclk; } else { sai->sai_mclk = devm_clk_get_optional(&pdev->dev, "MCLK"); - if (IS_ERR(sai->sai_mclk)) - return PTR_ERR(sai->sai_mclk); + if (IS_ERR(sai->sai_mclk)) { + ret = PTR_ERR(sai->sai_mclk); + goto err_unprepare_pclk; + } }
return 0; + +err_unprepare_pclk: + clk_unprepare(sai->pdata->pclk); +err_put_sync_provider: + of_node_put(sai->np_sync_provider); + + return ret; }
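The error paths added above all funnel through labels that undo the earlier steps (clk_unprepare, of_node_put) in reverse order. That unwind idiom, sketched outside the kernel with stand-in acquire/release functions:

#include <stdio.h>

static int  acquire_a(void) { puts("acquire A"); return 0; }
static void release_a(void) { puts("release A"); }
static int  acquire_b(void) { puts("acquire B"); return 0; }
static void release_b(void) { puts("release B"); }
static int  acquire_c(void) { puts("acquire C"); return -1; }  /* fails */

static int setup(void)
{
    int ret;

    ret = acquire_a();
    if (ret)
        return ret;

    ret = acquire_b();
    if (ret)
        goto err_release_a;

    ret = acquire_c();
    if (ret)
        goto err_release_b;

    return 0;

    /* Labels release in reverse acquisition order. */
err_release_b:
    release_b();
err_release_a:
    release_a();
    return ret;
}

int main(void) { return setup() ? 1 : 0; }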
static int stm32_sai_sub_probe(struct platform_device *pdev) @@ -1548,26 +1562,34 @@ static int stm32_sai_sub_probe(struct platform_device *pdev) IRQF_SHARED, dev_name(&pdev->dev), sai); if (ret) { dev_err(&pdev->dev, "IRQ request returned %d\n", ret); - return ret; + goto err_unprepare_pclk; }
if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) conf = &stm32_sai_pcm_config_spdif;
ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0); - if (ret) - return dev_err_probe(&pdev->dev, ret, "Could not register pcm dma\n"); + if (ret) { + ret = dev_err_probe(&pdev->dev, ret, "Could not register pcm dma\n"); + goto err_unprepare_pclk; + }
ret = snd_soc_register_component(&pdev->dev, &stm32_component, &sai->cpu_dai_drv, 1); if (ret) { snd_dmaengine_pcm_unregister(&pdev->dev); - return ret; + goto err_unprepare_pclk; }
pm_runtime_enable(&pdev->dev);
return 0; + +err_unprepare_pclk: + clk_unprepare(sai->pdata->pclk); + of_node_put(sai->np_sync_provider); + + return ret; }
static void stm32_sai_sub_remove(struct platform_device *pdev) @@ -1578,6 +1600,7 @@ static void stm32_sai_sub_remove(struct platform_device *pdev) snd_dmaengine_pcm_unregister(&pdev->dev); snd_soc_unregister_component(&pdev->dev); pm_runtime_disable(&pdev->dev); + of_node_put(sai->np_sync_provider); }
#ifdef CONFIG_PM_SLEEP diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c index 20ac32635f1f..d05cb54de788 100644 --- a/sound/usb/mixer_us16x08.c +++ b/sound/usb/mixer_us16x08.c @@ -656,17 +656,25 @@ static void get_meter_levels_from_urb(int s, u8 *meter_urb) { int val = MUC2(meter_urb, s) + (MUC3(meter_urb, s) << 8); + int ch = MUB2(meter_urb, s) - 1; + + if (ch < 0) + return;
if (MUA0(meter_urb, s) == 0x61 && MUA1(meter_urb, s) == 0x02 && MUA2(meter_urb, s) == 0x04 && MUB0(meter_urb, s) == 0x62) { - if (MUC0(meter_urb, s) == 0x72) - store->meter_level[MUB2(meter_urb, s) - 1] = val; - if (MUC0(meter_urb, s) == 0xb2) - store->comp_level[MUB2(meter_urb, s) - 1] = val; + if (ch < SND_US16X08_MAX_CHANNELS) { + if (MUC0(meter_urb, s) == 0x72) + store->meter_level[ch] = val; + if (MUC0(meter_urb, s) == 0xb2) + store->comp_level[ch] = val; + } } if (MUA0(meter_urb, s) == 0x61 && MUA1(meter_urb, s) == 0x02 && - MUA2(meter_urb, s) == 0x02 && MUB0(meter_urb, s) == 0x62) - store->master_level[MUB2(meter_urb, s) - 1] = val; + MUA2(meter_urb, s) == 0x02 && MUB0(meter_urb, s) == 0x62) { + if (ch < ARRAY_SIZE(store->master_level)) + store->master_level[ch] = val; + } }
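The mixer fix computes the channel index once, rejects negative values, and checks it against each destination array's size before storing. A self-contained sketch of that defensive store; MAX_CHANNELS and the arrays are invented for the example and do not mirror the driver's layout:

#include <stdio.h>
#include <stddef.h>

#define MAX_CHANNELS 16
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int meter_level[MAX_CHANNELS];
static int master_level[8];   /* deliberately smaller than MAX_CHANNELS */

static void store_level(int raw_channel, int val)
{
    int ch = raw_channel - 1;  /* the device numbers channels from 1 */

    if (ch < 0)
        return;                /* malformed packet: channel byte was 0 */

    if (ch < MAX_CHANNELS)
        meter_level[ch] = val;

    if ((size_t)ch < ARRAY_SIZE(master_level))
        master_level[ch] = val;
}

int main(void)
{
    store_level(0, 42);        /* dropped: would have indexed [-1] before the fix */
    store_level(3, 42);
    printf("%d %d\n", meter_level[2], master_level[2]);
    return 0;
}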
/* Function to retrieve current meter values from the device. diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index cae799ad44e1..e5938b91199f 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -409,10 +409,12 @@ struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map) .cpu = -1 };
- // cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well. - return __perf_cpu_map__nr(map) > 0 - ? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1) - : result; + if (!map) + return result; + + // The CPUs are always sorted and nr is always > 0 as 0 length map is + // encoded as NULL. + return __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1); }
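The cpumap change makes the NULL ("empty map") case explicit before relying on the map being sorted, after which the last element is the maximum by construction. The same shape in a plain C sketch, with a simplified map type that is not libperf's:

#include <stdio.h>
#include <stddef.h>

struct cpu_map { size_t nr; int cpus[8]; };

/* Returns -1 for an empty map (encoded as NULL), else the last element,
 * relying on the array being kept sorted by whoever built it. */
static int cpu_map_max(const struct cpu_map *map)
{
    if (!map)
        return -1;
    return map->cpus[map->nr - 1];
}

int main(void)
{
    static const struct cpu_map map = { .nr = 3, .cpus = { 0, 2, 5 } };

    printf("max of NULL map: %d\n", cpu_map_max(NULL));
    printf("max of map:      %d\n", cpu_map_max(&map));
    return 0;
}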
/** Is 'b' a subset of 'a'. */ diff --git a/tools/mm/page_owner_sort.c b/tools/mm/page_owner_sort.c index e1f264444342..42bad1324501 100644 --- a/tools/mm/page_owner_sort.c +++ b/tools/mm/page_owner_sort.c @@ -183,7 +183,11 @@ static int compare_ts(const void *p1, const void *p2) { const struct block_list *l1 = p1, *l2 = p2;
- return l1->ts_nsec < l2->ts_nsec ? -1 : 1; + if (l1->ts_nsec < l2->ts_nsec) + return -1; + if (l1->ts_nsec > l2->ts_nsec) + return 1; + return 0; }
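The one-line comparator being replaced could never return 0, so two equal timestamps each claimed to be less than the other, an inconsistent ordering that qsort() implementations are not required to tolerate. A minimal demonstration of the corrected three-way compare on 64-bit keys, separate from the page_owner_sort tool itself:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct block { uint64_t ts_nsec; };

/* Correct three-way comparison: equal keys must compare as 0. */
static int compare_ts(const void *p1, const void *p2)
{
    const struct block *l1 = p1, *l2 = p2;

    if (l1->ts_nsec < l2->ts_nsec)
        return -1;
    if (l1->ts_nsec > l2->ts_nsec)
        return 1;
    return 0;
}

int main(void)
{
    struct block blocks[] = { { 30 }, { 10 }, { 10 }, { 20 } };
    size_t i;

    qsort(blocks, sizeof(blocks) / sizeof(blocks[0]), sizeof(blocks[0]), compare_ts);
    for (i = 0; i < 4; i++)
        printf("%llu\n", (unsigned long long)blocks[i].ts_nsec);
    return 0;
}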
static int compare_cull_condition(const void *p1, const void *p2) diff --git a/tools/testing/ktest/config-bisect.pl b/tools/testing/ktest/config-bisect.pl index 6fd864935319..bee7cb34a289 100755 --- a/tools/testing/ktest/config-bisect.pl +++ b/tools/testing/ktest/config-bisect.pl @@ -741,9 +741,9 @@ if ($start) { die "Can not find file $bad\n"; } if ($val eq "good") { - run_command "cp $output_config $good" or die "failed to copy $config to $good\n"; + run_command "cp $output_config $good" or die "failed to copy $output_config to $good\n"; } elsif ($val eq "bad") { - run_command "cp $output_config $bad" or die "failed to copy $config to $bad\n"; + run_command "cp $output_config $bad" or die "failed to copy $output_config to $bad\n"; } }
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index cfd4378e2129..f87e9f251d13 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -670,6 +670,7 @@ static int nfit_test_search_spa(struct nvdimm_bus *bus, .addr = spa->spa, .region = NULL, }; + struct nfit_mem *nfit_mem; u64 dpa;
ret = device_for_each_child(&bus->dev, &ctx, @@ -687,8 +688,12 @@ static int nfit_test_search_spa(struct nvdimm_bus *bus, */ nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1]; nvdimm = nd_mapping->nvdimm; + nfit_mem = nvdimm_provider_data(nvdimm); + if (!nfit_mem) + return -EINVAL;
- spa->devices[0].nfit_device_handle = handle[nvdimm->id]; + spa->devices[0].nfit_device_handle = + __to_nfit_memdev(nfit_mem)->device_handle; spa->num_nvdimms = 1; spa->devices[0].dpa = dpa;
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 84b8c3c92c79..c1002722c5b9 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c @@ -57,6 +57,26 @@ void idr_alloc_test(void) idr_destroy(&idr); }
+void idr_alloc2_test(void) +{ + int id; + struct idr idr = IDR_INIT_BASE(idr, 1); + + id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL); + assert(id == -ENOSPC); + + id = idr_alloc(&idr, idr_alloc2_test, 1, 2, GFP_KERNEL); + assert(id == 1); + + id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL); + assert(id == -ENOSPC); + + id = idr_alloc(&idr, idr_alloc2_test, 0, 2, GFP_KERNEL); + assert(id == -ENOSPC); + + idr_destroy(&idr); +} + void idr_replace_test(void) { DEFINE_IDR(idr); @@ -409,6 +429,7 @@ void idr_checks(void)
idr_replace_test(); idr_alloc_test(); + idr_alloc2_test(); idr_null_test(); idr_nowait_test(); idr_get_next_test(0); diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc index aee22289536b..1b57771dbfdf 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc @@ -90,9 +90,10 @@ if [ $on != "0" ]; then fail "Tracing is not off" fi
-csum1=`md5sum trace` +# Cannot rely on names being around as they are only cached, strip them +csum1=`cat trace | sed -e 's/^ *[^ ]*\(-[0-9][0-9]*\)/\1/' | md5sum` sleep $SLEEP_TIME -csum2=`md5sum trace` +csum2=`cat trace | sed -e 's/^ *[^ ]*\(-[0-9][0-9]*\)/\1/' | md5sum`
if [ "$csum1" != "$csum2" ]; then fail "Tracing file is still changing" diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 7a535c590245..6f9926836533 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -194,12 +194,14 @@ FIXTURE(iommufd_ioas) uint32_t hwpt_id; uint32_t device_id; uint64_t base_iova; + uint32_t device_pasid_id; };
FIXTURE_VARIANT(iommufd_ioas) { unsigned int mock_domains; unsigned int memory_limit; + bool pasid_capable; };
FIXTURE_SETUP(iommufd_ioas) @@ -222,6 +224,12 @@ FIXTURE_SETUP(iommufd_ioas) &self->hwpt_id, &self->device_id); self->base_iova = MOCK_APERTURE_START; } + + if (variant->pasid_capable) + test_cmd_mock_domain_flags(self->ioas_id, + MOCK_FLAGS_DEVICE_PASID, + NULL, NULL, + &self->device_pasid_id); }
FIXTURE_TEARDOWN(iommufd_ioas) @@ -237,6 +245,7 @@ FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain) FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain) { .mock_domains = 1, + .pasid_capable = true, };
FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain) @@ -597,30 +606,54 @@ TEST_F(iommufd_ioas, get_hw_info) struct iommu_test_hw_info info; uint64_t trailing_bytes; } buffer_larger; - struct iommu_test_hw_info_buffer_smaller { - __u32 flags; - } buffer_smaller;
if (self->device_id) { + uint8_t max_pasid = 0; + /* Provide a zero-size user_buffer */ - test_cmd_get_hw_info(self->device_id, NULL, 0); + test_cmd_get_hw_info(self->device_id, + IOMMU_HW_INFO_TYPE_DEFAULT, NULL, 0); /* Provide a user_buffer with exact size */ - test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact)); + test_cmd_get_hw_info(self->device_id, + IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact, + sizeof(buffer_exact)); + + /* Request for a wrong data_type, and a correct one */ + test_err_get_hw_info(EOPNOTSUPP, self->device_id, + IOMMU_HW_INFO_TYPE_SELFTEST + 1, + &buffer_exact, sizeof(buffer_exact)); + test_cmd_get_hw_info(self->device_id, + IOMMU_HW_INFO_TYPE_SELFTEST, &buffer_exact, + sizeof(buffer_exact)); /* * Provide a user_buffer with size larger than the exact size to check if * kernel zero the trailing bytes. */ - test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger)); + test_cmd_get_hw_info(self->device_id, + IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger, + sizeof(buffer_larger)); /* * Provide a user_buffer with size smaller than the exact size to check if * the fields within the size range still gets updated. */ - test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller)); + test_cmd_get_hw_info(self->device_id, + IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact, + offsetofend(struct iommu_test_hw_info, + flags)); + test_cmd_get_hw_info_pasid(self->device_id, &max_pasid); + ASSERT_EQ(0, max_pasid); + if (variant->pasid_capable) { + test_cmd_get_hw_info_pasid(self->device_pasid_id, + &max_pasid); + ASSERT_EQ(MOCK_PASID_WIDTH, max_pasid); + } } else { test_err_get_hw_info(ENOENT, self->device_id, - &buffer_exact, sizeof(buffer_exact)); + IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_exact, + sizeof(buffer_exact)); test_err_get_hw_info(ENOENT, self->device_id, - &buffer_larger, sizeof(buffer_larger)); + IOMMU_HW_INFO_TYPE_DEFAULT, &buffer_larger, + sizeof(buffer_larger)); } }
@@ -1927,8 +1960,7 @@ TEST_F(iommufd_dirty_tracking, device_dirty_capability)
test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id); test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); - test_cmd_get_hw_capabilities(self->idev_id, caps, - IOMMU_HW_CAP_DIRTY_TRACKING); + test_cmd_get_hw_capabilities(self->idev_id, caps); ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING, caps & IOMMU_HW_CAP_DIRTY_TRACKING);
diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c index c5d5e69452b0..e2012d128e11 100644 --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c @@ -612,7 +612,8 @@ TEST_FAIL_NTH(basic_fail_nth, device) &idev_id)) return -1;
- if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL)) + if (_test_cmd_get_hw_info(self->fd, idev_id, IOMMU_HW_INFO_TYPE_DEFAULT, + &info, sizeof(info), NULL, NULL)) return -1;
if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, 0, &hwpt_id, diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index 40f6f14ce136..9668f2268bd9 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -637,19 +637,24 @@ static void teardown_iommufd(int fd, struct __test_metadata *_metadata) #endif
/* @data can be NULL */ -static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data, - size_t data_len, uint32_t *capabilities) +static int _test_cmd_get_hw_info(int fd, __u32 device_id, __u32 data_type, + void *data, size_t data_len, + uint32_t *capabilities, uint8_t *max_pasid) { struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data; struct iommu_hw_info cmd = { .size = sizeof(cmd), .dev_id = device_id, .data_len = data_len, + .in_data_type = data_type, .data_uptr = (uint64_t)data, .out_capabilities = 0, }; int ret;
+ if (data_type != IOMMU_HW_INFO_TYPE_DEFAULT) + cmd.flags |= IOMMU_HW_INFO_FLAG_INPUT_TYPE; + ret = ioctl(fd, IOMMU_GET_HW_INFO, &cmd); if (ret) return ret; @@ -683,22 +688,33 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data, assert(!info->flags); }
+ if (max_pasid) + *max_pasid = cmd.out_max_pasid_log2; + if (capabilities) *capabilities = cmd.out_capabilities;
return 0; }
-#define test_cmd_get_hw_info(device_id, data, data_len) \ - ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \ - data_len, NULL)) +#define test_cmd_get_hw_info(device_id, data_type, data, data_len) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data_type, \ + data, data_len, NULL, NULL))
-#define test_err_get_hw_info(_errno, device_id, data, data_len) \ - EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \ - data_len, NULL)) - -#define test_cmd_get_hw_capabilities(device_id, caps, mask) \ - ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps)) +#define test_err_get_hw_info(_errno, device_id, data_type, data, data_len) \ + EXPECT_ERRNO(_errno, \ + _test_cmd_get_hw_info(self->fd, device_id, data_type, \ + data, data_len, NULL, NULL)) + +#define test_cmd_get_hw_capabilities(device_id, caps) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \ + IOMMU_HW_INFO_TYPE_DEFAULT, NULL, \ + 0, &caps, NULL)) + +#define test_cmd_get_hw_info_pasid(device_id, max_pasid) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \ + IOMMU_HW_INFO_TYPE_DEFAULT, NULL, \ + 0, NULL, max_pasid))
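The reworked _test_cmd_get_hw_info() gains a data_type argument plus two optional out-parameters (capabilities, max_pasid) that callers pass as NULL when they do not care, and the convenience macros each request only the piece they need. A generic sketch of that "NULL means skip this output" convention, not the iommufd ABI:

#include <stdio.h>
#include <stdint.h>

/* One query routine, several optional results: a NULL pointer simply
 * means the caller is not interested in that particular output. */
static int query_info(int id, uint32_t *capabilities, uint8_t *max_pasid)
{
    (void)id;   /* a real implementation would look the device up */

    if (capabilities)
        *capabilities = 0x1;   /* e.g. "dirty tracking supported" */
    if (max_pasid)
        *max_pasid = 20;       /* log2 of the PASID space */
    return 0;
}

/* Thin wrappers mirror the selftest macros: each asks for one thing. */
static uint32_t query_caps(int id)  { uint32_t c; query_info(id, &c, NULL); return c; }
static uint8_t  query_pasid(int id) { uint8_t p;  query_info(id, NULL, &p); return p; }

int main(void)
{
    printf("caps=0x%x max_pasid_log2=%u\n", query_caps(1), query_pasid(1));
    return 0;
}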
static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd) { diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh index ac7ec6f94023..8f3e4978717a 100755 --- a/tools/testing/selftests/net/mptcp/pm_netlink.sh +++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh @@ -191,6 +191,10 @@ check "show_endpoints" \ flush_endpoint check "show_endpoints" "" "flush addrs"
+add_endpoint 10.0.1.1 flags unknown +check "show_endpoints" "$(format_endpoints "1,10.0.1.1")" "ignore unknown flags" +flush_endpoint + set_limits 9 1 2>/dev/null check "get_limits" "${default_limits}" "rcv addrs above hard limit"
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c index 93fea3442216..0e7fcff363fd 100644 --- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c +++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c @@ -23,6 +23,8 @@ #define IPPROTO_MPTCP 262 #endif
+#define MPTCP_PM_ADDR_FLAG_UNKNOWN _BITUL(7) + static void syntax(char *argv[]) { fprintf(stderr, "%s add|ann|rem|csf|dsf|get|set|del|flush|dump|events|listen|accept [<args>]\n", argv[0]); @@ -827,6 +829,8 @@ int add_addr(int fd, int pm_family, int argc, char *argv[]) flags |= MPTCP_PM_ADDR_FLAG_BACKUP; else if (!strcmp(tok, "fullmesh")) flags |= MPTCP_PM_ADDR_FLAG_FULLMESH; + else if (!strcmp(tok, "unknown")) + flags |= MPTCP_PM_ADDR_FLAG_UNKNOWN; else error(1, errno, "unknown flag %s", argv[arg]); @@ -1032,6 +1036,13 @@ static void print_addr(struct rtattr *attrs, int len) printf(","); }
+ if (flags & MPTCP_PM_ADDR_FLAG_UNKNOWN) { + printf("unknown"); + flags &= ~MPTCP_PM_ADDR_FLAG_UNKNOWN; + if (flags) + printf(","); + } + /* bump unknown flags, if any */ if (flags) printf("0x%x", flags); diff --git a/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c index 507930cee8cb..462d628cc3bd 100644 --- a/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c +++ b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.c @@ -33,9 +33,14 @@ static void die(const char *e) exit(111); }
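The pm_nl_ctl change above follows print_addr()'s existing decode style: each known flag prints its name and clears its bit, so whatever remains at the end can be dumped once as raw hex. A compact sketch of that decode loop with made-up flag names:

#include <stdio.h>
#include <stdint.h>

#define FLAG_SIGNAL   0x01
#define FLAG_BACKUP   0x04
#define FLAG_UNKNOWN  0x80   /* stand-in for the test-only bit */

static void print_flags(uint32_t flags)
{
    static const struct { uint32_t bit; const char *name; } known[] = {
        { FLAG_SIGNAL,  "signal" },
        { FLAG_BACKUP,  "backup" },
        { FLAG_UNKNOWN, "unknown" },
    };
    int first = 1;

    for (size_t i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
        if (flags & known[i].bit) {
            printf("%s%s", first ? "" : ",", known[i].name);
            flags &= ~known[i].bit;   /* consume the bit */
            first = 0;
        }
    }
    if (flags)   /* anything not named above is dumped as hex */
        printf("%s0x%x", first ? "" : ",", flags);
    printf("\n");
}

int main(void)
{
    print_flags(FLAG_SIGNAL | FLAG_UNKNOWN | 0x100);
    return 0;
}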
-static void die_port(uint16_t got, uint16_t want) +static void die_port(const struct sockaddr_in *sin, uint16_t want) { - fprintf(stderr, "Port number changed, wanted %d got %d\n", want, ntohs(got)); + uint16_t got = ntohs(sin->sin_port); + char str[INET_ADDRSTRLEN]; + + inet_ntop(AF_INET, &sin->sin_addr, str, sizeof(str)); + + fprintf(stderr, "Port number changed, wanted %d got %d from %s\n", want, got, str); exit(1); }
@@ -100,7 +105,7 @@ int main(int argc, char *argv[]) die("child recvfrom");
if (peer.sin_port != htons(PORT)) - die_port(peer.sin_port, PORT); + die_port(&peer, PORT); } else { if (sendto(s2, buf, LEN, 0, (struct sockaddr *)&sa1, sizeof(sa1)) != LEN) continue; @@ -109,7 +114,7 @@ int main(int argc, char *argv[]) die("parent recvfrom");
if (peer.sin_port != htons((PORT + 1))) - die_port(peer.sin_port, PORT + 1); + die_port(&peer, PORT + 1); } }
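die_port() now receives the whole sockaddr_in so the report can include the peer address alongside the unexpected port. A self-contained version of that formatting with inet_ntop(), names shortened for the sketch:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static void report_port_mismatch(const struct sockaddr_in *sin, uint16_t want)
{
    char str[INET_ADDRSTRLEN];

    inet_ntop(AF_INET, &sin->sin_addr, str, sizeof(str));
    fprintf(stderr, "Port number changed, wanted %u got %u from %s\n",
            want, ntohs(sin->sin_port), str);
}

int main(void)
{
    struct sockaddr_in peer;

    memset(&peer, 0, sizeof(peer));
    peer.sin_family = AF_INET;
    peer.sin_port = htons(12345);
    inet_pton(AF_INET, "10.0.0.2", &peer.sin_addr);

    report_port_mismatch(&peer, 8080);
    return 0;
}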
diff --git a/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh index a24c896347a8..dc7e9d6da062 100755 --- a/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh +++ b/tools/testing/selftests/net/netfilter/conntrack_reverse_clash.sh @@ -45,6 +45,8 @@ if ip netns exec "$ns0" ./conntrack_reverse_clash; then echo "PASS: No SNAT performed for null bindings" else echo "ERROR: SNAT performed without any matching snat rule" + ip netns exec "$ns0" conntrack -L + ip netns exec "$ns0" conntrack -S exit 1 fi
diff --git a/tools/testing/selftests/net/netfilter/packetdrill/conntrack_syn_challenge_ack.pkt b/tools/testing/selftests/net/netfilter/packetdrill/conntrack_syn_challenge_ack.pkt index 3442cd29bc93..cdb3910af95b 100644 --- a/tools/testing/selftests/net/netfilter/packetdrill/conntrack_syn_challenge_ack.pkt +++ b/tools/testing/selftests/net/netfilter/packetdrill/conntrack_syn_challenge_ack.pkt @@ -26,7 +26,7 @@
+0.01 > R 643160523:643160523(0) win 0
-+0.01 `conntrack -f $NFCT_IP_VERSION -L -p tcp --dport 8080 2>/dev/null | grep UNREPLIED | grep -q SYN_SENT` ++0.1 `conntrack -f $NFCT_IP_VERSION -L -p tcp --dport 8080 2>/dev/null | grep UNREPLIED | grep -q SYN_SENT`
// Must go through. +0.01 > S 0:0(0) win 65535 <mss 1460,sackOK,TS val 1 ecr 0,nop,wscale 8> diff --git a/tools/testing/selftests/net/tap.c b/tools/testing/selftests/net/tap.c index 247c3b3ac1c9..51a209014f1c 100644 --- a/tools/testing/selftests/net/tap.c +++ b/tools/testing/selftests/net/tap.c @@ -56,18 +56,12 @@ static void rtattr_end(struct nlmsghdr *nh, struct rtattr *attr) static struct rtattr *rtattr_add_str(struct nlmsghdr *nh, unsigned short type, const char *s) { - struct rtattr *rta = rtattr_add(nh, type, strlen(s)); + unsigned int strsz = strlen(s) + 1; + struct rtattr *rta;
- memcpy(RTA_DATA(rta), s, strlen(s)); - return rta; -} - -static struct rtattr *rtattr_add_strsz(struct nlmsghdr *nh, unsigned short type, - const char *s) -{ - struct rtattr *rta = rtattr_add(nh, type, strlen(s) + 1); + rta = rtattr_add(nh, type, strsz);
- strcpy(RTA_DATA(rta), s); + memcpy(RTA_DATA(rta), s, strsz); return rta; }
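The tap.c cleanup keeps a single rtattr_add_str() that always reserves and copies strlen(s) + 1 bytes, so the attribute payload stays NUL-terminated and the separate *_strsz variant can be dropped. The sizing detail in isolation, without any of the netlink plumbing (attr_add here is a stand-in, not the test's helper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for rtattr_add(): just returns a payload buffer of 'len' bytes. */
static void *attr_add(size_t len)
{
    return calloc(1, len);
}

/* Always account for the terminating NUL and copy it with the text,
 * so consumers can treat the attribute payload as a C string. */
static char *attr_add_str(const char *s)
{
    size_t strsz = strlen(s) + 1;
    char *payload = attr_add(strsz);

    if (payload)
        memcpy(payload, s, strsz);
    return payload;
}

int main(void)
{
    char *p = attr_add_str("macvtap");

    if (!p)
        return 1;
    printf("payload: \"%s\" (%zu bytes reserved)\n", p, strlen(p) + 1);
    free(p);
    return 0;
}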
@@ -119,7 +113,7 @@ static int dev_create(const char *dev, const char *link_type,
link_info = rtattr_begin(&req.nh, IFLA_LINKINFO);
- rtattr_add_strsz(&req.nh, IFLA_INFO_KIND, link_type); + rtattr_add_str(&req.nh, IFLA_INFO_KIND, link_type);
if (fill_info_data) { info_data = rtattr_begin(&req.nh, IFLA_INFO_DATA); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index aba4078ae225..16cb973741f4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2062,7 +2062,7 @@ int __kvm_set_memory_region(struct kvm *kvm, return -EINVAL; if ((mem->userspace_addr != old->userspace_addr) || (npages != old->npages) || - ((mem->flags ^ old->flags) & KVM_MEM_READONLY)) + ((mem->flags ^ old->flags) & (KVM_MEM_READONLY | KVM_MEM_GUEST_MEMFD))) return -EINVAL;
if (base_gfn != old->base_gfn)